diff --git "a/2313.jsonl" "b/2313.jsonl" new file mode 100644--- /dev/null +++ "b/2313.jsonl" @@ -0,0 +1,1042 @@ +{"seq_id":"4234357423","text":"import os\nimport re\n\nfrom scrapy.http import Request, FormRequest\nfrom scraper.base_scrapper import (\n SitemapSpider,\n SiteMapScrapper,\n PROXY_USERNAME,\n PROXY_PASSWORD,\n PROXY\n)\n\nimport uuid\n\n\nfrom selenium.webdriver import (\n Chrome,\n ChromeOptions\n)\n\n\nUSER = 'Cyrax_011'\nPASS = 'Night#India065'\n\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) '\\\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\\\n 'Chrome/81.0.4044.138 Safari/537.36'\n\n\nclass DemonForumsSpider(SitemapSpider):\n name = 'demonforums_spider'\n\n use_proxy = \"VIP\"\n proxy_countries = ['us', 'uk']\n\n handle_httpstatus_list = [403, 503]\n\n ip_check_xpath = \"//text()[contains(.,\\\"Your IP\\\")]\"\n\n rotation_tries = 0\n\n base_url = 'https://demonforums.net/'\n avatar_name_pattern = re.compile(r'avatar_(\\d+\\.\\w+)')\n pagination_pattern = re.compile(r'.*page=(\\d+)')\n\n # # Xpath stuffs\n forum_xpath = '//a[contains(@href, \"Forum-\")]/@href'\n thread_xpath = '//tr[contains(@class,\"inline_row\")]'\n\n thread_first_page_xpath = './/span[contains(@class, \"subject_\")'\\\n ' and contains(@id, \"tid_\")]/a'\n\n thread_last_page_xpath = './/td[contains(@class,\"lastpost\")]//'\\\n 'a[text()=\"Last Post\"]/@href'\n\n # thread date later\n thread_date_xpath = './/td[contains(@class,\"last_post\")]/span/span/@title|'\\\n './/td[contains(@class,\"last_post\")]/span/a[last()]/'\\\n 'following-sibling::text()'\n\n pagination_xpath = '//a[contains(@class,\"pagination_next\")]/@href'\n\n thread_pagination_xpath = '//a[contains(@class,\"pagination_previous\")]'\\\n '/@href'\n\n thread_page_xpath = '//span[contains(@class,\"pagination_current\")]/text()'\n\n post_date_xpath = '//span[@class=\"post_date\"]/span/@title|//span'\\\n '[@class=\"post_date\"]/text()'\n\n avatar_xpath = '//div[@class=\"author_avatar\"]//img/@src'\n \n # Login Failed Message\n login_failed_xpath = '//div[contains(., \"You have entered an invalid username or password\")]'\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.base_url = 'https://demonforums.net/'\n # self.headers = {\n # 'accept': 'text/html,application/xhtml+xml,application/xml'\n # ';q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,'\n # 'application/signed-exchange;v=b3;q=0.9',\n # 'accept-language': 'en-US,en;q=0.9',\n # 'cache-control': 'max-age=0',\n # 'referer': 'https://demonforums.net/',\n # 'sec-fetch-dest': 'document',\n # 'sec-fetch-mode': 'navigate',\n # 'sec-fetch-site': 'same-origin',\n # 'sec-fetch-user': '?1',\n # 'upgrade-insecure-requests': '1',\n # 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6)'\n # ' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'\n # '85.0.4183.83 Safari/537.36'\n # }\n\n def start_requests(self):\n cookies, ip = self.get_cookies(\n base_url=self.base_url,\n proxy=self.use_proxy,\n fraud_check=True,\n )\n\n self.logger.info(f'COOKIES: {cookies}')\n\n # Init request kwargs and meta\n meta = {\n \"cookiejar\": uuid.uuid1().hex,\n \"ip\": ip\n }\n\n self.logger.info(f'COOKIES: {cookies}')\n yield Request(\n url=self.base_url,\n headers=self.headers,\n callback=self.process_login,\n cookies=cookies,\n meta=meta,\n dont_filter=True\n )\n\n def parse_captcha(self, response):\n ip_ban_check = response.xpath(\n self.ip_check_xpath\n ).extract_first()\n\n # Report bugs\n if \"error code: 1005\" in response.text:\n self.logger.info(\n \"Ip 
for error 1005 code. Rotating.\"\n )\n elif ip_ban_check:\n self.logger.info(\n \"%s has been permanently banned. Rotating.\" % ip_ban_check\n )\n\n if self.use_proxy == 'Off':\n return\n\n if self.rotation_tries < 20:\n self.rotation_tries += 1\n yield from self.start_requests()\n\n def process_login(self, response):\n\n self.synchronize_headers(response)\n\n if response.status == 403:\n yield from self.parse_captcha(response)\n return\n\n my_post_key = response.xpath(\n '//input[@name=\"my_post_key\"]/@value').extract_first()\n if not my_post_key:\n return\n\n self.logger.info(\"Found post_key\")\n\n form_data = {\n 'action': 'do_login',\n 'url': '',\n 'quick_login': '1',\n 'my_post_key': my_post_key,\n 'quick_username': USER,\n 'quick_password': PASS,\n 'quick_remember': 'yes',\n 'submit': 'Login',\n }\n login_url = 'https://demonforums.net/member.php'\n yield FormRequest(\n url=login_url,\n formdata=form_data,\n callback=self.parse,\n headers=response.request.headers,\n meta=self.synchronize_meta(response),\n dont_filter=True,\n )\n\n def parse(self, response):\n\n self.synchronize_headers(response)\n\n if response.status == 403:\n yield from self.parse_captcha(response)\n return\n \n # Check if login failed\n self.check_if_logged_in(response)\n\n yield Request(\n url=self.base_url,\n callback=self.parse_start,\n headers=response.request.headers,\n meta=self.synchronize_meta(response),\n dont_filter=True\n )\n\n def parse_start(self, response):\n\n # Synchronize user agent for cloudfare middlewares\n self.synchronize_headers(response)\n\n # If captcha detected\n if response.status in [503, 403]:\n yield from self.parse_captcha(response)\n return\n\n # Load all forums\n all_forums = response.xpath(self.forum_xpath).extract()\n\n # update stats\n self.crawler.stats.set_value(\"mainlist/mainlist_count\", len(all_forums))\n\n for forum_url in all_forums:\n # Standardize url\n if 'http://' not in forum_url and 'https://' not in forum_url:\n if self.base_url not in forum_url:\n forum_url = self.base_url + forum_url\n\n yield Request(\n url=forum_url,\n headers=self.headers,\n callback=self.parse_forum,\n meta=self.synchronize_meta(response)\n )\n\n def parse_thread(self, response):\n\n self.synchronize_headers(response)\n\n if response.status == 403:\n meta = response.meta.copy()\n tries = meta.get('tries', 1)\n if tries < 20:\n meta['tries'] = tries + 1\n yield Request(\n url=response.url,\n callback=self.parse_forum,\n headers=response.request.headers,\n meta=self.synchronize_meta(response),\n dont_filter=True\n )\n return\n\n yield from super().parse_thread(response)\n yield from super().parse_avatars(response)\n\nclass DemonForumsScrapper(SiteMapScrapper):\n\n spider_class = DemonForumsSpider\n site_name = 'demonforums.net'\n site_type = 'forum'\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"scraper/demonforums.py","file_name":"demonforums.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31834658640","text":"import json\n\nimport numpy as np\nimport rasterio\nimport click\n\nfrom object_detection.utils import object_detection_evaluation, label_map_util\n\nfrom rv.commands.utils import (\n download_if_needed, make_temp_dir, get_local_path, upload_if_needed,\n get_boxes_from_geojson, download_and_build_vrt)\nfrom rv.commands.settings import max_num_classes\n\n\ndef get_eval_result(ground_truth_path, predictions_path, image_dataset):\n gt_boxes, gt_box_to_class_id, _ = 
\\\n get_boxes_from_geojson(ground_truth_path, image_dataset)\n gt_class_ids = np.array(\n [gt_box_to_class_id[box] for box in gt_boxes], dtype=int)\n # Subtract one because class id's start at 1, but evaluation api assumes\n # the start at 0. You might think we could just write the label_map.pbtxt\n # so the class ids start at 0, but that throws an exception.\n gt_class_ids -= 1\n gt_boxes = np.array(gt_boxes, dtype=float)\n\n pred_boxes, pred_box_to_class_id, pred_box_to_score = \\\n get_boxes_from_geojson(predictions_path, image_dataset)\n pred_class_ids = np.array(\n [pred_box_to_class_id[box] for box in pred_boxes], dtype=int)\n pred_class_ids -= 1\n pred_scores = np.array(\n [pred_box_to_score[box] for box in pred_boxes], dtype=float)\n pred_boxes = np.array(pred_boxes, dtype=float)\n\n nb_gt_classes = len(set(gt_box_to_class_id.values()))\n od_eval = object_detection_evaluation.ObjectDetectionEvaluation(\n nb_gt_classes, matching_iou_threshold=0.1)\n image_key = 'image'\n od_eval.add_single_ground_truth_image_info(\n image_key, gt_boxes, gt_class_ids)\n od_eval.add_single_detected_image_info(\n image_key, pred_boxes, pred_scores, pred_class_ids)\n\n od_eval.evaluate()\n return od_eval.get_eval_result()\n\n\ndef write_results(output_path, label_map_path, eval_result):\n label_map = label_map_util.load_labelmap(label_map_path)\n categories = label_map_util.convert_label_map_to_categories(\n label_map, max_num_classes=max_num_classes, use_display_name=True)\n category_index = label_map_util.create_category_index(categories)\n\n results = []\n for class_id in range(1, len(category_index) + 1):\n class_name = category_index[class_id]['name']\n # Get precision and recall assuming all boxes are used.\n # Subtract one to account for fact that class id's start at 1.\n precision = eval_result.precisions[class_id - 1][-1]\n recall = eval_result.recalls[class_id - 1][-1]\n class_results = {\n 'name': class_name,\n 'precision': precision,\n 'recall': recall\n }\n results.append(class_results)\n\n with open(output_path, 'w') as output_file:\n output_file.write(json.dumps(results, indent=4))\n\n\n@click.command()\n@click.argument('image_uris', nargs=-1)\n@click.argument('ground_truth_uri')\n@click.argument('predictions_uri')\n@click.argument('label_map_uri')\n@click.argument('output_uri')\ndef eval_predictions(image_uris, ground_truth_uri, predictions_uri,\n label_map_uri, output_uri):\n \"\"\"Evaluate predictions against ground truth.\n\n Args:\n ground_truth_uri: GeoJSON file with ground truth bounding boxes\n predictions_uri: GeoJSON file with predicted bounding boxes\n output_uri: JSON file with metrics\n \"\"\"\n temp_dir = '/opt/data/temp/'\n make_temp_dir(temp_dir)\n\n image_path = download_and_build_vrt(temp_dir, image_uris)\n image_dataset = rasterio.open(image_path)\n\n ground_truth_path = download_if_needed(temp_dir, ground_truth_uri)\n predictions_path = download_if_needed(temp_dir, predictions_uri)\n label_map_path = download_if_needed(temp_dir, label_map_uri)\n\n eval_result = get_eval_result(\n ground_truth_path, predictions_path, image_dataset)\n\n output_path = get_local_path(temp_dir, output_uri)\n write_results(output_path, label_map_path, eval_result)\n upload_if_needed(output_path, output_uri)\n\n\nif __name__ == '__main__':\n 
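# eval_predictions is a click command, so the bare call below makes click
# parse the image/ground-truth/prediction/label-map/output URIs from sys.argv.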
eval_predictions()\n","repo_name":"NeuralNetworkingTechnologies/raster-vision","sub_path":"src/detection/rv/commands/eval_predictions.py","file_name":"eval_predictions.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"4015665873","text":"# Ex. QProgressBar.\n\nimport sys\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\n\n\nclass Widget(QWidget):\n def __init__(self):\n super().__init__()\n self.slider = QSlider(Qt.Horizontal, self)\n self.dial = QDial(self)\n self.button = QPushButton('Default', self)\n\n self.init_ui()\n\n def init_ui(self):\n self.slider.move(30, 30)\n self.slider.setRange(0, 50)\n self.slider.setSingleStep(2)\n\n self.dial.move(30, 50)\n self.dial.setRange(0, 50)\n\n self.button.move(35, 160)\n\n self.slider.valueChanged.connect(self.dial.setValue)\n self.dial.valueChanged.connect(self.slider.setValue)\n self.button.clicked.connect(self.on_clicked)\n\n self.setWindowTitle('QSlider and QDial')\n self.setGeometry(300, 300, 400, 200)\n self.show()\n\n def on_clicked(self):\n self.slider.setValue(0)\n self.dial.setValue(0)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n widget = Widget()\n exit(app.exec())\n\n","repo_name":"ottogi99/tutorials","sub_path":"PyQt5_Tutorial/03.Widget/08.QSlider_QDial.py","file_name":"08.QSlider_QDial.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7819446856","text":"import numpy as np\nimport pandas as pd\nimport tkinter as tk\nimport seaborn as sns\nimport tkinter.messagebox\nimport statistics\nimport matplotlib.pyplot as plt\n\nspielfeldhälfte = 1 #1 oder -1. 1 ist bei team A\nlänge_des_ballwechsels = 0\n\nclass Team():\n def __init__(self, name, stärke, risiko = 0.5):\n self.name = name\n self.stärke = stärke\n self.punkte = 0\n self.risiko = risiko\n\n def onepunkt(self):\n self.punkte += 1\n\ndef calculate_strategy():\n try:\n # Lese die Eingaben des Benutzers aus den Eingabefeldern aus\n strength_a = int(entry_a.get())/10\n strength_b = int(entry_b.get())/10\n ermüdung = float(entry_ermüdung.get())\n except ValueError:\n # Wenn die Eingaben des Benutzers ungültig sind, zeige eine Fehlermeldung an\n tk.messagebox.showerror(\"Fehler\", \"Bitte gib gültige Zahlen ein.\")\n return\n if ermüdung > 0.001:\n tk.messagebox.showerror(\"Fehler\", \"Ermüdung zu hoch.\")\n raise Exception(\"Ermüdung zu hoch\")\n if strength_a > 1:\n tk.messagebox.showerror(\"Fehler\", \"Stärke Team A zu hoch.\")\n raise Exception(\"Stärke Team A zu hoch\")\n if strength_b > 1:\n tk.messagebox.showerror(\"Fehler\", \"Stärke Team B zu hoch.\")\n raise Exception(\"Stärke Team B zu hoch\")\n # Anzeige der Ergebnisse in einem DataFrame\n won_a = 0\n spiele_count = 0\n teams = dict()\n teams[\"A\"] = Team(\"TeamA\", strength_a)\n teams[\"B\"] = Team(\"TeamB\", strength_b)\n #print(\"Teams:\")\n #print(teams[\"A\"].stärke)\n #print(teams[\"B\"].stärke)\n risiko_list = [0.2, 0.4, 0.6, 0.8, 1.0]\n risiko_results = spielen(teams, risiko_list, ermüdung)\n result_window = tk.Toplevel()\n result_window.title(\"Resultate\")\n result_text = tk.Text(result_window)\n #print(\"Risiko results in calculate_strategy:\")\n #print(risiko_results)\n\n #result_text.insert(tk.END, str(risiko_results))\n for spiel_id,spiel in risiko_results.items():\n #print(\"spiel_id in calculate_strategy: \")\n #print(spiel_id)\n #print(\"spiel in calculate_strategy: \")\n 
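# risiko_results comes back from spielen() as a nested dict:
# risiko_results[spiel][risiko] -> {"A": points_A, "B": points_B,
# "ballwechsel": running mean rally length}, which is why the loops here
# walk spiel -> risiko -> result before writing into the results window.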
#print(spiel)\n #result_text.insert(tk.END, \"Spiel: \"+str(spiel\"\\n\")\n for risiko,result in spiel.items():\n #print(\"risiko in for loop calculate_strategy: \")\n #print(risiko)\n #print(\"result in for loop calculate_strategy: \")\n #print(result)\n result_text.insert(tk.END, \"Risiko \"+str(risiko)+\":\\n\")\n result_text.insert(tk.END, \"A: \"+str(result[\"A\"])+\" B: \"+str(result[\"B\"])+\"\\n\")\n result_text.insert(tk.END, \"Ballwechseldurchschnitt: \"+str(round(result[\"ballwechsel\"],2))+\"\\n\")\n if result[\"A\"]>result[\"B\"]:\n won_a +=1\n spiele_count += 1\n result_text.insert(tk.END, \"***************\\n\")\n result_text.insert(tk.END, f\"Anzahl Spiele {spiele_count} \\n\")\n result_text.insert(tk.END, f\"{won_a} Spiele von A gewonnen\\n\")\n result_text.insert(tk.END, f\"{spiele_count-won_a} Spiele von B gewonnen\\n\")\n result_text.config(state=tk.DISABLED)\n result_text.pack()\n\ndef spielen(teams, risiko_list, ermüdung):\n risiko_results = dict()\n ballwechsel_längen = []\n track_spiele = pd.DataFrame()\n ergebnis_liste = []\n\n for spiel in range(0, 100):\n risiko_results[spiel]=dict()\n #track_spiele[spiel] = dict()\n #spielfeldhälfte = 1\n for risiko in risiko_list:\n #track_spiele[spiel][risiko] = dict()\n risiko_results[spiel][risiko] = dict()\n #print(\"Risiko: \",risiko)\n teams[\"A\"].risiko = risiko\n teams[\"A\"].punkte = 0\n teams[\"B\"].punkte = 0\n stärke = dict()\n stärke[\"A\"] = teams[\"A\"].stärke\n stärke[\"B\"] = teams[\"B\"].stärke\n ballposession = \"A\" if np.random.randint(0, 2)==0 else \"B\"\n #print(\"startballposession:\",ballposession)\n #track_spiel = np.array(30,30)\n zwischenergebnisse = []\n while abs(teams[\"A\"].punkte - teams[\"B\"].punkte) < 2 or max(teams[\"A\"].punkte, teams[\"B\"].punkte) < 21:\n länge_des_ballwechsels = 0\n while True:\n #ergebnis = np.random.randint(0, 100)\n ergebnis = np.random.normal(stärke[ballposession], teams[ballposession].risiko)\n #print(\"stärke A: \"+str(stärke[\"A\"]))\n #print(\"stärke B: \"+str(stärke[\"B\"]))\n länge_des_ballwechsels += 1\n #print(\"Länge des Ballwechsels:\"+str(länge_des_ballwechsels))\n ballposession = \"B\" if ballposession==\"A\" else \"A\"\n if ergebnis >= 0.5: #Erfolgreicher Ball\n #print(\"ballposession =\",ballposession)\n pass\n else:\n teams[ballposession].onepunkt()\n #track_spiele[spiel][risiko][teams[\"A\"].punkte] = []\n #track_spiele[spiel][risiko][teams[\"A\"].punkte][teams[\"B\"].punkte] = 1\n #track_spiele.at[spiel,risiko,teams[\"A\"].punkte,teams[\"B\"].punkte] = 1\n zwischenergebnisse.append([teams[\"A\"].punkte, teams[\"B\"].punkte, risiko, stärke[\"A\"]])\n #track_spiel[teams[\"A\"].punkte][teams[\"B\"].punkte] = risiko\n break\n #Ermüdung\n stärke[\"A\"] -= ermüdung\n stärke[\"B\"] -= ermüdung\n ballwechsel_längen.append(länge_des_ballwechsels)\n #print(teams[\"A\"].name,str(teams[\"A\"].punkte),teams[\"B\"].name,str(teams[\"B\"].punkte))\n ballwechsel_längen_mittelwert=statistics.mean(ballwechsel_längen)\n #print(\"Ende des Spiels. 
Team A: \",teams[\"A\"].punkte,\"Team B:\",teams[\"B\"].punkte)\n #print(\"Spiel:\",spiel)\n #print(\"Ballwechsellängenmittelwert: \"+str(ballwechsel_längen_mittelwert))\n risiko_results[spiel][risiko][\"A\"] = teams[\"A\"].punkte\n risiko_results[spiel][risiko][\"B\"] = teams[\"B\"].punkte\n risiko_results[spiel][risiko][\"ballwechsel\"] = ballwechsel_längen_mittelwert\n #track_spiele[spiel][risiko][\"won_A\"] = 1 if teams[\"A\"].punkte>teams[\"B\"].punkte else 0\n if teams[\"A\"].punkte > teams[\"B\"].punkte:\n for zwischenergebnis in zwischenergebnisse:\n ergebnis_liste.append(zwischenergebnis)\n #print(\"Zwischenergebnisse: \")\n #print(zwischenergebnisse)\n #print(\"Ergebnisliste: \")\n #print(ergebnis_liste)\n\n\n\n heatmap_df = pd.DataFrame([[[] for i in range(40)] for j in range(40)])\n\n for ergebnis in ergebnis_liste:\n heatmap_df[ergebnis[0]][ergebnis[1]].append(ergebnis[2])\n\n\n #heatmap_df.applymap(lambda x: behandel_x(x))\n #print(\"heatmap_df:\")\n #print(heatmap_df)\n\n for rowIndex, row in heatmap_df.iterrows(): # iterate over rows\n for columnIndex, value in row.items():\n #print(\"value: \")\n #print(value)\n if not(isinstance(value, float) or isinstance(value, int)) and len(value)>0:\n heatmap_df[rowIndex][columnIndex] = max(set(value), key=value.count)\n else:\n heatmap_df[rowIndex][columnIndex] = 0\n\n #print(\"heatmap2:\")\n #print(heatmap_df)\n #print(\"type heatmap:\")\n #print(type(heatmap_df))\n #print(\"type heatmap[0]:\")\n #print(type(heatmap_df[0]))\n #print(\"type heatmap[0][0]:\")\n #print(type(heatmap_df[0][0]))\n heatmap_df.to_csv(\"heatmap.csv\", index=False)\n heatmap_df = pd.read_csv(\"heatmap.csv\")\n #heat_map = sns.heatmap(heatmap_df)\n #plt.savefig(\"heatmap.png\")\n #track_spiele.append(track_spiel)\n #for track_spiel in track_spiele:\n #plt = sns.heatmap(heatmap_df, vmin=0, vmax=1.0, cmap='RdYlGn', linewidths=0.30, annot=False, cbar_kws={'label': 'Risk with most observed victories'})\n\n #plt.xlabel(\"Score \" + teams[\"A\"].name)\n #plt.ylabel(\"Score \" + teams[\"B\"].name)\n #plt.title(\"Optimal risk by score situation:\\n\" + teams[\"A\"].name + \" skill \" + str(teams[\"A\"].stärke) + \" vs. \" + teams[\"B\"].name + \" skill \" + str(teams[\"B\"].stärke))\n\n #heatmap_fig = plt.get_figure()\n #heatmap_fig.savefig(\"heatmap.png\")\n #plt.show()\n fig, ax = plt.subplots(figsize=(20, 20))\n plot = sns.heatmap(heatmap_df, cmap='RdYlGn', linewidths=0.30, annot=True,\n cbar_kws={'label': 'Risk with most observed victories'})\n\n plt.xlabel(\"Score \" + teams[\"A\"].name)\n plt.ylabel(\"Score \" + teams[\"B\"].name)\n plt.title(\"Optimal risk by score situation:\\n\" + teams[\"A\"].name + \" skill \" + str(\n teams[\"A\"].stärke*10) + \" vs. 
\" + teams[\"B\"].name + \" skill \" + str(teams[\"B\"].stärke*10))\n\n plt.savefig(\"heatmap.png\")\n #heatmap_fig = plt.get_figure()\n #heatmap_fig.savefig(\"heatmap.png\")\n #plt.show()\n\n\n return risiko_results\n\n# GUI-Setup\nroot = tk.Tk()\nroot.title(\"Optimale Risikostrategie\")\n\n# Label und Eingabefelder für die Spielstärken\nlabel_a = tk.Label(root, text=\"Spielstärke Mannschaft A (0-10):\")\nlabel_a.grid(row=0, column=0)\nentry_a = tk.Entry(root)\nentry_a.grid(row=0, column=1)\n\nlabel_b = tk.Label(root, text=\"Spielstärke Mannschaft B (0-10):\")\nlabel_b.grid(row=1, column=0)\nentry_b = tk.Entry(root)\nentry_b.grid(row=1, column=1)\n\nlabel_ermüdung = tk.Label(root, text=\"Ermüdungsfaktor (0-0.001):\")\nlabel_ermüdung.grid(row=2, column=0)\nentry_ermüdung = tk.Entry(root)\nentry_ermüdung.grid(row=2, column=1)\n\n# Button zum Berechnen der optimalen Risikostrategie\nbutton = tk.Button(root, text=\"Berechnen\", command=calculate_strategy)\nbutton.grid(row=3, column=0, columnspan=2)\n\n# Hauptloop der GUI\nroot.mainloop()\n","repo_name":"StefanDataCraft/volleyball","sub_path":"src/volleyball.py","file_name":"volleyball.py","file_ext":"py","file_size_in_byte":9775,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40080797147","text":"# !/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\n\"\"\"\n-------------------------------------------------\n File Name: demo4\n Author: zhulongkun20@gmail.com\n Date: 2020/10/17 5:40 下午\n Description : \n-------------------------------------------------\n Change Activity:\n 2020/10/17:\n-------------------------------------------------\n\"\"\"\n\n__author__ = 'zhulongkun20@gmail.com'\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread(\"../imgs/pi.png\")\nrows, cols, ch = img.shape\npts1 = np.float32([[50, 50], [200, 50], [50, 200]])\npts2 = np.float32([[10, 100], [200, 50], [100, 250]])\n\nM = cv2.getAffineTransform(pts1, pts2)\ndst = cv2.warpAffine(img, M, (cols, rows))\nplt.subplot(121, plt.imshow(img), plt.title('Input'))\nplt.subplot(121, plt.imshow(img), plt.title('Output'))\nplt.show()\n","repo_name":"longkun-dev/python-opencv-demo","sub_path":"ch14/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16030381399","text":"# Kata name: Factorial\n\n# My two solutions\nfrom math import factorial\ndef factorial_0(n): # max n -> 998\n if n == 1:\n return n\n return n * factorial_0(n - 1)\n\n\ndef factorial_1(n): # max n -> 1558\n result = 1\n while n:\n result *= n\n n -= 1\n return result\n\n\n# other solutions\ndef factorial_2(n):\n j = 1\n for i in range(1, n + 1):\n j *= i\n return j\n\n\ndef factorial_3(n):\n if n > 1:\n return n * factorial_3(n - 1)\n return 1\n\n\n# another task\ndef factorial_4(n):\n if n < 0 or n > 12:\n raise ValueError(\"Error\")\n elif n > 1:\n return n * factorial_4(n-1)\n return 1\n\n\nprint(factorial_2(1558))\nprint(factorial(1558))\n","repo_name":"stepanskyvlad/Learning-Python","sub_path":"Tasks/Codewars/7-kyu/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73497176515","text":"import os\nimport sys\nimport getopt\n\nargv = 
sys.argv[1:]\n\nfps=30\ntreshhold=0.5\nmaxClients=5\ngDriveAuthKey=\"ya29.a0AfH6SMB3dGAYotHUuc3vaG6FNznrwMcYVRu4hfg1oig5F7MNhyG2AFkkNSa6P6qZuFVRSrL9OMOD_Oa0zCZxBnGny6s_tcGDMICQozoFgHLg52uF-vynow8FguJg9R1Icy7b6WSn5MAirq0cr4HiRtRfkx_m\"\n\ntry:\n opts, args = getopt.getopt(argv, \"hftca\", [\"help\", \"fps=\", \"treshhold=\", \"maxClients=\", \"authKey=\"])\nexcept getopt.GetoptError:\n print('server.py -f -t -c -a ')\n sys.exit(2)\n\ntry: \n for opt, arg in opts:\n if opt == '-h':\n print('server.py -f -t -c -a ')\n sys.exit()\n\n elif opt in (\"-f\", \"--fps\"):\n fps = int(arg)\n\n elif opt in (\"-t\", \"--treshhold\"):\n treshhold = float(arg)\n\n elif opt in (\"-c\", \"--maxClients\"):\n maxClients = int(arg)\n\n elif opt in (\"-a\", \"--authKey\"):\n gDriveAuthKey = arg\n\nexcept ValueError as e:\n print(e)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nimport socket\nimport threading\nfrom clienthandler import ClientHandler\n\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserversocket.bind((socket.gethostname(), 9999))\nserversocket.listen(maxClients)\nprint(\"SERVER STARTED\")\n\n\ntry:\n while True:\n clientSocket, address = serversocket.accept()\n\n print('\\nConnected to: ' + address[0] + ':' + str(address[1]))\n clh = ClientHandler(clientSocket, fps, treshhold, gDriveAuthKey)\n clh.start()\n\nexcept KeyboardInterrupt:\n serversocket.close()\n print(\"Closing\")","repo_name":"RoggemanBent/2MCT-S4-IndustryProject","sub_path":"Backend/server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35246291487","text":"\nclass Duenio():\n id=0\n\n def __init__(self,nombre):\n self.__nombre = nombre\n self.telefono = \"\"\n self.mail=\"\"\n self.__mascotas = []\n \n\n def AgregarMascota(self,mascota):\n self.__mascotas.append(mascota)\n\n def __str__(self):\n datos = \"\"\n datos += f\"Nombre: {self.__nombre} - ID: {self.id}\\n\"\n datos += \"Listado de mascotas: \\n\"\n for m in self.__mascotas:\n datos += str(m)\n datos += \"\\n\"\n\n return datos \n\n \n","repo_name":"plorenti/veterinaria","sub_path":"duenio.py","file_name":"duenio.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981085527","text":"# Implement pow(x, n).\n#\n#\n# Example 1:\n#\n# Input: 2.00000, 10\n# Output: 1024.00000\n# Example 2:\n#\n# Input: 2.10000, 3\n# Output: 9.26100\n\nclass Solution(object):\n def myPow(self, x, n):\n \"\"\"\n :type x: float\n :type n: int\n :rtype: float\n \"\"\"\n if not n:\n return 1\n elif n < 0:\n return 1 / self.myPow(x, -n)\n elif n % 2:\n return x * self.myPow(x, n - 1)\n else:\n return self.myPow(x * x, n / 2)\n\n\ns = Solution()\n# print(s.myPow(2.00000, 10))\n# print(s.myPow(2.10000, 3))\nprint(s.myPow(34.00515, -3))\nprint(pow(34.00515, -3))\n","repo_name":"yshshadow/Leetcode","sub_path":"1-50/50.py","file_name":"50.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27741457796","text":"__author__ = 'Rafał'\n\"\"\"\nMAC: 80:1f:02:a2:f2:43\nlogin: pi\nhaslo: UJlidar\n\"\"\"\n\nfrom PySide import QtCore\nimport sys\nimport communication\nimport robot\nimport signal\n\n\nclass RobotController:\n \"\"\"\n Main class for robot controller\n \"\"\"\n def __init__(self):\n # Time\n self.t = 0\n self.scan_time = 100 # ms\n\n # 
Robot\n self.robot = robot.Robot()\n\n # Communication\n self.communication = communication.Communication()\n self.communication.make_server()\n self.communication.new_data.connect(self.new_message)\n\n # run clock\n self.timer = QtCore.QTimer()\n QtCore.QObject.connect(self.timer, QtCore.SIGNAL('timeout()'), self.run)\n self.timer.start(self.scan_time)\n\n @QtCore.Slot(object)\n def run(self):\n pass\n\n @QtCore.Slot(object)\n def new_message(self, message):\n self.robot.move(message['left_motor']/255.0, message['right_motor']/255.0)\n\n def sigint_handler(self, *args):\n \"\"\"\n Stop thread before end\n \"\"\"\n self.communication.stop()\n QtCore.QCoreApplication.quit()\n\nif __name__ == \"__main__\":\n app = QtCore.QCoreApplication(sys.argv)\n robotController = RobotController()\n signal.signal(signal.SIGINT, robotController.sigint_handler) # allow ctrl+c\n app.exec_()\n robotController.communication.stop()\n sys.exit()","repo_name":"uj-robotics/lidar-wheel-controller","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14512651186","text":"'''\n*** pipeline 사용법 다시 익히기 ***\n\nPurpose: hyper parameter tunning and pipeline\n\n1. Check the data dimensions and characters\n1.1 Data loading\n1.2 Check data format through keys (almost dictionary form)\n1.3 Check dimension X and y\n \n2. Data preprocessing\n2.1 scaling: Scaling ensures equal contribution from each feature.\n\n3. Model \n\n4. Prediction and Evaluation\n\n5. Build a piple line to solve an issue\n\n''' \n\nimport numpy as np\nfrom sklearn.datasets import load_breast_cancer\n\n# 1. Check the data dimensions and characters\ncancer = load_breast_cancer()\n#print(cancer.DESCR)\n#print(type(cancer))\n#print(cancer.keys())\n # dict_keys(['data', 'target', 'frame', 'target_names', 'DESCR', \n # 'feature_names', 'filename', 'data_module'])\n\nX = cancer.data\ny = cancer.target\n\nprint(f'X.shape: {X.shape}, y.shape: {y.shape}')\n # Dataset = {(569,31)}, X in R^30, y in R^1, Dataset size#: 569\n\n# 1.2 Degree of data bias.\nprint(np.unique(y, return_counts = True))\n # (array([0, 1]), array([212, 357], dtype=int64))\n # -> mal: 37%, benign: 63%\n # -> when training data, you should keep the ratio by stratify\n\n# 2. Data preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 0, stratify = y)\n # Keep ratio by stratify = y\nprint(f'X_train : {X_train.shape}, y_train: {y_train.shape}, X_test: {X_test.shape}, y_test: {y_test.shape}')\nprint(f'y_train: {np.unique(y_train, return_counts = True)}, y_test: {np.unique(y_test, return_counts = True)}')\n\nscaler = MinMaxScaler().fit(X_train)\nX_train_scaled = scaler.transform(X_train)\nprint('X_train_scaled: {}'.format(X_train_scaled))\n \n# 2. model\nfrom sklearn.svm import SVC\n\nclf = SVC()\nclf.fit(X_train_scaled, y_train)\n\n# 3. Prediction and Evaluation \nfrom sklearn.metrics import accuracy_score, recall_score\n\nX_test_scaled = scaler.transform(X_test)\ny_pred = clf.predict(X_test_scaled)\n\nprint(\"score: {}\".format(clf.score(X_test_scaled, y_test)))\nprint(f'accuracy: {accuracy_score(y_test, y_pred)}, recall: {recall_score(y_test, y_pred)}')\n\n \n################# \n\n# 4. 
Hyper parameter tunnning <- GridSearch, RandomizedSearchCV, ...\n# 4.1 GridSearch\nfrom sklearn.model_selection import GridSearchCV\n\nparam_grid = {\n 'C':[0.001, 0.01, 0.1, 1, 10, 100, 1000],\n 'gamma':[0.001, 0.01, 0.1, 1, 10, 100, 1000],\n}\n\ngrid = GridSearchCV(SVC(), param_grid, cv = 5)\ngrid.fit(X_train_scaled, y_train)\n\nprint(\"best_estimator_: {}, best_score_: {}, best_params_: {}\".format(\n grid.best_estimator_, grid.best_score_, grid.best_params_))\nprint(\"best_estimator_.score: {}\".format(grid.best_estimator_.score(X_test, y_test)))\n\n#print(grid.cv_results_.keys())\n# I think it is better to know what types of data are in grid.cv\n# dict_keys(['mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time', 'param_C', \n# 'param_gamma', 'params', 'split0_test_score', 'split1_test_score', 'split2_test_score', \n# 'split3_test_score', 'split4_test_score', 'mean_test_score', 'std_test_score', 'rank_test_score'])\n\n'''\n** Issue ** \nWhen separating the train set into train and evaluation sets, \nthe evaluation set is already included in the initial scaling. \nAs a result, the effectiveness of the evaluation is reduced.\n\n'''\n# 5. Build a pipeline\n\nfrom sklearn.pipeline import make_pipeline, Pipeline\n\n# steps = [('name', objects)]\npipe = Pipeline([('scaler', MinMaxScaler()), ('clf', SVC())])\nprint(\"pipe:{}\".format(pipe))\n# Configuration: cross validation + hyper parameter tunning + n algorithms + n preprocessing\n# Finding a best option\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.ensemble import RandomForestClassifier\n\n# n alogrithms -> [{}1, {}2, ..., {}n]\nparam_grid = [\n {\n 'scaler': [StandardScaler(), MinMaxScaler()],\n 'clf': [SVC()],\n 'clf__C': [0.001, 0.01, 0.1, 1, 10, 100, 1000],\n 'clf__gamma': [0.001, 0.01, 0.1, 1, 10, 100, 1000]\n },\n {\n 'scaler': [None],\n 'clf': [RandomForestClassifier(n_estimators=90)],\n 'clf__max_features': [1, 2, 3]\n }\n]\n\nprint(\"param_grid:{}\".format(param_grid))\n\n# configuraiton of gridsearch\ngrid = GridSearchCV(pipe, param_grid, cv = 5)\n\ngrid.fit(X_train, y_train) # <-- you do 'not' adjust scaling.\n\nprint(f\"grid.best_estimator_: {grid.best_estimator_}, \"\n f\"grid.best_score_: {grid.best_score_}, \"\n f\"grid.best_params_: {grid.best_params_}\")\n\nprint(f\"grid.best_estimator_.score: {grid.best_estimator_.score(X_test, y_test)}\")","repo_name":"JJLee1215/Sesac_MachineLearning","sub_path":"TunningandPipeling.py","file_name":"TunningandPipeling.py","file_ext":"py","file_size_in_byte":4543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17310388244","text":"# Configuration settings\r\nconfig = {\r\n \"bucket_name\": \"10alytics-capstone-bucket\",\r\n \"folder_name\": \"orders_data\",\r\n \"database_url\": \"postgresql://c5gp015210:rYm1NiK0qt@34.89.230.185:5432/d2b_accessment\",\r\n \"schema\": \"c5gp015210_staging\",\r\n \"tables\": {\r\n \"orders\": {\r\n \"file_name\": \"orders.csv\",\r\n \"local_file_name\": \"orders_data.csv\",\r\n \"preprocess_function\": \"preprocess_orders_data\"\r\n }, \r\n \"reviews\": {\r\n \"file_name\": \"reviews.csv\",\r\n \"local_file_name\": \"reviews_data.csv\",\r\n \"preprocess_function\": None \r\n },\r\n \"shipments_deliveries\": {\r\n \"file_name\": \"shipments_deliveries.csv\",\r\n \"local_file_name\": \"shipments_deliveries_data.csv\",\r\n \"preprocess_function\": \"preprocess_shipments_deliveries_data\" \r\n }\r\n }\r\n}\r\n\r\n# function to return config 
variable\r\ndef get_config():\r\n return config\r\n","repo_name":"blueride/Norby-Inc.-Data-Integration-ELT-Pipeline","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5710922634","text":"import unittest\nimport dataset\nfrom dataset import load_dataset\nimport torch\n\n\nclass TestDatasetIdxSplit(unittest.TestCase):\n def runTest(self):\n d = dataset.Dataset(\"HELLO WORLD\")\n d.label = torch.arange(25)\n train_indices, valid_indices, test_indices = d.get_idx_split(0.5, 0.25)\n self.assertEqual(train_indices.shape[0], 12)\n\nclass TestDatasetGenerateArxiv(unittest.TestCase):\n def runTest(self):\n dataset = load_dataset(\"ogbn-arxiv\")\n train_idx, valid_idx, test_idx = dataset.get_idx_split()\n print(dataset.graph['edge_index'])\n \n for key, value in dataset.graph.items():\n print(key, value)\n print(\"_________\")\n\n self.assertEqual(train_idx.shape[0], 90941)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n","repo_name":"FelixHohne/6850_project","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22361350199","text":"#%% 10.5.8\ndef probPath(path, states, transition):\n index = dict()\n result = 1 / len(states)\n for i in range(len(states)):\n add = {states[i] : i}\n index.update(add)\n for i in range(len(path) - 1):\n state1 = path[i]\n state2 = path[i+1]\n prob = transition[index[state1]][index[state2]]\n print(state1, state2, prob)\n result *= prob\n return result\n\nwith open(\"hw7/dataset_925971_8.txt\") as f:\n path = f.readline().strip()\n f.readline()\n states = f.readline().strip().split(\" \")\n f.readline()\n transition = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n prob = probPath(path, states, transition)\n print(prob)\n \n\n#%% 10.5.10\ndef probEmissionsGivenPath(string, alphabet, path, states, emission):\n alphaIndex = dict()\n stateIndex = dict()\n result = 1\n for i in range(len(alphabet)):\n add = {alphabet[i] : i}\n alphaIndex.update(add)\n for i in range(len(states)):\n add = {states[i] : i}\n stateIndex.update(add)\n for i in range(len(string)):\n emit = string[i]\n state = path[i]\n prob = emission[stateIndex[state]][alphaIndex[emit]]\n print(emit, state, prob)\n result *= prob\n return(result)\n\n\nwith open(\"hw7/dataset_925971_10.txt\") as f:\n string = f.readline().strip()\n f.readline()\n alphabet = f.readline().strip().split(\" \")\n f.readline()\n path = f.readline().strip()\n f.readline()\n states = f.readline().strip().split(\" \")\n f.readline()\n emission = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n prob = probEmissionsGivenPath(string, alphabet, path, states, emission)\n print(prob)\n\n#%% 10.6.7\nimport numpy as np\n\ndef Weight(emit, statePrev, stateCur, \n alphaIndex, stateIndex, transition, emission, \n probInitialTransition = None):\n if statePrev != None:\n probTransition = transition[stateIndex[statePrev]][stateIndex[stateCur]]\n else:\n probTransition = probInitialTransition\n probEmission = emission[stateIndex[stateCur]][alphaIndex[emit]]\n result = probTransition * probEmission\n return result\n \ndef ViterbiDP(string, states, alphaIndex, stateIndex, transition, emission):\n # initializing\n s = np.zeros((len(states), len(string)), dtype=float)\n backtrack = 
np.zeros((len(states), len(string) + 1), dtype=str)\n for i in range(len(states)):\n initialWeight = Weight(string[0], None, states[i], \n alphaIndex, stateIndex, transition, emission,\n probInitialTransition = 1 / len(states))\n s[i][0] = initialWeight\n # dynamic programming and traversal\n for j in range(1, len(string)):\n for i in range(len(states)):\n emit = string[j]\n stateCur = states[i]\n weights = [Weight(emit, statePrev, stateCur, \n alphaIndex, stateIndex, transition, emission)\n for statePrev in states]\n allWeights = [weights[k] * s[k][j-1]\n for k in range(len(states))]\n s[i][j] = max(allWeights)\n statePrev = states[allWeights.index(s[i][j])]\n backtrack[i][j] = statePrev\n # adding final entry to backtrack\n lastWeights = [i[-1] for i in s]\n lastStateIndex = lastWeights.index(max(lastWeights))\n lastState = states[lastStateIndex]\n for i in range(len(states)):\n backtrack[i][-1] = lastState\n return backtrack\n\ndef ViterbiPath(backtrack, states, i, j):\n if j == 0:\n return(\"\")\n prevState = backtrack[i][j]\n prevIndex = states.index(prevState)\n return(ViterbiPath(backtrack, states, prevIndex, j - 1) + prevState)\n\ndef Viterbi(string, alphabet, states, transition, emission):\n alphaIndex = dict()\n stateIndex = dict()\n for i in range(len(alphabet)):\n add = {alphabet[i] : i}\n alphaIndex.update(add)\n for i in range(len(states)):\n add = {states[i] : i}\n stateIndex.update(add)\n backtrack = ViterbiDP(string, states, \n alphaIndex, stateIndex, transition, emission)\n path = ViterbiPath(backtrack, states, 0, len(string))\n return(path)\n\nwith open(\"hw7/dataset_925972_7.txt\") as f:\n string = f.readline().strip()\n f.readline()\n alphabet = f.readline().strip().split(\" \")\n f.readline()\n states = f.readline().strip().split(\" \")\n f.readline()\n f.readline()\n transition = []\n for i in range(4):\n transition.append([float(j) for j in f.readline().strip().split(\"\\t\")[1:]])\n f.readline()\n emission = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n path = Viterbi(string, alphabet, states, transition, emission)\n print(path)\n\n#%% 10.7.4\ndef ForwardDP(string, states, alphaIndex, stateIndex, transition, emission):\n # initializing\n s = np.zeros((len(states), len(string)), dtype=float)\n for i in range(len(states)):\n initialWeight = Weight(string[0], None, states[i], \n alphaIndex, stateIndex, transition, emission,\n probInitialTransition = 1 / len(states))\n s[i][0] = initialWeight\n # dynamic programming and traversal\n for j in range(1, len(string)):\n for i in range(len(states)):\n emit = string[j]\n stateCur = states[i]\n weights = [Weight(emit, statePrev, stateCur, \n alphaIndex, stateIndex, transition, emission)\n for statePrev in states]\n allWeights = [weights[k] * s[k][j-1]\n for k in range(len(states))]\n s[i][j] = sum(allWeights)\n # returning the forward result\n lastWeights = [i[-1] for i in s]\n return sum(lastWeights)\n\ndef Forward(string, alphabet, states, transition, emission):\n alphaIndex = dict()\n stateIndex = dict()\n for i in range(len(alphabet)):\n add = {alphabet[i] : i}\n alphaIndex.update(add)\n for i in range(len(states)):\n add = {states[i] : i}\n stateIndex.update(add)\n result = ForwardDP(string, states, \n alphaIndex, stateIndex, transition, emission)\n return result\n\nwith open(\"hw7/dataset_925973_4.txt\") as f:\n string = f.readline().strip()\n f.readline()\n alphabet = f.readline().strip().split(\" \")\n f.readline()\n states = f.readline().strip().split(\" \")\n f.readline()\n 
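# each bare f.readline() discards a line the parser does not need: the
# "--------" separators between sections and the header row of state labels
# above the transition table. The input file is assumed to look roughly like
#   xzyyzy        (observed string)
#   --------
#   x y z         (alphabet)
#   --------
#   A B           (states)
#   --------
#   tab-separated transition table with row/column labels
#   --------
#   tab-separated emission table with row/column labels
# (layout inferred from the parsing code; sample values are illustrative).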
f.readline()\n transition = []\n for i in range(4):\n transition.append([float(j) for j in f.readline().strip().split(\"\\t\")[1:]])\n f.readline()\n emission = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n result = Forward(string, alphabet, states, transition, emission)\n print(result)\n\n#%% 10.11.4\ndef SupervisedHMMLearning(string, alphabet, path, states):\n # initialize matrices\n alphaIndex = dict()\n stateIndex = dict()\n for i in range(len(alphabet)):\n add = {alphabet[i] : i}\n alphaIndex.update(add)\n for i in range(len(states)):\n add = {states[i] : i}\n stateIndex.update(add)\n transition = np.zeros((len(states), len(states)), dtype=float)\n emission = np.zeros((len(states), len(alphabet)), dtype=float)\n # fill in emission matrix\n nStates = []\n for state in states:\n filteredString = []\n for i in range(len(path)):\n s = path[i]\n x = string[i]\n if s == state:\n filteredString.append(x)\n pseudo = 0\n if len(filteredString) == 0:\n pseudo = 1\n nState = [s for s in path].count(state) + pseudo * len(alphabet)\n nStates.append(nState)\n for emit in alphabet:\n nEmit = filteredString.count(emit) + pseudo\n emission[stateIndex[state]][alphaIndex[emit]] = nEmit / nState \n # fill in transition matrix\n for i in range(len(path) - 1):\n state1 = path[i]\n state2 = path[i + 1]\n transition[stateIndex[state1]][stateIndex[state2]] += 1\n for i in range(len(transition)):\n row = transition[i]\n if sum(row) == 0:\n row = [r + 1 for r in row]\n denom = sum(row)\n transition[i] = [r / denom for r in row]\n return transition, emission\n\ndef Display(result):\n transition = result[0]\n emission = result[1]\n for s in states:\n print(\"\\t\", end = s)\n print()\n for i in range(len(states)):\n s = states[i]\n print(s, end = \"\")\n for j in transition[i]:\n print(\"\\t\", end = str(round(j, 3)))\n print()\n print(\"--------\")\n for x in alphabet:\n print(\"\\t\", end = x)\n print()\n for i in range(len(states)):\n s = states[i]\n print(s, end = \"\")\n for j in emission[i]:\n print(\"\\t\", end = str(round(j, 3)))\n print()\n\nwith open(\"hw7/dataset_925977_4.txt\") as f:\n string = f.readline().strip()\n f.readline()\n alphabet = f.readline().strip().split(\" \")\n f.readline()\n path = f.readline().strip()\n f.readline()\n states = f.readline().strip().split(\" \")\n result = SupervisedHMMLearning(string, alphabet, path, states)\n Display(result)\n\n#%% 10.12.5\ndef ForwardMat(string, states, alphaIndex, stateIndex, transition, emission):\n # initializing\n s = np.zeros((len(states), len(string)), dtype=float)\n for i in range(len(states)):\n initialWeight = Weight(string[0], None, states[i], \n alphaIndex, stateIndex, transition, emission,\n probInitialTransition = 1 / len(states))\n s[i][0] = initialWeight\n # dynamic programming and traversal\n for j in range(1, len(string)):\n for i in range(len(states)):\n emit = string[j]\n stateCur = states[i]\n weights = [Weight(emit, statePrev, stateCur, \n alphaIndex, stateIndex, transition, emission)\n for statePrev in states]\n allWeights = [weights[k] * s[k][j-1]\n for k in range(len(states))]\n s[i][j] = sum(allWeights)\n return s\n\ndef BackwardMat(string, states, alphaIndex, stateIndex, transition, emission):\n # initializing\n s = np.zeros((len(states), len(string)), dtype=float)\n for i in range(len(states)):\n initialWeight = 1\n s[i][-1] = initialWeight\n # dynamic programming and traversal\n for j in reversed(range(len(string) - 1)):\n for i in range(len(states)):\n emit = string[j + 1]\n stateCur = 
states[i]\n weights = [Weight(emit, stateCur, statePrev, \n alphaIndex, stateIndex, transition, emission)\n for statePrev in states]\n allWeights = [weights[k] * s[k][j+1]\n for k in range(len(states))]\n s[i][j] = sum(allWeights)\n return s\n\ndef ForwardBackward(string, alphabet, states, transition, emission):\n alphaIndex = dict()\n stateIndex = dict()\n for j in range(len(alphabet)):\n add = {alphabet[j] : j}\n alphaIndex.update(add)\n for j in range(len(states)):\n add = {states[j] : j}\n stateIndex.update(add)\n result = np.zeros((len(states), len(string)), dtype=float)\n forwardMat = ForwardMat(string, states, alphaIndex, stateIndex, transition, emission)\n backwardMat = BackwardMat(string, states, alphaIndex, stateIndex, transition, emission)\n forwardSink = sum([i[-1] for i in forwardMat])\n result = np.multiply(forwardMat, backwardMat) / forwardSink \n return(result) \n\nwith open(\"hw7/dataset_925978_5.txt\") as f:\n string = f.readline().strip()\n f.readline()\n alphabet = f.readline().strip().split(\" \")\n f.readline()\n states = f.readline().strip().split(\" \")\n f.readline()\n f.readline()\n transition = []\n for i in range(len(states)):\n transition.append([float(j) for j in f.readline().strip().split(\"\\t\")[1:]])\n f.readline()\n emission = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n\n SoftDecoding = ForwardBackward(string, alphabet, states, transition, emission)\n result = np.transpose(SoftDecoding)\n print(\"\\t\".join(states))\n test = [\"\\t\".join([str(round(c, 3)) for c in r]) for r in result]\n for r in test:\n print(r)\n\n#%% 10.13.5\ndef BaumWelch(j, string, alphabet, states, transition, emission):\n # initializing\n alphaIndex = dict()\n stateIndex = dict()\n for i in range(len(alphabet)):\n add = {alphabet[i] : i}\n alphaIndex.update(add)\n for i in range(len(states)):\n add = {states[i] : i}\n stateIndex.update(add)\n M = [transition, emission]\n # iterating\n for iteration in range(j):\n # calculating forward and backward variables\n forwardMat = ForwardMat(string, states, alphaIndex, stateIndex, M[0], M[1])\n backwardMat = BackwardMat(string, states, alphaIndex, stateIndex, M[0], M[1])\n forwardSink = sum([i[-1] for i in forwardMat])\n # estimating parameters\n PiStar = np.multiply(forwardMat, backwardMat) / forwardSink\n PiStarStar = np.zeros((len(states)**2, len(string) - 1), dtype=float)\n for l in range(len(string) - 1):\n r = 0\n for state1 in states:\n for state2 in states:\n forward = forwardMat[stateIndex[state1]][l]\n weight = Weight(string[l], state1, state2, \n alphaIndex, stateIndex, M[0], M[1])\n backward = backwardMat[stateIndex[state2]][l + 1]\n PiStarStar[r][l] = (forward * weight * backward) / forwardSink\n r += 1\n # updating transition\n r = 0\n for state1 in states:\n for state2 in states:\n newProb = sum(PiStarStar[r])\n M[0][stateIndex[state1]][stateIndex[state2]] = newProb\n r += 1\n M[0] = M[0] / np.sum(M[0])\n # updating emission\n sums = dict()\n for s in states:\n innersum = dict()\n for x in alphabet:\n innersum.update({x : 0})\n sums.update({s : innersum})\n for k in range(len(states)):\n for m in range(len(string)):\n emit = string[m]\n sums[states[k]][emit] += PiStar[k][m]\n for s in states:\n for x in alphabet:\n M[1][stateIndex[s]][alphaIndex[x]] = sums[s][x]\n M[1] = M[1] / np.sum(M[1])\n return M\n\nwith open(\"hw7/dataset_925979_5.txt\") as f:\n j = int(f.readline().strip())\n f.readline()\n string = f.readline().strip()\n f.readline()\n alphabet = 
f.readline().strip().split(\"\\t\")\n f.readline()\n states = f.readline().strip().split(\"\\t\")\n f.readline()\n f.readline()\n transition = []\n for i in range(len(states)):\n transition.append([float(j) for j in f.readline().strip().split(\"\\t\")[1:]])\n f.readline()\n emission = [[float(j) for j in i.strip().split(\"\\t\")[1:]] for i in f.readlines()[1:]]\n \n j = 10\n string = \"xzyyzyzyxy\"\n alphabet = [\"x\",\t\"y\",\t\"z\"]\n states = [\"A\",\t\"B\"]\n transition = [[0.019,\t0.981], \n [0.668,\t0.332]]\n emission = [[0.175,\t0.003,\t0.821],\n [0.196,\t0.512,\t0.293]]\n \n result = BaumWelch(j, string, alphabet, states, transition, emission)\n Display(result)\n \n#%%\nfrom hmmlearn import hmm\nimport numpy as np\n\n# Initialize the HMM model\nmodel = hmm.MultinomialHMM(n_components=3, n_iter=100)\n\n# Training data (observed sequence)\nX = np.array([[0, 1, 0, 2]])\n\n# Fit the model to the training data using the Baum-Welch algorithm\nmodel.fit(X)\n\n# Get the estimated model parameters\nprint(\"Estimated initial state probabilities:\", model.startprob_)\nprint(\"Estimated transition matrix:\", model.transmat_)\nprint(\"Estimated emission matrix:\", model.emissionprob_)\n","repo_name":"zaid0bustami/CM122","sub_path":"hw7/hw7.py","file_name":"hw7.py","file_ext":"py","file_size_in_byte":16040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27616052756","text":"# 56. Merge Intervals\n# Medium\n# Array, Sorting\n# https://leetcode.com/problems/merge-intervals\n#\n# Return an array of the non-overlapping intervals after merging overlapping intervals.\n# def merge(self, intervals: List[List[int]]) -> List[List[int]]:\n# Input: intervals = [[1,3], [2,6], [8,10], [15,18]]\n# Output: [[1,6], [8,10], [15,18]]\n\nfrom operator import itemgetter\n\nclass Solution:\n # Stack + Sorting | Time: O(nlogn) | Space: O(n)\n def merge(self, intervals: list[list[int]]) -> list[list[int]]:\n get_start, get_end = itemgetter(0), itemgetter(1)\n intervals.sort(key=get_start)\n result = []\n\n for interval in intervals:\n if result and get_start(interval) <= get_end(result[-1]):\n # Set top of stack's end to longer interval end.\n result[-1][1] = max(get_end(interval), get_end(result[-1]))\n else:\n result.append(interval)\n\n return result\n","repo_name":"daviscvance/Practice","sub_path":"Leetcode/Python/intervals/medium/56-merge-intervals.py","file_name":"56-merge-intervals.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18466154941","text":"import tweepy\nimport keys\nimport schedule\nimport time\n\n# Download the libraries first\n#beware, twitter's api thing is a whole mess. 
You will most likely to have subscribe to one of \n# their tiers to actually get this to work as you will most likely encounter a Forbidden error\n\n\ndef create_api():\n auth = tweepy.OAuthHandler(\"API_KEY\", \n \"API_SECRET\")\n auth.set_access_token(\"ACCESS_TOKEN\",\n \"ACCESS_TOKEN_SECRET\")\n\n return tweepy.API(auth)\n\n\ndef tweet(api: tweepy.API, message: str, image_path=None):\n if image_path:\n media = api.media_upload(image_path)\n api.update_status(message, media_ids=[media.media_id])\n else:\n api.update_status(message)\n\n print('Tweeted!')\n\n\nif __name__ == '__main__':\n api = create_api()\n tweet(api, 'This is not a donut', 'cat.png')\n\nschedule.every().hour.do(tweet)\nschedule.every().hour.do(create_api)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n#this will allow you to tweet with your bot every hour. Haven't really tested it","repo_name":"donutdellsprinkles/DonutBot-twitter-","sub_path":"tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20311957565","text":"# -*- coding: utf-8 -*-\nimport os\nimport pickle\nimport numpy as np\nimport json\n\ndata_dir = 'data'\n\norigin_path = os.path.join(data_dir, 'paragraphs_v1.json')\nimg2sents_path = os.path.join(data_dir, 'img2sents.pkl')\nimg2dense_path = os.path.join(data_dir, 'img2dense.json')\nimg2onehot_path = os.path.join(data_dir, 'img2onehot.pkl')\n# img2densevec_path = os.path.join(data_dir, 'img2dense_vec.pkl')\nvocab_path = os.path.join(data_dir, 'vocab.pkl')\n\ndef get_img2sents(update_flag=False):\n if os.path.exists(img2sents_path) and not update_flag:\n return\n \n origin_data = json.load(open(origin_path, 'r'))\n img2paragraph = {}\n \n for each_data in origin_data:\n image_id = each_data['image_id']\n paragraph = each_data['paragraph']\n paragraph = paragraph.replace('t.v.', 'tv').replace('U.S.', 'US').replace('T.C.', 'TC').replace(\n 'C.E.T.', 'CET')\n paragraph = paragraph.replace(' st.', ' st').replace(' ST.', ' ST').replace(' Mt. ', ' Mt ').replace(\n ' St.', ' St').replace(' Dept. ', ' Dept ')\n paragraph = paragraph.replace(' S. ', ' st ').replace('welcomebackveterans.org.',\n 'welcomebackveterans')\n paragraph = paragraph.replace('$1.00', '$1').replace('3.20', '320').replace('us.open.org',\n 'usopenorg')\n paragraph = paragraph.replace('evil. ECC. IV.23', 'evilECCIV23').replace('neweracap.com',\n 'neweracapcom')\n paragraph = paragraph.replace(' Baby toys. And boxes.', ' ').replace('$1.25', '$1').replace(\n '.UMBRELLA.', 'UMBRELLA.')\n paragraph = paragraph.replace('www.kiwirail.co.nz', 'wwwkiwirailconz').replace('28.41',\n '2841').replace(\n 'http://www.tmz.com/', '')\n paragraph = paragraph.replace(' Handle. ', ' Handle ').replace(' oz. ', ' oz ')\n paragraph = paragraph.replace('CapeTreasures.com', 'CapeTreasurescom').replace('NW Meadow... DR.',\n 'DR')\n paragraph = paragraph.replace('$.20', '$1').replace('www.theimpusilvebuy.com',\n 'wwwtheimpusilvebuycom')\n paragraph = paragraph.replace('transavia.com', 'transaviacom').replace('XL.com', 'XLcom')\n\n paragraph.replace(' .', '.')\n paragraph.replace('. 
', '.')\n sentences = paragraph.split('.')\n sentences = map(lambda sent: sent.strip(), sentences)\n sentences = filter(lambda sent: len(sent) >= 2, sentences)\n # if sentences!=[]:\n # print sentences\n # if '.cn' in paragraph:\n # print paragraph\n\n img2paragraph[image_id] = sentences\n\n pickle.dump(img2paragraph, open(img2sents_path, 'wb'))\n\n\ndef get_vocab(word_count_threshold=5, update_flag=False):\n if os.path.exists(vocab_path) and not update_flag:\n return\n\n img2para = pickle.load(open(img2sents_path, 'rb'))\n all_sents = []\n for key, para in img2para.items():\n for each_sent in para:\n each_sent = each_sent.replace(',', ' , ')\n all_sents.append(each_sent)\n\n print('preprocessing word counts and creating vocab based on word count threshold %d' % (word_count_threshold,))\n\n word_counts = {}\n nsents = 0\n\n for sent in all_sents:\n nsents += 1\n tmp_sent = sent.lower().split(' ')\n\n for w in tmp_sent:\n if w != '' and w != ' ':\n word_counts[w] = word_counts.get(w, 0) + 1\n\n vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]\n print('filtered words from %d to %d' % (len(word_counts), len(vocab)))\n\n idx2word = {}\n idx2word[0] = ''\n idx2word[1] = ''\n idx2word[2] = ''\n idx2word[3] = ''\n\n word2idx = {}\n word2idx[''] = 0\n word2idx[''] = 1\n word2idx[''] = 2\n word2idx[''] = 3\n\n for idx, w in enumerate(vocab):\n word2idx[w] = idx + 4\n idx2word[idx + 4] = w\n\n word_counts[''] = nsents\n word_counts[''] = nsents\n word_counts[''] = nsents\n word_counts[''] = nsents\n\n # bias_init_vector = np.array([1.0 * word_counts[ ixtoword[i] ] for i in ixtoword])\n # bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies\n # bias_init_vector = np.log(bias_init_vector)\n # bias_init_vector -= np.max(bias_init_vector) # shift to nice numeric range\n\n pickle.dump([word2idx, idx2word], open(vocab_path, 'wb'))\n\n\ndef get_one_hot(s_max, n_max, update_flag):\n if os.path.exists(img2onehot_path) or not update_flag:\n return\n\n word2idx = pickle.load(open(vocab_path, 'rb'))[0]\n img2para = pickle.load(open(img2sents_path, 'rb'))\n img2para_vec = {}\n\n for img, para in img2para.items():\n\n num_sents = min(len(para), s_max)\n sent_stop = np.zeros(s_max, dtype=int)\n sent_stop[num_sents - 1:] = 1\n paras_idx = np.ones([s_max, n_max + 1], dtype=int) * 2\n\n for sent_id, sent in enumerate(para):\n if sent_id == num_sents:\n break\n sent = sent.replace(',', ' , ')\n\n sent = ' ' + sent + ' '\n word_count = 0\n tmp_sent = sent.lower().split(' ')\n tmp_sent = filter(lambda x: x != '' and x != ' ', tmp_sent)\n\n for word_id, word in enumerate(tmp_sent):\n if word_id == n_max + 1:\n break\n\n word_count += 1\n if word in word2idx:\n paras_idx[sent_id, word_id] = word2idx[word]\n else:\n paras_idx[sent_id, word_id] = word2idx['']\n\n img2para_vec[str(img)] = [paras_idx, sent_stop]\n\n pickle.dump(img2para_vec, open(img2onehot_path, 'wb'))\n\n# def getDenseVec():\n# img2dense = json.load(open(img2dense_path, 'r'))\n# img2dense_vec = {}\n# for img, captions in img2dense.items():\n# dense_vec = np.ones((50, 6), dtype=int) * 2\n# for i, caption in enumerate(captions):\n# if i >= 50:\n# break\n# words = caption.split(' ')\n# for j, word in enumerate(words):\n# if j >= 6:\n# break\n# if word in word2idx:\n# dense_vec[i, j] = word2idx[word]\n# else:\n# dense_vec[i, j] = word2idx['']\n# img2dense_vec[img] = dense_vec\n# with open(img2densevec_path, 'wb') as f:\n# pickle.dump(img2dense_vec, f)\n\ndef run(update_flag):\n get_img2sents(update_flag)\n 
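# run() chains three cached steps that must execute in this order:
# get_img2sents writes data/img2sents.pkl (image_id -> sentence list),
# get_vocab reads it and writes data/vocab.pkl as [word2idx, idx2word],
# and get_one_hot reads both and writes data/img2onehot.pkl mapping
# str(image_id) -> [paras_idx of shape (s_max, n_max+1), sent_stop].
# Note the guards differ: the first two skip work when their pickle exists
# and update_flag is False, while get_one_hot returns early when its pickle
# exists OR update_flag is False.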
get_vocab(update_flag=update_flag)\n    get_one_hot(6, 30, update_flag=update_flag)\n    # getDenseVec()\n\n    print('Data preprocess done')\n\nif __name__ == '__main__':\n    run(True)","repo_name":"bupt-mmai/CNN-Caption","sub_path":"data_prepare.py","file_name":"data_prepare.py","file_ext":"py","file_size_in_byte":7109,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"71206338753","text":"user_input = int(input(\"Enter a number: \"))\n\nadded_number = 0\nfor i in range(user_input, 0, -1):\n    if user_input % i == 0:\n        added_number += 1\nif added_number == 2:\n    print(\"Number is prime\")\nelse:\n    print(\"Number is not prime\")\n\n","repo_name":"deezah12/python","sub_path":"pythonProject/work/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1799719252","text":"# 1) First look only at the program code. What does the program do? Check your assumption.\n# 2) Modify the program so that the radii of the circles increase but the circles still do not touch each other.\n\nfrom gpanel import * \n\nmakeGPanel(0, 20, 0, 20)\n\nbgColor(\"gray\")\nsetColor(\"red\")\ni = 1\nx = 1\ny = 1\nfor i in range(1, 22, 2):\n    fillCircle(y, y, x)\n    x = x + 0.2\n    y = y + x + 1\n    ","repo_name":"ChickenWizzard1/imp","sub_path":"05_for_schleife.py","file_name":"05_for_schleife.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73789465795","text":"# 3.1 Neville's Iterated Interpolation\n# To evaluate the interpolating polynomial P on the n+1 distinct numbers\n# x0,..., xn at the number x for the function f:\n# Input: numbers x, x0, x1,..., xn; values f(x0), f(x1),..., f(xn)\n#        as the first column Q[0,0], Q[1,0],..., Q[n,0] of Q.\n# Output: the table Q with P(x) = Q[n,n].\n\n# function for value input\ndef inp(OK, n, x, xvals, Q):\n    # Input n\n    OK = False\n    while not OK:\n        n = int(input(\"Please enter the number of points to be input (n): \"))\n        if n > 0:\n            OK = True\n        else:\n            print(\"Please enter a positive value for n.\")\n    \n    # Populate Q as empty nested list\n    for i in range(n):\n        Q.append([None for j in range(n)])\n\n    # Assign x, xi for 1,...,n and Q[i,0] for 1,....,n\n    OK = False\n    while not OK:\n        x = float(input(\"Please enter the value to be evaluated using Neville's method (x): \"))\n        print(\"Please enter the values for the points x0, x1,..., xn: \")\n        for i in range(n):\n            xvals.append(float(input(f\"x{i}: \")))\n        print(\"Please enter the function values f(xi) at the points x0, x1,..., xn: \")\n        for j in range(n):\n            Q[j][0] = float(input(f\"f(x{j}): \"))\n        OK = True\n\n    return OK, n, x, xvals, Q\n\n# function for table output\ndef output(n, x, xvals, Q, prec1, prec2):\n    print('-' * (n + 1) * 12)\n    print(f\"i\\t\", f\"xi\\t\", f\"x-xi\\t\", \"\".join([f\"Qi{i}\\t\" for i in range(n)]))\n    print('-' * (n + 1) * 12)\n    for j in range(n):\n        print(f\"{j}\\t\", f\"{xvals[j]}\\t\", \"{:.{}f}\\t\".format(x - xvals[j], prec1), \"\".join([\"{:.{}f}\\t\".format(Q[j][i], prec2) for i in range(j+1)]))\n    print('-' * (n + 1) * 12)\n\n\ndef main():\n    # Assign initial variables\n    OK = False\n    n = 0\n    x = 0\n    xvals = []\n    Q = []\n\n    # Print introduction and input values\n    print(\"This is Neville's Iterated Interpolation method.\")\n    OK, n, x, xvals, Q = inp(OK, n, x, xvals, Q)\n\n    if OK:\n        # STEP 1: Set Q[i,j] for each value\n        for i in range(1, 
n):\n            for j in range(1, i+1):\n                Q[i][j] = (((x - xvals[i-j]) * Q[i][j-1]) - ((x - xvals[i]) * Q[i-1][j-1])) / (xvals[i] - xvals[i-j])\n\n        # STEP 2: Output values in table format\n        output(n, x, xvals, Q, 1, 4)\n\n\nmain()","repo_name":"GrahamStrickland/burden_numerical_analysis","sub_path":"ch03/neville_iterated_interpolation.py","file_name":"neville_iterated_interpolation.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23579406471","text":"x='/storage/emulated/0/qpython/projects3/cj17r1b/'\nwith open(x+'a2.in','r') as f:\n    with open(x+'a2.out','w') as o:\n        T=int(f.readline().strip())\n        for case in range(1,T+1):\n            t = f.readline().strip().split(' ')\n            D = int(t[0])\n            N = int(t[1])\n            horses = []\n            for n in range(N):\n                t = f.readline().strip().split(' ')\n                horses.append([int(t[0]),int(t[1])])\n            times = []\n            for h in horses:\n                times.append((D-h[0])/h[1])\n            mytime = max(times)\n            myspeed = D/mytime\n            o.write('Case #{}: {}\\n'.format(case,myspeed))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1541.py","file_name":"1541.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30159426109","text":"import cv2\r\nimport numpy as np\r\nimport time\r\nimport os\r\nimport sys\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\n\r\ndef preprocess(image):\r\n    image_256x256 = cv2.resize(image,(128,128))\r\n    colors = getcolors(image_256x256)\r\n    #print colors\r\n    image_gray = cv2.cvtColor(image_256x256,cv2.COLOR_BGR2GRAY)\r\n    image_cleanup = cv2.medianBlur(image_gray,3)\r\n    edges = cv2.Canny(image_cleanup,50,180)\r\n    return edges, colors\r\n\r\n#code courtesy: http://www.alanzucconi.com/2015/05/24/how-to-find-the-main-colours-in-an-image/\r\ndef getcolors(image):\r\n    image_array = image.reshape((image.shape[0] * image.shape[1], 3))\r\n    clusters = KMeans(n_clusters = 8)\r\n    clusters.fit(image_array)\r\n    \r\n    hist = centroid_histogram(clusters)\r\n    \r\n    # Sort the clusters according to how many pixels they have\r\n    zipped = sorted(zip(hist, clusters.cluster_centers_), reverse=True, key=lambda x: x[0])\r\n    hist, clusters.cluster_centers_ = zip(*zipped)\r\n\r\n    return hist\r\n    \r\ndef centroid_histogram(clt):\r\n    numLabels = np.arange(0, len(np.unique(clt.labels_)) + 1)\r\n    (hist, _) = np.histogram(clt.labels_, bins = numLabels)\r\n    hist = hist.astype(\"float\")\r\n    hist /= hist.sum()\r\n    return hist\r\n\r\ndef main():\r\n    pass\r\n    \r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"sanamshakya/workshop_on_RPi_image_processing_2017","sub_path":"image_processing/Image_Description_Recog/imagepreprocess.py","file_name":"imagepreprocess.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74626099074","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\nsetuptools.setup(\n    name=\"pyngw\",  # Replace with your own username\n    version=\"1.0.0\",\n    author=\"Artem Svetlov\",\n    author_email=\"artem.svetlov@nextgis.com\",\n    description=\"Python wrapper for NextGIS Web REST API\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/nextgis/pyngw\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'tuspy',\n    ],\n    
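# extra dependency groups could be declared here as well, e.g. (hypothetical, not part of the original package):\n    # extras_require={'dev': ['pytest']},\n    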
classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.0',\n)\n","repo_name":"nextgis/pyngw","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74522628034","text":"import math\nimport time\nimport cv2\nimport mediapipe as mp\n\n\nclass poseDetector():\n    def __init__(self, mode=False, upBody=False,\n                 smooth=True, detectionCon=0.5, trackCon=0.5):\n        self.mode = mode\n        self.upBody = upBody\n        self.smooth = smooth\n        self.detectionCon = detectionCon\n        self.trackCon = trackCon\n\n        self.mpPose = mp.solutions.pose\n        self.mpDraw = mp.solutions.drawing_utils\n        self.pose = self.mpPose.Pose(self.mode, self.upBody,\n                                     self.smooth, self.detectionCon, self.trackCon)\n\n    def findPose(self, img, draw=True):\n        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n        self.results = self.pose.process(imgRGB)\n        # print(results.pose_landmarks)\n\n        if self.results.pose_landmarks:\n            if draw:\n                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks,\n                                           self.mpPose.POSE_CONNECTIONS)\n        return img\n\n    def findPosition(self, img, draw=True):\n        # create a list to store the landmarks\n        self.lmList = []\n        if self.results.pose_landmarks:\n            for id, lm in enumerate(self.results.pose_landmarks.landmark):\n                h, w, c = img.shape\n                # print(id, lm)\n                # cx and cy hold integer pixel coordinates, since the landmarks are returned as decimals\n                cx, cy = int(lm.x * w), int(lm.y * h)\n                self.lmList.append([id, cx, cy])\n                if draw:\n                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)\n        return self.lmList\n\n    def findAngle(self, img, p1, p2, p3, draw=True):\n\n        # _, x1, y1 = self.lmList[p1]  # could also unpack like this to skip the leading index element\n        # Get the landmarks\n        x1, y1 = self.lmList[p1][1:]\n        x2, y2 = self.lmList[p2][1:]\n        x3, y3 = self.lmList[p3][1:]\n\n        # Calculate the Angle\n        angle = math.degrees(math.atan2(y3 - y2, x2 - x3) -\n                             math.atan2(y1 - y2, x1 - x2))\n        print(angle)\n\n        # Draw\n        if draw:\n\n            cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 3)\n            cv2.line(img, (x2, y2), (x3, y3), (255, 255, 255), 3)\n\n            cv2.circle(img, (x1, y1), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x1, y1), 15, (0, 0, 255), 2)\n            cv2.circle(img, (x2, y2), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x2, y2), 15, (0, 0, 255), 2)\n            cv2.circle(img, (x3, y3), 10, (0, 0, 255), cv2.FILLED)\n            cv2.circle(img, (x3, y3), 15, (0, 0, 255), 2)\n\n\n\ndef main():\n    cap = cv2.VideoCapture(0)\n    pTime = 0\n    detector = poseDetector()\n    while True:\n        success, img = cap.read()\n        detector.findPose(img)\n        lmList = detector.findPosition(img, draw=False)\n        if len(lmList) != 0:\n            print(lmList[14])\n            cv2.circle(img, (lmList[14][1], lmList[14][2]), 15, (0, 0, 255), cv2.FILLED)\n\n        cTime = time.time()\n        fps = 1 / (cTime - pTime)\n        pTime = cTime\n\n        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)\n        cv2.imshow(\"Hichem\", img)\n\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord('q'):\n            break\n\n    cv2.destroyAllWindows()\n    cap.release()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"hichemseriket/HandTracking","sub_path":"Pose/PoseModule.py","file_name":"PoseModule.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44270913251","text":"import gym\n\n\ndef register(id, 
entry_point, force=True):\n    env_specs = gym.envs.registry.env_specs\n    if id in env_specs.keys():\n        if not force:\n            return\n        del env_specs[id]\n    gym.register(\n        id=id,\n        entry_point=entry_point,\n    )\n\n\nenvironments = [\n    ['multi_armed_bandit', 'MultiArmedBandit', 'v0'],\n    ['jacks_car_rental', 'JacksCarRental', 'v1']\n]\n\nfor file, name, version in environments:\n    register(\n        id=f'{name}-{version}',\n        entry_point=f'doctrina.envs.{file}:{name}Env',\n    )\n","repo_name":"rhalbersma/doctrina","sub_path":"src/doctrina/envs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"72660245315","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\ndf = pd.read_csv(\"jpy24h.csv\")\ndf = df.reset_index(drop=True)\ndfd = df.iloc[:1500].copy()\n\n# MACD-related state columns\ndfd['dif'] = (dfd['Close'].ewm(span = 12).mean()-dfd['Close'].ewm(span = 26).mean()) # signal line\ndfd['dem'] = dfd['dif'].ewm(span = 9).mean() # MACD line\ndfd['osc'] = dfd['dif'] - dfd['dem']\ndfd['osc_dummy'] = dfd['osc'].apply(lambda x:0 if x>=0 else 1) # osc state -> neg =1,pos = 0\ndfd['dif_dummy'] = dfd['dif'].apply(lambda x:0 if x>=0 else 1) # signal-line state -> neg =1,pos = 0\ndfd['dem_dummy'] = dfd['dem'].apply(lambda x:0 if x>=0 else 1) # MACD state -> neg =1,pos = 0\n\n# define crossovers and congestion\ndfd['cross'] = 0\ndfd.loc[dfd['osc_dummy'] != dfd['osc_dummy'].shift(), 'cross'] = 1\ndfd['cross_dummy'] = dfd['cross']*np.sign(dfd['osc'])+0 # golden cross = 1, same side = 0\ndfd['cross_dummy'] = dfd['cross_dummy'].astype(int)\ndfd['cross_dummy'] = dfd['cross_dummy'].replace(-1,2) # death cross = 2\n\ndfd['rol_cross'] = dfd['cross'].rolling(6,min_periods = 1).sum() # congestion (two or more crossovers within the previous 6 bars)\ndfd['rol_cross'] = dfd['rol_cross'].apply(lambda x: 1 if x > 1 else 0) # congested = 1, otherwise 0\n\ndef rsi_d(x):\n    if 0 <= x <= 30:\n        return 0\n    if 30 < x <= 50:\n        return 1\n    if 50 < x <= 70:\n        return 2\n    if 70 < x <= 100:\n        return 3\n    \ndfd['RSI_dummy'] = dfd['RSI'].apply(rsi_d)\n\n# define divergence\ndfd['bali'] = 0\n\ntemp = dfd[dfd['cross_dummy'] == 2].index\nfor pos in range(len(temp) - 1):\n    idx, idx_next = temp[pos], temp[pos + 1]\n    if dfd.loc[idx,'dif'] > dfd.loc[idx_next,'dif']: # two death crosses where the later signal line sits lower than the earlier one while the later price is higher\n        if dfd.loc[idx,'Close'] <= dfd.loc[idx_next,'Close']:\n            dfd.loc[idx_next:idx_next+3,'bali'] = 1 # bearish (top) divergence = 1\n\ntemp = dfd[dfd['cross_dummy'] == 1].index\nfor pos in range(len(temp) - 1):\n    idx, idx_next = temp[pos], temp[pos + 1]\n    if dfd.loc[idx,'dif'] < dfd.loc[idx_next,'dif']: # two golden crosses where the later signal line sits higher than the earlier one while the later price is lower\n        if dfd.loc[idx,'Close'] >= dfd.loc[idx_next,'Close']:\n            dfd.loc[idx_next:idx_next+3,'bali'] = 1 # bullish (bottom) divergence = 1\n\n# divergence v2 (work in progress)\n''' \ndfd['osc_h/l'] = 0\ndfd['cluster_h/l'] = 0\n\nfor idx in range(3,len(dfd)):\n    if abs(dfd.loc[idx,'osc']) < abs(dfd.loc[idx-1,'osc']):\n        if abs(dfd.loc[idx,'osc']) < abs(dfd.loc[idx-2,'osc']):\n            if abs(dfd.loc[idx-2,'osc']) > abs(dfd.loc[idx-3,'osc']):\n                if (dfd.loc[idx,'osc_dummy'] == dfd.loc[idx-1,'osc_dummy']) &(dfd.loc[idx,'osc_dummy'] == dfd.loc[idx-2,'osc_dummy']) &((dfd.loc[idx-2,'osc_dummy'] == dfd.loc[idx-3,'osc_dummy'])):\n                    dfd.loc[idx-2,'cluster_h/l_p'] =1\n                    dfd.loc[idx-2,'osc_h/l'] = dfd.loc[idx-2,'osc']\n'''\n\nstate_list = ['RSI_dummy','osc_dummy','dif_dummy','dem_dummy','cross_dummy'] #'bali','rol_cross'\n# congestion and divergence are currently far too imbalanced, so many of those states would almost never occur\n\ndfd.to_csv('macd_st.csv',index=None,encoding = 'utf-8')\n\n# plot macd\nfig, ax = plt.subplots(nrows=4, ncols=1)\nplt.subplot(4, 1, 
1)\nplt.plot(dfd['Close'])\nplt.subplot(4, 1, 2)\nplt.plot(dfd['RSI'])\nplt.subplot(4, 1, 3)\nplt.plot(dfd['dif'])\nplt.plot(dfd['dem'])\nplt.subplot(4, 1, 4)\ncolors = np.array([(1,0,0)]*len(dfd['osc']))\ncolors[dfd['osc'] >= 0] = (0,0,1)\nplt.bar(dfd.index,dfd['osc'],color = colors)\nplt.show()\n","repo_name":"rlfx/RL_try","sub_path":"state_generate.py","file_name":"state_generate.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23009438512","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\n# IMPORTS -------------------------------------------------------------------+\n# +--- Scraping imports -----------------------------------------------------+\nimport requests\nimport mimetypes\n\n\n# +--- Os imports -----------------------------------------------------------+\nimport os\n\n\n# CLASS ---------------------------------------------------------------------+\nclass Downloader:\n    \"\"\" An image downloader.\n    Download binary data from a given URL and write it to a file with the given\n    name at a given directory path.\n    Instantiating an object from this class will automatically create a file\n    without needing another method call.\n    Even though it is designed to download images, it can be used to download any\n    binary file from a URL. \"\"\"\n\n    def __init__(self, url: str, image_name: str, directory_path: str):\n        \"\"\" Downloader class constructor. It will automatically call private\n        method _write. \"\"\"\n        response = requests.get(url)\n        extension = mimetypes.guess_extension(response.headers[\"content-type\"]) or \"\"\n        if os.path.exists(directory_path):\n            self.path = os.path.join(directory_path, image_name + extension)\n        else:\n            raise FileNotFoundError(directory_path)\n        if response.ok:\n            self.content = response.content\n            self._write()\n        else:\n            print(f\"Couldn't download image at URL : {url}\")\n\n    def _write(self):\n        \"\"\" Write the downloaded binary data to the file at self.path. 
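Assumes self.content was populated by a successful GET in __init__. 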
\"\"\"\n with open(self.path, \"wb\") as file:\n file.write(self.content)\n","repo_name":"YaShuHee/openclassrooms_project_2","sub_path":"src/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41483363082","text":"#\n#\n# =================================================================\n# =================================================================\n\nfrom paxes_cinder.k2aclient import _\nfrom paxes_cinder.k2aclient.v1.k2uom import k2attr as uom_k2attr\nfrom paxes_cinder.k2aclient.v1.k2uom import typeset as uom_typeset\nfrom paxes_cinder.k2aclient.v1.k2web import k2attr as web_k2attr\nfrom paxes_cinder.k2aclient.v1.k2web import typeset as web_typeset\nfrom paxes_k2.k2operator import K2Element, web_ns, uom_ns\n\nimport logging\nimport types\n\n_logger = logging.getLogger(__name__)\n\nK2_SCHEMA_VERSION = \"V1_0\"\n\n\ndef _enum(**enums):\n return type('Enum', (), enums)\n\n# Status of test\nMode = _enum(UPDATE=\"UPDATE\",\n CREATE=\"CREATE\")\n\n\ndef _attr_skip(mode, obj, attrus, av, ptype, cat1, cat2, rtype, xag):\n if mode is Mode.UPDATE:\n if av is None:\n return True\n# if cat1 == \"ro\":\n# return True\n# if not attrus in obj.modified_attrs:\n# return True\n return False\n elif mode is Mode.CREATE:\n if not attrus in obj.modified_attrs:\n return True\n return False\n else:\n msg = (\"Illegal mode: >%s<\")\n raise ValueError(msg % mode)\n\n\ndef _process_node(ns, typeset, k2attr, mode,\n attr, obj, ptype, cat1, cat2, rtype, xag):\n\n # IOAdapterChoiceCollection.Type\",\n if ptype.endswith(\"ChoiceCollection.Type\"):\n# print \"ChoiceCollection.Type: >%s<\" % (obj.__class__.__name__,)\n cname = obj.__class__.__name__[:-10]\n iptype = cname + \".Type\"\n\n (x, clazzattrs) = k2attr[obj.__class__.__name__]\n assert len(clazzattrs) == 1\n clazzattr = clazzattrs[0] # Only one \"choice\" !\n (iattrus, iattr, x, icat1, icat2, irtype, ixag) = clazzattr\n\n iav = getattr(obj, \"_pattr_%s\" % iattrus)\n # No skip?\n assert isinstance(iav, types.ListType)\n k2nes = []\n for li in iav:\n k2nes.append(_process_node(ns, typeset, k2attr, mode,\n iattr, li, iptype, icat1,\n icat2, irtype, ixag))\n # children\n k2e = K2Element(attr,\n ns=ns,\n attrib={'schemaVersion': K2_SCHEMA_VERSION},\n children=k2nes)\n\n if obj.group is not None:\n# print \"AAAA: attr: >%s<, group: >%s<\" % (attr, obj.group)\n k2e._element.set(\"group\", obj.group)\n\n return k2e\n\n elif ptype.endswith(\"Choice.Type\"):\n iptype = obj.__class__.__name__ + \".Type\"\n iattr = obj.__class__.__name__\n k2nes = [_process_node(ns, typeset, k2attr, mode,\n iattr, obj, iptype, cat1, cat2, rtype, xag)]\n k2e = K2Element(attr, ns=ns, children=k2nes)\n\n if obj.group is not None:\n# print \"BBBB: attr: >%s<, group: >%s<\" % (attr, obj.group)\n k2e._element.set(\"group\", obj.group)\n\n return k2e\n\n elif attr == \"link\" or ptype.startswith(\"link rel=\"):\n return K2Element(attr,\n ns=ns,\n attrib={'href': str(obj),\n 'rel': 'related'})\n\n # not a link and not in typeset, so set the string content\n elif not type(obj) in typeset:\n return K2Element(attr, ns=ns, text=str(obj))\n\n # traverse inheritance, top down\n it = obj.__class__.__name__\n itypes = []\n while True:\n itypes.append(it)\n (parenttype, x) = k2attr[it]\n it = parenttype\n if it is None:\n break\n itypes.reverse()\n\n # accumulate hierarchy of attributes\n k2nes = []\n for it in itypes:\n (x, clazzattrs) = k2attr[it]\n for clazzattr in 
clazzattrs:\n (iattrus, iattr, iptype, icat1, icat2, irtype, ixag) = clazzattr\n iav = getattr(obj, \"_pattr_%s\" % iattrus)\n if _attr_skip(mode, obj, iattrus, iav,\n iptype, icat1, icat2, irtype, ixag):\n continue\n\n if isinstance(iav, types.ListType):\n for li in iav:\n k2nes.append(_process_node(ns, typeset, k2attr, mode,\n iattr, li, iptype, icat1,\n icat2, irtype, ixag))\n else:\n k2nes.append(_process_node(ns, typeset, k2attr, mode, iattr,\n iav, iptype, icat1, icat2,\n irtype, ixag))\n\n if ptype.endswith(\"Choice.Type\") or ptype.endswith(\"Links.Type\"):\n objk2ne = K2Element(attr,\n ns=ns,\n children=k2nes)\n else:\n objk2ne = K2Element(attr,\n ns=ns,\n attrib={'schemaVersion': K2_SCHEMA_VERSION},\n children=k2nes)\n\n if obj.group is not None:\n# print \"CCCC: attr: >%s<, group: >%s<\" % (attr, obj.group)\n objk2ne._element.set(\"group\", obj.group)\n\n return objk2ne\n\n\ndef process_root(service, mode, obj):\n \"\"\"Create k2node elements from object model instance\"\"\"\n if service == \"web\":\n ns = web_ns\n typeset = web_typeset\n k2attr = web_k2attr\n elif service == \"uom\":\n ns = uom_ns\n typeset = uom_typeset\n k2attr = uom_k2attr\n else:\n msg = _(\"k2aclient: during process_root, unrecognized service: >%s<\")\n raise ValueError(msg % service)\n\n k2ne = _process_node(ns, typeset, k2attr, mode,\n obj.__class__.__name__, obj,\n \"root\", \"co\", \"r\", \"R\", \"\")\n return k2ne\n","repo_name":"windskyer/k_cinder","sub_path":"paxes_cinder/k2aclient/v1/v1k2creater.py","file_name":"v1k2creater.py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33438716396","text":"#importing the pygame\nimport pygame,random\n#pygame initialization\npygame.init()\n\nwidth = 1000\nheight = 500\n\n\nfrogImage = pygame.image.load(\"frog.png\")\nfrogWidth = frogImage.get_width()\nfrogHeight = frogImage.get_height()\n\nsound = pygame.mixer.Sound(\"point.wav\")\n\ngameBoard = pygame.display.set_mode((width,height))\n\n#R(REd)G(Green)B(blue) FORMAT -> 0-255\nred = 255,0,0\nblue = 0,0,255\ngreen = 0,255,0\nblack = 0,0,0\nwhite = 255,255,255\ncolor = 150,50,150\ngameBg = pygame.image.load(\"StartBackground.png\")\ngameBg = pygame.transform.scale(gameBg,(width,height))\ndef homeScreen():\n msg = \"PRESS 'SPACE' to Start the Game\"\n font = pygame.font.SysFont(None,60)\n #text = font.render(TEXT,ANTILIASING Property,COLOR)\n text = font.render(msg,True,red)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n mainGame()\n\n gameBoard.blit(gameBg,(0,0))\n gameBoard.blit(text,(50,450))\n pygame.display.flip()\n \ndef score(counter):\n font = pygame.font.SysFont(None,30)\n text = font.render(f\"Score : {counter}\",True,red)\n gameBoard.blit(text,(800,20))\n \ndef snake(snakeList,colorList,w,h):\n for i in range(len(snakeList)):\n pygame.draw.rect(gameBoard,colorList[i],[snakeList[i][0],snakeList[i][1],w,h])\ndef mainGame():\n x=0\n y=0\n w=40\n h=40\n movex = 0\n movey = 0\n counter=0\n snakeList=[]\n colorList=[]\n snakeLength = 1\n\n frogX = random.randint(0,width-frogWidth)\n frogY = random.randint(0,height-frogHeight)\n \n\n while True:\n \n for event in pygame.event.get():\n #print(event)\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n movex=5\n movey=0\n elif event.key == pygame.K_LEFT:\n 
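# arrow keys set a fixed speed on one axis and zero the other; LEFT mirrors the K_RIGHT branch above\n                    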
movex=-5\n movey=0\n elif event.key == pygame.K_UP:\n movex=0\n movey=-5\n elif event.key == pygame.K_DOWN:\n movex=0\n movey=5\n\n gameBoard.fill(white)\n gameBoard.blit(frogImage,(frogX,frogY))\n myRect = pygame.draw.rect(gameBoard,red,(x,y,w,h))\n frogRect = pygame.Rect(frogX,frogY,frogWidth,frogHeight)\n x+=movex\n y+=movey\n\n\n\n head =[]\n head.append(x)\n head.append(y)\n\n snakeList.append(head)\n\n color = random.randint(0,255),random.randint(0,255),random.randint(0,255)\n colorList.append(color)\n if len(snakeList)>snakeLength:\n del snakeList[0]\n del colorList[0]\n\n snake(snakeList,colorList,w,h)\n \n score(counter)\n\n if frogRect.colliderect(myRect):\n frogX = random.randint(0,width-frogWidth)\n frogY = random.randint(0,height-frogHeight)\n sound.play()\n snakeLength+=5\n counter+=1\n \n for each in snakeList[:-1]:\n if each == snakeList[-1]:\n print(\"Game Over\")\n\n if x>width-w:\n movex=-5\n elif x <0:\n movex=5\n\n if y>height-h:\n movey=-5\n elif y<0:\n movey=5\n \n \n\n pygame.display.flip() #update\n\n\nhomeScreen()\n","repo_name":"Sahil4UI/PythonJuly2020Reg2","sub_path":"Game DEv/SNAKE GAME 2PM/SNAKE GAME v1.py","file_name":"SNAKE GAME v1.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23009438512","text":"\"\"\"\n \n\"\"\"\nfrom typing import List, Optional, Tuple\nimport logging\n\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom fastapi.logger import logger as fastapi_logger\nfrom sqlalchemy.orm import Session\n\nimport movieapi.crud as crud\nimport movieapi.models as models\nimport movieapi.schemas as schemas\nfrom .database import SessionLocal, engine\n\n# generate auo ddl in mode update\nmodels.Base.metadata.create_all(bind=engine)\n\n\napp = FastAPI()\n\nlogger = logging.getLogger(\"uvicorn\")\nfastapi_logger.handlers = logger.handlers\nfastapi_logger.setLevel(logger.level)\nlogger.error(\"API Started\")\n\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post(\"/api/movies/\", response_model=schemas.Movie)\ndef create_movie(movie: schemas.MovieCreate, db: Session = Depends(get_db)):\n return crud.create_movie(db=db, movie=movie)\n\n\n@app.get(\"/api/movies/\", response_model=List[schemas.Movie])\ndef read_movies(skip: Optional[int] = 0, limit: Optional[int] = 100, db: Session = Depends(get_db)):\n # read movies from database\n movies = crud.get_movies(db, skip=skip, limit=limit)\n # return them as json\n return movies\n\n@app.get(\"/api/movies/byId/{movie_id}\", response_model=schemas.Movie)\ndef read_movie(movie_id: int, db: Session = Depends(get_db)):\n db_movie = crud.get_movie(db, movie_id=movie_id)\n if db_movie is None:\n raise HTTPException(status_code=404, detail=\"Movie to read not found\")\n return db_movie\n\n@app.get(\"/api/movies/byTitle\", response_model=List[schemas.Movie])\ndef read_movies_by_title(t: str, db: Session = Depends(get_db)):\n return crud.get_movies_by_title_part(db=db, title=t)","repo_name":"matthcol/moviefastapi","sub_path":"movieapi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23588856441","text":"in_file = 'C-small-1-attempt1.in'\r\nout_file = 'C-small-1.out'\r\ninp = open(in_file, 'r')\r\nout = open(out_file, 'w')\r\n\r\nt = int(inp.readline())\r\nfor case in range(1, t+1):\r\n n, k = list(map(int, 
inp.readline().split()))\r\n u = float(inp.readline())\r\n p = list(map(float, inp.readline().split()))\r\n p.sort()\r\n q = 1\r\n\r\n avg = (sum(p) + u)/n\r\n\r\n cont = True\r\n while cont:\r\n for i in range(len(p)):\r\n\r\n cont = False\r\n if p[i] > avg + 0.00000001:\r\n for j in range(i, len(p)):\r\n q *= p[j]\r\n p = p[:i]\r\n cont = True\r\n avg = (sum(p) + u)/len(p)\r\n break\r\n\r\n q *= avg**len(p)\r\n\r\n out.write('Case #{}: {}\\n'.format(case, q))\r\n\r\ninp.close()\r\nout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_211/86.py","file_name":"86.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31302676168","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom functional import seq\nfrom matplotlib import colors\nfrom matplotlib import patches\n\nfrom model import Frame, SiameseDB\nfrom .kalman import Sort, associate_detections_to_trackers\n\n\nclass KalmanTracking:\n\n def __init__(self):\n self.mot_tracker = Sort() # create instance of the SORT tracker\n self.max_id = 0\n self.reid_dict = {}\n\n def __call__(self, frame: Frame, siamese: SiameseDB, debug=False, plot_number=False):\n viridis = colors.ListedColormap(np.random.rand(256, 3))\n self.new_color = viridis(np.linspace(0, 1, 256))\n\n detections = seq(frame.detections).map(lambda d: d.to_sort_format()).to_list()\n detections = np.array(detections)\n trackers = self.mot_tracker.update(detections)\n matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(detections, trackers)\n\n for match in matched:\n det_id = int(trackers[match[1], 4])\n if det_id > self.max_id and siamese is not None:\n new_id = siamese.query(frame.image, frame.detections[match[0]])\n if new_id != -1:\n self.reid_dict[det_id] = new_id\n self.max_id = det_id\n if det_id in self.reid_dict:\n det_id = self.reid_dict[det_id]\n frame.detections[match[0]].id = det_id\n\n for unmatched in unmatched_dets:\n if siamese is not None:\n new_id = siamese.query(frame.image, frame.detections[unmatched])\n if new_id != -1:\n frame.detections[unmatched].id = new_id\n\n if debug:\n self.plot_tracking_color(frame, plot_number)\n\n def plot_tracking_color(self, frame: Frame, plot_number):\n plt.imshow(cv2.cvtColor(frame.image, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n\n for det in frame.detections:\n if det.id != -1:\n rect = patches.Rectangle((det.top_left[0], det.top_left[1]), det.width, det.height,\n linewidth=2, edgecolor=self.new_color[det.id, :], facecolor='none')\n plt.gca().add_patch(rect)\n if plot_number:\n plt.text(det.top_left[0] - 0, det.top_left[1] - 50, s='{}'.format(det.id),\n color='white', verticalalignment='top',\n bbox={'color': 'blue', 'pad': 0})\n plt.gca().add_patch(rect)\n plt.imshow(cv2.cvtColor(frame.image, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n plt.show()\n plt.close()\n\n @staticmethod\n def plot_tracking(frame: Frame):\n plt.imshow(cv2.cvtColor(frame.image, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n for det in frame.detections:\n rect = patches.Rectangle((det.top_left[0], det.top_left[1]), det.width, det.height,\n linewidth=1, edgecolor='blue', facecolor='none')\n plt.gca().add_patch(rect)\n\n plt.text(det.top_left[0] - 0, det.top_left[1] - 50, s='{}'.format(det.id),\n color='white', verticalalignment='top',\n bbox={'color': 'blue', 'pad': 0})\n plt.gca().add_patch(rect)\n plt.imshow(cv2.cvtColor(frame.image, cv2.COLOR_BGR2RGB))\n plt.axis('off')\n 
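# note: plt.show() typically blocks until the figure window is closed; for unattended debugging one could save the frame instead, e.g. plt.savefig('frame.png') (hypothetical output path)\n        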
plt.show()\n plt.close()\n","repo_name":"mcv-m6-video/mcv-m6-2019-team5","sub_path":"src/tracking/kalman_tracking.py","file_name":"kalman_tracking.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"4110795027","text":"#!/usr/bin/env python\nfrom requests import get\nimport sys\nimport re\nimport tqdm\nfrom bs4 import BeautifulSoup\nfrom mutagen.easyid3 import EasyID3\n\nBASE_URL = 'https://muzofond.fm/'\n\n\ndef download_file(url: str, filename: str):\n r = get(url, allow_redirects=True)\n with open(filename, 'wb') as f:\n f.write(r.content)\n return filename\n\n\ndef item_to_data_to_download(item: BeautifulSoup):\n url = item.select_one('li.play').attrs['data-url']\n autor_name = item.select_one('.desc h3 .artist').get_text().strip()\n track_name = item.select_one('span.track').get_text().strip()\n return {\n 'url': url,\n 'artist': autor_name,\n 'title': track_name\n }\n\n\ndef get_urls(url: str) -> list:\n resp = get(url)\n text = resp.text\n soup = BeautifulSoup(text, 'html.parser')\n data = [item_to_data_to_download(d)\n for d in soup.select('.mainSongs li.item')]\n return data\n\n\ndef set_attrs_to_file(filename: str, attrs: dict):\n f = EasyID3(filename)\n for attr, val in attrs.items():\n if attr != 'url':\n f[attr] = val\n f.save()\n\n\ndef start_download(url: str):\n urls = get_urls(url)\n for i, data in tqdm.tqdm(enumerate(urls)):\n _url = data['url']\n filename = f'track_{i}.mp3'\n download_file(_url, filename)\n set_attrs_to_file(filename, data)\n\n\ndef check_url(arg: str):\n return arg.startswith(BASE_URL)\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) == 2 and check_url(args[1]):\n start_download(args[1])\n","repo_name":"pyaji/music_resources","sub_path":"muzofond_fm.py","file_name":"muzofond_fm.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16517508189","text":"import torch.nn as nn\n\n\nclass WGAN_D(nn.Module):\n def __init__(self):\n super(WGAN_D, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=3, stride=1, padding=0)\n self.norm1 = nn.InstanceNorm2d(256, affine=True)\n self.relu1 = nn.LeakyReLU(0.2, inplace=True)\n\n self.conv2 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=2, padding=1)\n self.norm2 = nn.InstanceNorm2d(512, affine=True)\n self.relu2 = nn.LeakyReLU(0.2, inplace=True)\n\n self.conv3 = nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=4, stride=2, padding=1)\n self.norm3 = nn.InstanceNorm2d(1024, affine=True)\n self.relu3 = nn.LeakyReLU(0.2, inplace=True)\n\n self.conv4 = nn.Conv2d(in_channels=1024, out_channels=1, kernel_size=4, stride=1, padding=0)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.norm1(x)\n x = self.relu1(x)\n\n x = self.conv2(x)\n x = self.norm2(x)\n x = self.relu2(x)\n\n x = self.conv3(x)\n x = self.norm3(x)\n x = self.relu3(x)\n\n x = self.conv4(x)\n # x size (1, 1, 3, 3)\n\n return x","repo_name":"Zzznorlax/WGAN-gp-MNIST","sub_path":"WGAN_GP_D.py","file_name":"WGAN_GP_D.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981763407","text":"# Given an array of non-negative integers, you are initially positioned at the first index of the array.\n#\n# Each element in the array represents your maximum jump length at that 
position.\n#\n# Determine if you are able to reach the last index.\n#\n# Example 1:\n#\n# Input: [2,3,1,1,4]\n# Output: true\n# Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.\n# Example 2:\n#\n# Input: [3,2,1,0,4]\n# Output: false\n# Explanation: You will always arrive at index 3 no matter what. Its maximum\n# jump length is 0, which makes it impossible to reach the last index.\n\nclass Solution(object):\n    def canJump(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: bool\n        \"\"\"\n        if not nums:\n            return False\n        # dp\n        # dp = [0] * len(nums)\n        # dp[0] = nums[0]\n        # for i in range(1, len(nums)):\n        #     dp[i] = max(nums[i - 1], dp[i - 1]) - 1\n        #     if dp[i] < 0:\n        #         return False\n        # return True\n\n        # greedy\n        reach = 0\n        for idx, num in enumerate(nums):\n            if idx > reach:\n                return False\n            if reach >= len(nums) - 1:\n                return True\n            reach = max(reach, idx + num)\n        return False\n","repo_name":"yshshadow/Leetcode","sub_path":"51-100/55.py","file_name":"55.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16209234058","text":"import random\nfrom Genetic.Polygon import Polygon\n\n\ndef Point_in_polygon(point,\n                     polygon):  # Checks if the point is inside a polygon, returns 1 if it is inside or 0 if it is outside\n    count = 0\n    vertexes = polygon.points\n    for i in range(len(vertexes) - 1):\n        if ((vertexes[i].y <= point.y) and (vertexes[i + 1].y > point.y)) or (\n                (vertexes[i].y > point.y) and (vertexes[i + 1].y <= point.y)):\n            vt = (point.y - vertexes[i].y) / (vertexes[i + 1].y - vertexes[i].y)\n            if point.x < vertexes[i].x + vt * (vertexes[i + 1].x - vertexes[i].x):\n                count += 1\n    return count % 2\n\n\ndef crossover(bin1, bin2):  # Crossover between bits (the color)\n    div = random.randint(0, 15)\n    num1 = bin1[:div]\n    num2 = bin2[div:]\n    new_num = num1 + num2\n    return new_num\n\n\n\n\n\nclass Individual:\n    def __init__(self, grid):\n        self.grid = grid\n        self.distribution = grid.get_map()  # distribution of colors\n        self.sample = grid.sample  # Sample of points to be tested\n        self.polygons = []\n        self.pop_size = 0\n        self.pop_max = 20\n        self.finished = False  # Boolean to check if the algorithm has finished\n        for number in range(6):  # Creates the generation 0\n            self.genetic_distribution()\n            self.pop_size += 1\n        for polygon in self.polygons:  # Sets the fitness for the gen 0\n            self.fitness(polygon)\n\n    def genetic_distribution(\n            self):  # Uses the distribution of colors in the grid to set the genetic distribution of the gen 0\n        adn = random.randint(0, 65535)\n        polygon = Polygon(bin(adn))\n        self.classify(adn, polygon)\n        self.generate_vertexes(polygon)\n        self.polygons.append(polygon)\n\n    def generate_vertexes(self, polygon):  # Generates the vertexes of a polygon within the grid\n        coordinates = self.grid.coordinates\n        for i in range(3):\n            x = random.randint(coordinates[0], coordinates[2])\n            y = random.randint(coordinates[1], coordinates[3])\n            polygon.add_point(x, y)\n\n    def classify(self, adn, polygon):  # Classifies the color of a polygon using the genetic distribution method\n        accumulator = 0\n        for color, number in self.distribution.items():\n            percentage = number / self.grid.total\n            if adn < (65535 * percentage) + accumulator:\n                polygon.color = color\n                break\n            else:\n                accumulator += 65536 * percentage\n                polygon.color = color\n\n    def mutation(self, polygon):  # Mutation\n        binary = polygon.adn\n        prob = random.randint(0, 1000)\n        if prob <= 10:  # mutate only with a small (~1%) probability\n            gen_pos = random.randint(2, 
len(binary) - 1)\n            gen = binary[gen_pos]\n            if gen == '1':\n                binary = binary[:gen_pos] + '0' + binary[gen_pos + 1:]\n            else:\n                binary = binary[:gen_pos] + '1' + binary[gen_pos + 1:]\n            polygon.adn = binary\n            self.classify(int(polygon.adn, 2), polygon)\n\n    def get_color_polygons(self,\n                           color_target):  # Gets the percentage of polygons of a certain color in the population\n        count = 0\n        for polygon in self.polygons:\n            if polygon.color == color_target:\n                count += 1\n        return count / len(self.polygons)\n\n    def test_points_on_polygon(self, polygon):  # test all the pixels of the same color as the polygon and count how many fall inside it\n        counter = 0\n        pixels = self.sample[polygon.color]\n        for pixel in pixels:\n            counter += Point_in_polygon(pixel, polygon)\n        return counter\n\n    def selection(self):  # Selection of the genetic algorithm\n        mating_pool = []\n        self.polygons.sort(key=lambda x: x.fitness_score, reverse=True)\n        start = int(round(len(self.polygons) * 0.70))\n        for number in range(start):\n            mating_pool.append(self.polygons[number])\n        del self.polygons[start:len(self.polygons)]\n        self.finished = self.check_if_finished()\n        if self.finished:\n            return\n        if self.pop_size < self.pop_max:\n            self.pop_size += 2\n        for i in range(self.pop_size - len(self.polygons)):\n            r = random.randint(0, len(mating_pool) - 1)\n            father = mating_pool[r]\n            r = random.randint(0, len(mating_pool) - 1)\n            mother = mating_pool[r]\n            baby = Polygon(crossover(father.adn, mother.adn))\n            self.classify(int(baby.adn, 2), baby)\n            self.generate_vertexes(baby)\n            self.fitness(baby)\n            self.polygons.append(baby)\n        for polygon in self.polygons:\n            self.mutation(polygon)\n\n    def check_if_finished(self):  # Checks if the goal has been met to stop the algorithm\n        for polygon in self.polygons:\n            if polygon.fitness_score < 1:\n                return False\n        return True\n\n    def fitness(self, polygon):  # Calculate the fitness of the polygon and assign it to it\n        inside = self.test_points_on_polygon(polygon)\n        color_percentage = self.get_color_polygons(polygon.color)\n        score = abs((1 * inside) - (color_percentage / self.distribution[polygon.color]))\n        polygon.fitness_score = score\n","repo_name":"Dchengg/Caso8-PolyImages","sub_path":"Genetic/Individual.py","file_name":"Individual.py","file_ext":"py","file_size_in_byte":5325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9939452000","text":"import string\nconversion = dict(zip(string.ascii_lowercase + string.ascii_uppercase, range(1, 53)))\nlines = [x.strip(\"\\n\") for x in open(\"input.txt\")]\nresult = []\nfor line in lines:\n    half_len = len(line)//2\n    half1, half2 = set(line[:half_len]), set(line[half_len:])\n    both = list(half1.intersection(half2))[0]\n    result.append(conversion[both])\n\nprint(\"Part 1: \", sum(result))\n","repo_name":"brisutom/Aoc2022","sub_path":"03/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4169393993","text":"#!/usr/bin/env python\n\nimport h5py\nimport numpy\nimport argparse\nimport cPickle\n\nfrom fuel.datasets.hdf5 import H5PYDataset\n\ndef pack(f, name, dataset_pathes):\n    datasets = [cPickle.load(open(path)) for path in dataset_pathes]\n    data = sum(datasets, [])\n    dtype = h5py.special_dtype(vlen=numpy.dtype('int32'))\n    table = f.create_dataset(name, (len(data),), dtype=dtype)\n    for i, example in enumerate(data):\n        table[i] = example\n    return numpy.array([len(d) for d in datasets])\n\n\nif 
__name__ == '__main__':\n    parser = argparse.ArgumentParser(\"Pack data to HDF5\")\n    parser.add_argument('-s', dest='sources', nargs='*', help=\"Source datasets\")\n    parser.add_argument('-t', dest='targets', nargs='*', help=\"Target datasets\")\n    parser.add_argument('-n', dest='names', nargs='*', help=\"Dataset names\")\n    parser.add_argument('-i', dest='add_ids',\n                        action='store_true', default=False,\n                        help=\"Add integer IDs\")\n    parser.add_argument('dest', help=\"Destination\")\n    args = parser.parse_args()\n\n    assert len(args.sources) == len(args.targets)\n    assert len(args.sources) == len(args.names)\n    with h5py.File(args.dest, mode='w') as f:\n        lengths = pack(f, \"sources\", args.sources)\n        assert numpy.all(lengths == pack(f, \"targets\", args.targets))\n\n        offsets = [0] + list(lengths.cumsum())\n        total_len = offsets[-1]\n        if args.add_ids:\n            id_table = f.create_dataset('ids',\n                                        data=numpy.arange(total_len,\n                                                          dtype='int32'))\n\n            split_dict = {\n                args.names[i]:\n                {'sources': (offsets[i], offsets[i + 1]),\n                 'targets': (offsets[i], offsets[i + 1]),\n                 'ids': (offsets[i], offsets[i + 1])}\n                for i in range(len(args.names))}\n        else:\n            split_dict = {\n                args.names[i]:\n                {'sources': (offsets[i], offsets[i + 1]),\n                 'targets': (offsets[i], offsets[i + 1])}\n                for i in range(len(args.names))}\n\n        f.attrs['split'] = H5PYDataset.create_split_array(split_dict)\n","repo_name":"rizar/actor-critic-public","sub_path":"bin/pack_to_hdf5.py","file_name":"pack_to_hdf5.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"61"} +{"seq_id":"74822613633","text":"import argparse\nimport train\nimport test\n\ndef parse_input():\n    parser = argparse.ArgumentParser(description='Inverted pendulum with Q-learning')\n    parser.add_argument('-t','--training',action='store_true',help='Switches to training mode. 
Default=false')\n\n    args = parser.parse_args()\n    return args\n\nif __name__ == \"__main__\":\n    number_of_buckets = (5, 1, 6, 3)\n    args = parse_input()\n    if args.training is True:\n        print('training started')\n        train.start_train()\n    else:\n        print('test started')\n        test.start_test()","repo_name":"danikhani/cartpole_rl","sub_path":"Q-learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4159995193","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torch.utils.model_zoo as model_zoo\nimport torchvision.models as models\nfrom tqdm import tqdm\n\nclass CNN_model(nn.Module):\n    def __init__(self, embedding_feature = 1000, pre_trained = True, model_use = 'resnet'):\n        super(CNN_model, self).__init__()\n        self.model = models.resnet50(pretrained = pre_trained)\n        self.model_use = model_use\n        if (model_use == 'inception'):\n            self.model = models.inception_v3(pretrained = pre_trained)\n        if (model_use == 'densenet'):\n            self.model = models.densenet161(pretrained = pre_trained)\n        if model_use != 'densenet':\n            fc_features = self.model.fc.in_features\n        else:\n            fc_features = self.model.classifier.in_features\n        \n        self.feature_number = embedding_feature\n        # https://blog.csdn.net/whut_ldz/article/details/78845947 for how to customize the CNN model\n        if (embedding_feature != 1000):\n            if model_use != 'densenet':\n                self.model.fc = nn.Linear(fc_features, embedding_feature)\n            else:\n                self.model.classifier = nn.Linear(fc_features, embedding_feature)\n        \n    def forward(self, x):\n        x = self.model(x)\n        if isinstance(x,tuple):\n            x = x[0]\n        x = x.view(-1, self.feature_number)\n        return x\n\n\n","repo_name":"MingyuZha/CS598-DeepLearning","sub_path":"Final_Project/CNN/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"36536073793","text":"import ctypes\nimport math\n\n# lib = ctypes.CDLL(\"libSampleSorter.dylib\")\n# lib = ctypes.CDLL(\"../build/libSampleSorter.so\")\nimport os\ncwd = os.getcwd()\nos.chdir(\"A:\\\\SampleSorter\\\\build\\\\Release\")\ndll_path = \"SampleSorter.dll\"\nlib = ctypes.CDLL(dll_path)\nos.chdir(cwd)\n\nclass Sample:\n    def __init__(self, fileName, userLibrary, forceReprocess):\n        lib.NewAbletonSampleFile.restype = ctypes.c_void_p\n        self.s = lib.NewAbletonSampleFile(str(fileName).encode('ascii'), str(userLibrary).encode('ascii'), forceReprocess)\n\n    def getFileName(self):\n        lib.getFileName.argtypes = [ctypes.c_void_p]\n        lib.getFileName.restype = ctypes.c_char_p\n        return lib.getFileName(self.s)\n\n    def getAudioPath(self):\n        lib.getAudioPath.argtypes = [ctypes.c_void_p]\n        lib.getAudioPath.restype = ctypes.c_char_p\n        audio_path = lib.getAudioPath(self.s).decode(\"utf-8\")\n        audio_path = audio_path.replace(\"\\\\\", \"/\")\n        return audio_path\n    \n    def process(self):\n        lib.process.argtypes = [ctypes.c_void_p]\n        lib.process.restype = ctypes.c_bool\n        return lib.process(self.s)\n\n    def getTuning(self):\n        lib.getTuningCents.argtypes = [ctypes.c_void_p]\n        lib.getTuningCents.restype = ctypes.c_long\n        return lib.getTuningCents(self.s)\n\n    def getFundamental(self):\n        lib.getFundemental.argtypes = [ctypes.c_void_p]\n        lib.getFundemental.restype = ctypes.c_short\n        
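# argtypes/restype must be configured before the foreign call so ctypes marshals the opaque handle and the return value correctly\n        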
return lib.getFundemental(self.s)\n\n def getOctave(self):\n # allocate enough space\n lib.getOctave.argtypes = [ctypes.c_void_p]\n lib.getOctave.restype = ctypes.POINTER(ctypes.c_double * 12)\n c_octave = lib.getOctave(self.s)\n octave = [value for value in c_octave.contents]\n\n # delete allocated space\n lib.deleteOctave.argtypes = [ctypes.c_void_p]\n lib.deleteOctave(c_octave)\n\n return octave\n\n def getTheOne(self):\n lib.getTheOneWithTuning.argtypes = [ctypes.c_void_p]\n lib.getTheOneWithTuning.restype = ctypes.c_double\n return lib.getTheOneWithTuning(self.s)\n\n def getTempo(self):\n lib.getBeatWithTuning.argtypes = [ctypes.c_void_p]\n lib.getBeatWithTuning.restype = ctypes.c_double\n return lib.getBeatWithTuning(self.s)\n\n def getChords(self):\n # get the number\n lib.getNumChords.argtypes = [ctypes.c_void_p]\n lib.getNumChords.restype = ctypes.c_size_t\n numChords = lib.getNumChords(self.s)\n\n # allocate enough space\n lib.getChords.argtypes = [ctypes.c_void_p]\n lib.getChords.restype = ctypes.POINTER(ctypes.POINTER(ctypes.c_double * 12) * numChords)\n c_chords = lib.getChords(self.s)\n chords = [[value for value in chord.contents] for chord in c_chords.contents]\n\n # delete allocated space\n lib.deleteChords.argtypes = [ctypes.c_void_p, ctypes.c_size_t]\n lib.deleteChords(c_chords, numChords)\n\n # return\n return chords\n\n def writeToFile(self):\n lib.writeToFile.argtypes = [ctypes.c_void_p]\n lib.writeToFile(self.s)\n\n def delete(self):\n lib.deleteAbletonSampleFile.argtypes = [ctypes.c_void_p]\n lib.deleteAbletonSampleFile(self.s)\n\n def writeToMIDI(self, midiFileName):\n import midi\n chords = self.getChords()\n # find the maximum amplitude\n maximumAmp = 0\n for c in range(len(chords)):\n for b in range(12):\n maximumAmp = max(maximumAmp, math.sqrt(chords[c][b]))\n\n MIDI_velocity_ratio = 127./maximumAmp\n ticks_per_quarter_note = 220\n\n # initialize MIDI\n pattern = midi.Pattern()\n track = midi.Track()\n pattern.append(track)\n\n # add Tempo\n tempo = midi.SetTempoEvent(tick = 0, bpm = self.getTempo() * 60)\n track.append(tempo)\n\n for c in range(len(chords)):\n # Add note ons\n for b in range(12):\n vel = MIDI_velocity_ratio*math.sqrt(chords[c][b])\n on = midi.NoteOnEvent(tick = 0, velocity = int(vel), pitch=midi.A_4+b)\n track.append(on)\n\n # Add note offs\n off = midi.NoteOffEvent(tick = ticks_per_quarter_note, pitch=midi.A_4)\n track.append(off)\n for b in range(1,12):\n off = midi.NoteOffEvent(tick = 0, pitch=midi.A_4+b)\n track.append(off)\n\n # Add the end of the file\n eot = midi.EndOfTrackEvent(tick=1)\n track.append(eot)\n\n # write to file\n midi.write_midifile(midiFileName, pattern)\n","repo_name":"sportdeath/SampleSorter","sub_path":"python_src/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"42420688272","text":"# coding: utf-8\nimport attr\nimport datetime\nimport pytz\nfrom flaskbb.extensions import db_onyx, db_eos, db_dragon\nfrom sqlalchemy import Column, DateTime, Integer, String, Text, Index, text\nfrom sqlalchemy.dialects.mysql import TINYINT, INTEGER, SMALLINT, VARCHAR\n\n\n@attr.s\nclass BanRecord:\n ckey = attr.ib()\n bantime = attr.ib()\n expiration_time = attr.ib()\n a_ckey = attr.ib()\n bantype = attr.ib()\n expired = attr.ib()\n role = attr.ib()\n unbanned = attr.ib()\n unbanned_ckey = attr.ib()\n unbanned_datetime = attr.ib()\n reason = attr.ib()\n desc = attr.ib(default=\"\")\n\n\nclass 
ErroBan:\n id = Column(Integer, primary_key=True, nullable=False)\n bantime = Column(DateTime, nullable=False)\n serverip = Column(String(32), nullable=False)\n bantype = Column(String(32), nullable=False)\n reason = Column(Text, nullable=False)\n job = Column(String(32))\n duration = Column(Integer, nullable=False)\n rounds = Column(Integer)\n expiration_time = Column(DateTime, nullable=False)\n ckey = Column(String(32), nullable=False)\n computerid = Column(String(32), nullable=False)\n ip = Column(String(32), nullable=False)\n a_ckey = Column(String(32), nullable=False)\n a_computerid = Column(String(32), nullable=False)\n a_ip = Column(String(32), nullable=False)\n who = Column(Text, nullable=False)\n adminwho = Column(Text, nullable=False)\n edits = Column(Text)\n unbanned = Column(TINYINT(1))\n unbanned_datetime = Column(DateTime)\n unbanned_reason = Column(Text)\n unbanned_ckey = Column(String(32))\n unbanned_computerid = Column(String(32))\n unbanned_ip = Column(String(32))\n server_id = Column(String(32), primary_key=True, nullable=False)\n\n def get_ban_record(self):\n return BanRecord(\n ckey=self.ckey,\n bantime=self.bantime and self.bantime.astimezone(pytz.UTC),\n expiration_time=self.expiration_time and self.expiration_time.astimezone(pytz.UTC),\n a_ckey=self.a_ckey,\n bantype=self.bantype.lower(),\n expired=(self.bantype.lower() != \"permaban\" and\n self.bantype.lower() != \"job_permaban\" and\n self.expiration_time.astimezone(pytz.UTC) < datetime.datetime.now(datetime.timezone.utc)),\n role=self.job,\n unbanned=self.unbanned,\n unbanned_ckey=self.unbanned_ckey,\n unbanned_datetime=self.unbanned_datetime and self.unbanned_datetime.astimezone(pytz.UTC),\n reason=self.reason\n )\n\n\nclass ErroBanChaotic(db_onyx.Model, ErroBan):\n __bind_key__ = 'chaotic'\n __tablename__ = 'erro_ban'\n\n\nclass ErroBanEos(db_eos.Model, ErroBan):\n __bind_key__ = 'eos'\n __tablename__ = 'erro_ban'\n\n\nclass ErroBanDragon(db_dragon.Model):\n __bind_key__ = 'dragon'\n __tablename__ = 'SS13_ban'\n __table_args__ = (\n Index('idx_ban_isbanned_details', 'ckey', 'ip', 'computerid', 'role', 'unbanned_datetime', 'expiration_time'),\n Index('idx_ban_count', 'bantime', 'a_ckey', 'applies_to_admins', 'unbanned_datetime', 'expiration_time'),\n Index('idx_ban_isbanned', 'ckey', 'role', 'unbanned_datetime', 'expiration_time')\n )\n\n id = Column(INTEGER, primary_key=True)\n bantime = Column(DateTime, nullable=False)\n server_name = Column(String(32))\n server_ip = Column(INTEGER, nullable=False)\n server_port = Column(SMALLINT, nullable=False)\n round_id = Column(INTEGER, nullable=False)\n role = Column(String(32))\n expiration_time = Column(DateTime)\n applies_to_admins = Column(TINYINT, nullable=False, server_default=text(\"'0'\"))\n reason = Column(String(2048), nullable=False)\n ckey = Column(String(32))\n ip = Column(INTEGER)\n computerid = Column(String(32))\n a_ckey = Column(String(32), nullable=False)\n a_ip = Column(INTEGER, nullable=False)\n a_computerid = Column(String(32), nullable=False)\n who = Column(String(2048), nullable=False)\n adminwho = Column(String(2048), nullable=False)\n edits = Column(Text)\n unbanned_datetime = Column(DateTime)\n unbanned_ckey = Column(String(32))\n unbanned_ip = Column(INTEGER)\n unbanned_computerid = Column(String(32))\n unbanned_round_id = Column(INTEGER)\n global_ban = Column(TINYINT, nullable=False, server_default=text(\"'1'\"))\n hidden = Column(TINYINT, nullable=False, server_default=text(\"'0'\"))\n\n def get_ban_record(self):\n bantype = \"\"\n if 
self.role == \"Server\":\n if self.expiration_time:\n bantype = \"tempban\"\n else:\n bantype = \"permaban\"\n else:\n if self.expiration_time:\n bantype = \"job_tempban\"\n else:\n bantype = \"job_permaban\"\n\n return BanRecord(\n ckey=self.ckey,\n bantime=self.bantime and self.bantime.astimezone(pytz.UTC),\n expiration_time=self.expiration_time and self.expiration_time.astimezone(pytz.UTC),\n a_ckey=self.a_ckey,\n bantype=bantype,\n expired=(bantype != \"permaban\" and\n bantype != \"job_permaban\" and\n self.expiration_time.astimezone(pytz.UTC) < datetime.datetime.now(datetime.timezone.utc)),\n role=self.role,\n unbanned=bool(self.unbanned_datetime),\n unbanned_ckey=self.unbanned_ckey,\n unbanned_datetime=self.unbanned_datetime and self.unbanned_datetime.astimezone(pytz.UTC),\n reason=self.reason\n )\n\n\n@attr.s\nclass ConnectionRecord:\n datetime = attr.ib()\n ckey = attr.ib()\n ip = attr.ib()\n computerid = attr.ib()\n\n\nclass Connection():\n id = Column(Integer, primary_key=True)\n datetime = Column(DateTime)\n ckey = Column(VARCHAR(50))\n ip = Column(VARCHAR(50), nullable=False)\n computerid = Column(VARCHAR(50), nullable=False)\n\n def get_record(self):\n return ConnectionRecord(\n datetime=self.datetime.astimezone(pytz.UTC),\n ckey=self.ckey,\n ip=self.ip,\n computerid=self.computerid\n )\n\n\nclass ConnectionChaotic(db_onyx.Model, Connection):\n __bind_key__ = 'chaotic'\n __tablename__ = 'connection'\n\n\nclass ConnectionEos(db_eos.Model, Connection):\n __bind_key__ = 'eos'\n __tablename__ = 'connection'\n\n\nclass ConnectionDragon(db_dragon.Model):\n __bind_key__ = 'dragon'\n __tablename__ = 'SS13_connection_log'\n\n id = Column(Integer, primary_key=True)\n datetime = Column(DateTime)\n server_name = Column(String(32))\n server_ip = Column(INTEGER, nullable=False)\n server_port = Column(SMALLINT, nullable=False)\n round_id = Column(INTEGER, nullable=False)\n ckey = Column(String(45))\n ip = Column(INTEGER, nullable=False)\n computerid = Column(String(45))\n\n def get_record(self):\n return ConnectionRecord(\n datetime=self.datetime.astimezone(pytz.UTC),\n ckey=self.ckey,\n ip=self.ip,\n computerid=self.computerid\n )\n\n@attr.s\nclass AdminRecord:\n ckey = attr.ib()\n rank = attr.ib()\n flags = attr.ib()\n\nclass ErroAdmin():\n id = Column(Integer, primary_key=True)\n ckey = Column(VARCHAR(50), nullable=False)\n rank = Column(VARCHAR(50), nullable=False)\n flags = Column(INTEGER, nullable=False)\n \n def get_record(self):\n return AdminRecord(\n ckey=self.ckey,\n rank=self.rank,\n flags=self.flags\n )\n\nclass ErroAdminChaotic(db_onyx.Model, ErroAdmin):\n __bind_key__ = 'chaotic'\n __tablename__ = 'erro_admin'\n\n\nclass ErroAdminEos(db_eos.Model, ErroAdmin):\n __bind_key__ = 'eos'\n __tablename__ = 'erro_admin'\n\ngame_models = {\n \"chaotic\":\n {\n \"ErroBan\": ErroBanChaotic,\n \"Connection\": ConnectionChaotic,\n \"ErroAdmin\": ErroAdminChaotic\n },\n \"eos\":\n {\n \"ErroBan\": ErroBanEos,\n \"Connection\": ConnectionEos,\n \"ErroAdmin\": ErroAdminEos\n },\n \"dragon\":\n {\n \"ErroBan\": ErroBanDragon,\n \"Connection\": ConnectionDragon\n }\n}\n","repo_name":"ChaoticOnyx/OnyxForum","sub_path":"modules/hub/hub/gameserver_models.py","file_name":"gameserver_models.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"41479981821","text":"from uuid import UUID\r\n\r\nfrom aplicacao.models import ModeloDocente, ModeloUnidadeSenai, ModeloTelefone\r\nfrom dominio.entidades 
import Docente\r\n\r\n\r\nclass ServicoConverterModeloDocente:\r\n @staticmethod\r\n def de_entidade(entidade: Docente) -> ModeloDocente:\r\n unidade_senai = ModeloUnidadeSenai.objects.get(pk=entidade.unidade_senai_id.valor)\r\n return ModeloDocente(\r\n nome=entidade.nome.valor,\r\n id=entidade.id.valor,\r\n email=entidade.email.valor,\r\n tipo_de_contratacao=entidade.tipo_de_contratacao.valor.value,\r\n unidade_senai=unidade_senai,\r\n ativo=entidade.ativo\r\n )\r\n\r\n @staticmethod\r\n def para_entidade(modelo: ModeloDocente) -> Docente:\r\n id_ = UUID(str(modelo.id))\r\n telefones = ModeloTelefone.objects.filter(docente_id=id_)\r\n return Docente.construir(\r\n nome=str(modelo.nome),\r\n id_=id_,\r\n email=modelo.email,\r\n telefones=[telefone.numero for telefone in telefones],\r\n tipo_de_contratacao=str(modelo.tipo_de_contratacao),\r\n unidade_senai_id=modelo.unidade_senai.id,\r\n ativo=bool(modelo.ativo)\r\n )\r\n","repo_name":"fr-mm/competencias-backend","sub_path":"aplicacao/servicos/servico_converter_modelo_docente.py","file_name":"servico_converter_modelo_docente.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"gl","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23451324811","text":"#!/usr/bin/python\n\n\ndef getCase(l, k):\n s=c=si=0\n for i in list(l.split(\" \")[1]):\n if (s+c) 1 -> 6) + (5 -> 9 -> 2). That is, 617 + 295. Output: 2 -> 1 -> 9. That is, 912.\n\n617 + \n295 = 912\n 2 | 1\n 1 | 1\n 9\n\n2467\n34152 = 36619\n\n 9\n 1 | 1\n 6 \n 6\n3 \n\n2467\n3815 = 6282 | 1\n \n\n\n\n987 +\n765 = 1752\n 2 | 1\n 5 | 1\n 7 | 1\n 1 \n\n7+5 = 12 -> 2 | 1\n8+6+1 = 15 -> 5 | 1\n9+7+1 = 17 -> 7 | 1\n\n\n\nFOLLOW UP\n\nSuppose the digits are stored in forward order. Repeat the above problem. \n\nEXAMPLE\n\nInput: (6 -> 1 -> 7) + (2 -> 9 -> 5). That is, 617 + 295. Output: 9 -> 1 -> 2. That is, 912.\n\"\"\"\n\nclass Node:\n def __init__(self, data=None):\n self.data = data\n self.next = None\n\nclass LinkedList:\n def __init__(self, value=None):\n self.head = Node(value)\n self.length = 1 if value is not None else 0\n \n def __repr__(self):\n p = self.head\n s = ''\n while p is not None:\n s += str(p.data) + \" -> \"\n p = p.next\n return s\n\n def add_to_tail(self, value):\n new_tail = Node(value)\n p = self.head\n while p.next is not None:\n p = p.next\n p.next = new_tail\n self.length += 1\n\n\ndef sum_list(listA, listB):\n pA = listA.head\n pB = listB.head\n new_list = LinkedList()\n carry = 0\n remainder = None\n\n while pA is not None and pB is not None:\n valA = pA.data\n valB = pB.data\n if valA < 0 or valA > 9 or valB < 0 or valB > 9:\n
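 # a node outside 0-9 means the input is not a digit list, so fail fast\n raise ValueError(\"Input must be a linked list of digits, i.e. 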
positive integers less than 10\")\n _sum = valA + valB + carry\n if len(str(_sum)) == 2:\n carry = 1\n else:\n carry = 0\n new_value = int(str(_sum)[-1:])\n new_list.add_to_tail(new_value)\n pA = pA.next\n pB = pB.next\n\n if pA is not None:\n while pA is not None:\n valA = pA.data\n _sum = valA + carry\n if len(str(_sum)) == 2:\n carry = 1\n else:\n carry = 0\n new_value = int(str(_sum)[-1:])\n new_list.add_to_tail(new_value)\n pA = pA.next\n\n elif pB is not None:\n while pB is not None:\n valB = pB.data\n _sum = valB + carry\n if len(str(_sum)) == 2:\n carry = 1\n else:\n carry = 0\n new_value = int(str(_sum)[-1:])\n new_list.add_to_tail(new_value)\n pB = pB.next\n\n new_list.head = new_list.head.next\n\n return new_list\n\n\ndef create_list(nums):\n head = LinkedList(nums[0])\n for i in range(1, len(nums)):\n head.add_to_tail(nums[i])\n return head\n\ndef _print_list(lst):\n s = ''\n p = lst.head\n while p is not None:\n s += str(p.data) + \" -> \"\n p = p.next\n return s\n\nsum1 = create_list([7, 6, 4, 2])\nsum2 = create_list([2, 5, 1, 4, 3])\nprint(sum_list(sum1, sum2)) # -> 36619\n\nsum3 = create_list([3, 4, 2, 0, 0, 2, 1, 4]) \nsum4 = create_list([4, 8, 9, 6, 3, 2, 1, 0]) \nprint(sum_list(sum3, sum4)) # -> 42437227\n\nsum5 = create_list([5, 4, 2, 1, 1])\nsum6 = create_list([4, 2, 9, 9, 5])\nprint(sum_list(sum5, sum6))\n\nsum5 = create_list([1, 0, 0,])\nsum6 = create_list([0, 0, 0, 5, 2])\nprint(sum_list(sum5, sum6))\n","repo_name":"domeccleston/ctci","sub_path":"linked-lists/sum-list.py","file_name":"sum-list.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44293011662","text":"import requests\nimport pandas as pd\n\nfrom pandas_profiling import ProfileReport\n\nfor day in range(1, 32):\n if day<10:\n day_string = '0'+str(day)\n else:\n day_string = str(day)\n\n print('day',day_string)\n result_json = requests.get('https://api.tvmaze.com/schedule/web?date=2020-12-'+day_string).json()\n df = pd.json_normalize(result_json)\n profile = ProfileReport(df, title=\"Pandas Profiling Report\")\n profile.to_file(\"../profiling/report_december_\"+day_string+\".html\")\n","repo_name":"tatianarbelaez/challenge-bank","sub_path":"src/create_profiling.py","file_name":"create_profiling.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21277463061","text":"import tensorflow as tf\nimport tensorflowvisu\nimport math\nimport mnistdata\nprint(\"Tensorflow version \" + tf.__version__)\n\n# Download images and labels into mnist.test (10K images+labels) and mnist.train (60K images+labels)\nmnist = mnistdata.read_data_sets(\"data\", one_hot=True, reshape=False)\n\n# neural network structure for this sample:\n#\n# · · · · · · · · · · (input data, 1-deep) X [batch, 28, 28, 1]\n# @ @ @ @ @ @ @ @ @ @ -- conv. layer +BN 6x6x1=>24 stride 1 W1 [6, 6, 1, 24] B1 [24]\n# ∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶ Y1 [batch, 28, 28, 24]\n# @ @ @ @ @ @ @ @ -- conv. layer +BN 5x5x24=>48 stride 2 W2 [5, 5, 24, 48] B2 [48]\n# ∶∶∶∶∶∶∶∶∶∶∶∶∶∶∶ Y2 [batch, 14, 14, 48]\n# @ @ @ @ @ @ -- conv. 
layer +BN 4x4x48=>64 stride 2 W3 [4, 4, 48, 64] B3 [64]\n# ∶∶∶∶∶∶∶∶∶∶∶ Y3 [batch, 7, 7, 64] => reshaped to YY [batch, 7*7*64]\n# \x/x\x\x/ ✞ -- fully connected layer (relu+dropout+BN) W4 [7*7*64, 200] B4 [200]\n# · · · · Y4 [batch, 200]\n# \x/x\x/ -- fully connected layer (softmax) W5 [200, 10] B5 [10]\n# · · · Y [batch, 10]\n\n# input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch\nX = tf.placeholder(tf.float32, [None, 28, 28, 1])\n# correct answers will go here\nY_ = tf.placeholder(tf.float32, [None, 10])\n# test flag for batch norm\ntst = tf.placeholder(tf.bool)\niter = tf.placeholder(tf.int32)\n# dropout probability\npkeep = tf.placeholder(tf.float32)\npkeep_conv = tf.placeholder(tf.float32)\n\ndef batchnorm(Ylogits, is_test, iteration, offset, convolutional=False):\n exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, iteration) # adding the iteration prevents from averaging across non-existing iterations\n bnepsilon = 1e-5\n if convolutional:\n mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])\n else:\n mean, variance = tf.nn.moments(Ylogits, [0])\n update_moving_averages = exp_moving_avg.apply([mean, variance])\n m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)\n v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda: variance)\n Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)\n return Ybn, update_moving_averages\n\ndef no_batchnorm(Ylogits, is_test, iteration, offset, convolutional=False):\n return Ylogits, tf.no_op()\n\ndef compatible_convolutional_noise_shape(Y):\n noiseshape = tf.shape(Y)\n noiseshape = noiseshape * tf.constant([1,0,0,1]) + tf.constant([0,1,1,0])\n return noiseshape\n\n# three convolutional layers with their channel counts, and a\n# fully connected layer (the last layer has 10 softmax neurons)\nK = 24 # first convolutional layer output depth\nL = 48 # second convolutional layer output depth\nM = 64 # third convolutional layer\nN = 200 # fully connected layer\n\nW1 = tf.Variable(tf.truncated_normal([6, 6, 1, K], stddev=0.1)) # 6x6 patch, 1 input channel, K output channels\nB1 = tf.Variable(tf.constant(0.1, tf.float32, [K]))\nW2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))\nB2 = tf.Variable(tf.constant(0.1, tf.float32, [L]))\nW3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))\nB3 = tf.Variable(tf.constant(0.1, tf.float32, [M]))\n\nW4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))\nB4 = tf.Variable(tf.constant(0.1, tf.float32, [N]))\nW5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))\nB5 = tf.Variable(tf.constant(0.1, tf.float32, [10]))\n\n# The model\n# batch norm scaling is not useful with relus\n# batch norm offsets are used instead of biases\nstride = 1 # output is 28x28\nY1l = tf.nn.conv2d(X, W1, strides=[1, stride, stride, 1], padding='SAME')\nY1bn, update_ema1 = batchnorm(Y1l, tst, iter, B1, convolutional=True)\nY1r = tf.nn.relu(Y1bn)\nY1 = tf.nn.dropout(Y1r, pkeep_conv, compatible_convolutional_noise_shape(Y1r))\nstride = 2 # output is 14x14\nY2l = tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME')\nY2bn, update_ema2 = batchnorm(Y2l, tst, iter, B2, convolutional=True)\nY2r = tf.nn.relu(Y2bn)\nY2 = tf.nn.dropout(Y2r, pkeep_conv, compatible_convolutional_noise_shape(Y2r))\nstride = 2 # output is 7x7\nY3l = tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME')\nY3bn, update_ema3 = batchnorm(Y3l, tst, iter, B3, convolutional=True)\n
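# relu is applied after batch norm: the learned BN offset B3 plays the role of the bias, and no BN scale is used since relu follows\nY3r = 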
tf.nn.relu(Y3bn)\nY3 = tf.nn.dropout(Y3r, pkeep_conv, compatible_convolutional_noise_shape(Y3r))\n\n# reshape the output from the third convolution for the fully connected layer\nYY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])\n\nY4l = tf.matmul(YY, W4)\nY4bn, update_ema4 = batchnorm(Y4l, tst, iter, B4)\nY4r = tf.nn.relu(Y4bn)\nY4 = tf.nn.dropout(Y4r, pkeep)\nYlogits = tf.matmul(Y4, W5) + B5\nY = tf.nn.softmax(Ylogits)\n\nupdate_ema = tf.group(update_ema1, update_ema2, update_ema3, update_ema4)\n\n# cross-entropy loss function (= -sum(Y_i * log(Yi)) ), normalised for batches of 100 images\n# TensorFlow provides the softmax_cross_entropy_with_logits function to avoid numerical stability\n# problems with log(0) which is NaN\ncross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_)\ncross_entropy = tf.reduce_mean(cross_entropy)*100\n\n# accuracy of the trained model, between 0 (worst) and 1 (best)\ncorrect_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n# matplotlib visualisation\nallweights = tf.concat([tf.reshape(W1, [-1]), tf.reshape(W2, [-1]), tf.reshape(W3, [-1]), tf.reshape(W4, [-1]), tf.reshape(W5, [-1])], 0)\nallbiases = tf.concat([tf.reshape(B1, [-1]), tf.reshape(B2, [-1]), tf.reshape(B3, [-1]), tf.reshape(B4, [-1]), tf.reshape(B5, [-1])], 0)\nconv_activations = tf.concat([tf.reshape(tf.reduce_max(Y1r, [0]), [-1]), tf.reshape(tf.reduce_max(Y2r, [0]), [-1]), tf.reshape(tf.reduce_max(Y3r, [0]), [-1])], 0)\ndense_activations = tf.reduce_max(Y4r, [0])\nI = tensorflowvisu.tf_format_mnist_images(X, Y, Y_)\nIt = tensorflowvisu.tf_format_mnist_images(X, Y, Y_, 1000, lines=25)\ndatavis = tensorflowvisu.MnistDataVis(title4=\"batch-max conv activation\", title5=\"batch-max dense activations\", histogram4colornum=2, histogram5colornum=2)\n\n# training step\n# the learning rate is: # 0.0001 + 0.03 * (1/e)^(step/1000)), i.e. 
exponential decay from 0.03->0.0001\nlr = 0.0001 + tf.train.exponential_decay(0.02, iter, 1600, 1/math.e)\ntrain_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)\n\n# init\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n\n# You can call this function in a loop to train the model, 100 images at a time\ndef training_step(i, update_test_data, update_train_data):\n\n # training on batches of 100 images with 100 labels\n batch_X, batch_Y = mnist.train.next_batch(100)\n\n # compute training values for visualisation\n if update_train_data:\n a, c, im, ca, da, l = sess.run([accuracy, cross_entropy, I, conv_activations, dense_activations, lr],\n feed_dict={X: batch_X, Y_: batch_Y, iter: i, tst: False, pkeep: 1.0, pkeep_conv: 1.0})\n print(str(i) + \": accuracy:\" + str(a) + \" loss: \" + str(c) + \" (lr:\" + str(l) + \")\")\n datavis.append_training_curves_data(i, a, c)\n datavis.update_image1(im)\n datavis.append_data_histograms(i, ca, da)\n\n # compute test values for visualisation\n if update_test_data:\n a, c, im = sess.run([accuracy, cross_entropy, It],\n feed_dict={X: mnist.test.images, Y_: mnist.test.labels, tst: True, pkeep: 1.0, pkeep_conv: 1.0})\n print(str(i) + \": ********* epoch \" + str(i*100//mnist.train.images.shape[0]+1) + \" ********* test accuracy:\" + str(a) + \" test loss: \" + str(c))\n datavis.append_test_curves_data(i, a, c)\n datavis.update_image2(im)\n\n # the backpropagation training step\n sess.run(train_step, {X: batch_X, Y_: batch_Y, tst: False, iter: i, pkeep: 0.75, pkeep_conv: 1.0})\n sess.run(update_ema, {X: batch_X, Y_: batch_Y, tst: False, iter: i, pkeep: 1.0, pkeep_conv: 1.0})\n\ndatavis.animate(training_step, 10001, train_data_update_freq=20, test_data_update_freq=100)\n\n# to save the animation as a movie, add save_movie=True as an argument to datavis.animate\n# to disable the visualisation use the following line instead of the datavis.animate line\n# for i in range(10000+1): training_step(i, i % 100 == 0, i % 20 == 0)\n\nprint(\"max test accuracy: \" + str(datavis.get_max_test_accuracy()))\n\n## All runs 10K iterations:\n# batch norm 0.998 lr 0.03-0.0001-1000 no BN offset or scale: best 0.9933 but most of the way under 0.993 and lots of variation. test loss under 2.2 though\n# batch norm 0.998 lr 0.03-0.0001-500 no BN offset or scale: best 0.9933 but really clean curves\n# batch norm 0.998 lr 0.03-0.0001-500 no BN offset or scale, dropout 0.8 on fully connected layer: max 0.9926\n# same as above but batch norm on fully connected layer only: max 0.9904\n# batch norm 0.998 lr 0.03-0.0001-500, withouts biases or BN offsets or scales at all: above 0.99 at 1200 iterations (!) but then max 0.9928 (record test loss though: 2.08193)\n# batch norm 0.998 lr 0.03-0.0001-500 dropout à.75, with biases replaced with BN offsets as per the book: above 0.99 at 900 iterations (!), max 0.9931 at 10K iterations, maybe could have gone higher still (record test loss though: below 2.05)\n# batch norm 0.998 lr 0.03-0.0001-500 no dropout, with biases replaced with BN offsets as per the book: above 0.99 at 900 iterations (!), max 0.993 (best loss at 2.0879 at 2100 it and went up after that)\n# batch norm 0.998 lr 0.03-0.0001-500 no dropout, offets and scales for BN, no biases: max 0.9935 at 2400 it but going down from there... 
also dense activations not so regular...\n# batch norm 0.999 + same as above: 0.9935 at 2400 iterations but downhill from there...\n# batch norm 0.999 lr 0.02-0.0002-2000 dropout 0.75, normal biases, no BN scales or offsets: max 0.9949 at 17K it (min test loss 1.64665 but cruising around 1.8) 0.994 at 3100 it, 0.9942 at 20K it, 0.99427 average on last 10K it\n# batch norm 0.999 lr 0.02-0.0001-1000 dropout 0.75, normal biases, no BN scales or offsets: max 0.9944 but oscillating in 0.9935-0.9940 region (test loss stable betwen 1.7 and 1.8 though)\n# batch norm 0.999 lr 0.02-0.0002-1000 dropout 0.75, normal biases, no BN scales or offsets: max 0.995, min test loss 1.49787 cruising below 1.6, then at 8K it something happens and cruise just above 1.6, 0.99436 average on last 10K it\n# => see which setting removes the weird event at 8K ?:\n# => in everything below batch norm 0.999 lr 0.02-0.0002-1000 dropout 0.75, normal biases, no BN scales or offsets, unless stated otherwise\n# remove n/n+1 in variation calculation: no good, m ax 0.994 buit cruising around 0.993\n# bn 0.9955 for cutoff at 2K it: still something happens at 8K. Max 0.995 but cruising at 0.9942-0.9943 only and downward trend above 15K. Test loss: nice cruise below 1.6\n# bn epsilon e-10 => max 0.9947 cruise around 0.9939, test loss never went below 1.6, barely below 1.7,\n# bn epsilon e-10 run 2=> max 0.9945 cruise around 0.9937, test loss never went below 1.6, barely below 1.7,\n# baseline run 2: max 0.995 cruising around 0.9946 0.9947, test loss cruising between 1.6 and 1.7 (baseline confirmed)\n# bn 0.998 for cutoff at 5K it: max 0.9948, test loss cruising btw 1.6 and 1.8, last 10K avg 0.99421\n# lr 0.015-0.0001-1500: max 0.9938, cruise between 0.993 and 0.994, test loss above 2.0 most of the time (not good)\n# bn 0.9999: max 0.9952, cruise between 0.994 and 0.995 with upward trend, fall in last 2K it. test loss cruise just above 1.6. Avg on last 10K it 0.99441. Could be stopped at 7000 it. Quite noisy overall.\n# bn 0.99955 for cutoff at 20K it: max 0.9948, cruise around 0.9942, test loss cruise around 1.7. Avg on last 10K it 0.99415\n# batch norm 0.999 lr 0.015-0.00015-1500 dropout 0.75, normal biases, no MB scales or offsets: cruise around 0.9937-00994, test loss cruise around 1.95-2.0 (not good)\n# batch norm 0.999 lr 0.03-0.0001-2000 dropout 0.75, normal biases, no MB scales or offsets: stable cruise around 0.9940, test loss cruise around 2.2, good stability in last 10K, bumpy slow start\n# batch norm 0.9999 lr 0.02-0.0001-1500 dropout 0.75, normal biases, no MB scales or offsets: max 0.995, stable btw 0.0040-0.9945, test loss stable around 1.7, good stability in last 4K, avg on last 10K: 0.99414, avg on last 4K\n# *batch norm 0.9999 lr 0.02-0.00015-1000 dropout 0.75, normal biases, no MB scales or offsets: max 0.9956 stable above 0.995!!! test loss stable around 1.6. Avg last 10K 0.99502. Avg 10K-13K 0.99526. Avg 8K-10K: 0.99514. 
Best example to run in 10K\n# same as above with different rnd seed: max 0.9938 only in 10K it, test loss in 1.9 region (very bad)\n# same as above with dropout 0.8: max 0.9937 only (bad)\n# same as above with dropout 0.66: max 0.9942 only, test loss between 1.7-1.8 (not good)\n# same as above with lr 0.015-0.0001-1200: max 0.9946 at 6500 it but something happens after that it it goes down (not good)\n# best * run 2 (lbl 5.1): max 0.9953, cruising around 0.995 until 12K it, went down a bit after that (still ok) avg 8-10K 0.99484\n# best * run 3 (lbl 5.2 video): max 0.9951, cruising just below 0.995, test loss cruising around 1.6\n# best * run 3-8: not good, usually in the 0.994 range\n# best * run 9: (lbl 5.3 video): max 0.9956, cruising above 0.995, test loss cruising around 1.6, avg 7K-10K it: 0.99518\n# added BN offests instead of biases as per the BN theory. Offsets initialised to 0.025\n# lr 0.005-0.00015-1000 max accuracy 0.9944 not good\n# lr 0.015-0.0001-1200 max accuracy 0.9950 but it was really a peak\n# same with offsets initialised to -0.25: very bad, not even 0.993, BN offsets stabilise even lower than -0.25\n# same with offsets initialised to 0.1: max accuracy 0.9946 bad\n# same with batch norm and dropout on fully connected layer only: max accuracy 0.9935 very bad\n# BN with no offset but regular biases applied after the BN in convolutional layers. BN with offset on fully connected layer: max accuracy 0.9949\n# BN and dropout on all layers, as per the book: max accuracy 0.9918 very bad\n# back to basics: batch norm 0.9999 lr 0.02-0.00015-1000 dropout 0.75 on dense layer, normal biases, no MB scales or offsets: 0.9935 (bad)\n# by the book: batch norm 0.9999 lr 0.02-0.00015-1000 dropout 0.75 on dense layer, BN offsets init to 0.01, no BN scales: max accuracy 0.9943\n# smaller batch size (33): max accuracy 0.9925 (not good)\n#* by the book: 3 conv layers 24-48-64, batch norm 0.999 lr 0.02-0.0001-1700 dropout 0.5 on dense layer, BN offsets init to 0.01, no BN scales: max accuracy 0.9954, stable around 0.9950, test loss goes as low as 1.45! (on GPU)\n#* by the book: 3 conv layers 24-48-64, batch norm 0.999 lr 0.02-0.0001-1800 dropout 0.5 on dense layer, BN offsets init to 0.01, no BN scales: max accuracy 0.9952, stable around 0.9950 (on GPU)\n# by the book: 3 conv layers 24-48-64, batch norm 0.999 lr 0.02-0.0001-1500 dropout 0.5 on dense layer, BN offsets init to 0.01, no BN scales: max accuracy 0.9947 (on GPU)\n#* by the book: 3 conv layers 24-48-64, batch norm 0.999 lr 0.02-0.0001-1600 dropout 0.5 on dense layer, BN offsets init to 0.01, no BN scales: max accuracy 0.9956, stable around 0.9952 (on GPU)\n#* 2nd run: max accuracy 0.9954, stable around 0.0049, test loss stable around 1.7 (on GPU)\n#* 3rd run: max accuracy 0.9949, stable around 0.9947, test loss stable around 1.6 (on GPU)\n#* 4th run: max accuracy 0.9952, stable around 0.9948, test loss stable around 1.7, 0.9952 at 3200 iterations (on GPU)\n#* 5th run: max accuracy 0.9952, stable around 0.9952, test loss stable around 1.7 (on GPU)\n# same conditions without batch norm: max accuracy below 0.9900 ! 
(on GPU)\n# same conditions with dropout 0.75: max accuracy 0.9953, stable around 0.9950, test loss stable around 1.6 (on GPU)\n# 2nd run: max accuracy 0.9958 (!), stable around 0.9950, test loss stable around 1.65 (on GPU)\n# 3rd run: max accuracy 0.9955 (!), stable around 0.9951, test loss stable around 1.65 (on GPU)","repo_name":"GoogleCloudPlatform/tensorflow-without-a-phd","sub_path":"tensorflow-mnist-tutorial/mnist_4.2_batchnorm_convolutional.py","file_name":"mnist_4.2_batchnorm_convolutional.py","file_ext":"py","file_size_in_byte":16380,"program_lang":"python","lang":"en","doc_type":"code","stars":2677,"dataset":"github-code","pt":"61"} +{"seq_id":"2821631422","text":"# encoding=utf8\nfrom tests.conftest import API_BASEURL\n\nimport pytest\n\nROOT_ID = '069cb8d7-bbdd-47d3-ad8f-82ef4c269df1'\n\n\n@pytest.mark.asyncio\nasync def test_delete_non_used_id(client):\n response = await client.delete(\n f'/delete/0347sgda-5436-34gh-dsaf-845hjgdgfhg1'\n )\n assert response.status_code == 404\n assert response.json() == {'detail': 'Item not found'}\n","repo_name":"sergey-png/yandex_backend_enroll","sub_path":"tests/app/routes/test_delete.py","file_name":"test_delete.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11197712951","text":"from sklearn.externals import joblib\n\n# load the model from disk\nloaded_model = joblib.load('joblib_iris_flowers_finalized_model.sav')\n\n# define one new instance\nXnew = [[5.4, 3.4, 1.7, 0.2], [5.5, 2.3, 4.0, 1.3], [5.6, 2.8, 4.9, 2.0]]\n\n# make a prediction\nynew = loaded_model.predict(Xnew)\n\n# show the inputs and predicted outputs\nfor i in range(len(Xnew)):\n print(\"X=%s, Predicted=%s\" % (Xnew[i], ynew[i]))\n","repo_name":"Baneeishaque/classification_iris_flowers","sub_path":"multiple_class_prediction_joblib_dataset.py","file_name":"multiple_class_prediction_joblib_dataset.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2589826057","text":"import json\nimport os.path\nimport re\nimport time\n\nimport errant\nimport pandas as pd\nimport torch\nfrom annotated_text import annotated_text\nfrom bs4 import BeautifulSoup\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom gramformer import Gramformer\nfrom pydantic import BaseModel\nfrom transformers import AutoModelForSeq2SeqLM\nfrom transformers import AutoTokenizer\nimport datetime\n\nannotator = errant.load('en')\n\nPATH = os.path.abspath('models/gf.pth')\n\nprint(\"Loading models...\")\n\napp = FastAPI()\n\n\nclass Sentences(BaseModel):\n sentences: list\n\n\norigins = [\n 'https://8249-218-2-231-114.jp.ngrok.io/',\n \"http://localhost\",\n \"http://localhost:3000\",\n \"http://127.0.0.1:8000\",\n \"https://trans-grammer-frontend.vercel.app/\"\n\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=False,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n\n)\n\ndevice = \"cpu\"\ncorrection_model_tag = \"prithivida/grammar_error_correcter_v1\"\ncorrection_tokenizer = AutoTokenizer.from_pretrained(correction_model_tag)\ncorrection_model = AutoModelForSeq2SeqLM.from_pretrained(correction_model_tag)\n\ninfluent_sentences = [\n \"I is dog.\"\n]\n\n\ndef set_seed(seed):\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n\nprint(\"Models loaded !\")\n\ngf = Gramformer(models=1, 
use_gpu=False) # 1=corrector, 2=detector\n\ntry:\n torch.save(gf, PATH)\n\n gf_inference = torch.load(PATH)\nexcept:\n print('Torch Save Error')\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"Gramformer !\"}\n\n\n# @app.get(\"/{correct}\")\n# def get_correction(input_sentence):\n# set_seed(1212)\n# scored_corrected_sentence = correct(input_sentence)\n# return {\"scored_corrected_sentence\": scored_corrected_sentence}\n\n@app.post(\"/sentence\")\ndef get_corrected_sentence(sentences: Sentences):\n sentence_list = sentences.sentences\n # print(sentence_list)\n set_seed(1212)\n\n scored_corrected_sentence = correct_sentence(sentence_list)\n sent_list = list(scored_corrected_sentence)\n\n highlighted_sentences = show_highlights(sentence_list[0], sent_list[0])\n\n return json.dumps({'corrected_sentence': highlighted_sentences})\n\n\ndef correct_sentence(input_sentence):\n for influent_sentence in input_sentence:\n \"\"\"\n Correct influent_sentences\n \n \"\"\"\n corrected_sentences = gf.correct(influent_sentence, max_candidates=3)\n # print(\"[Input] \", influent_sentence)\n # for corrected_sentence in corrected_sentences:\n # print(\"[Correction] \",corrected_sentence)\n # print(\"[Edits] \", gf.highlight(influent_sentence, corrected_sentence))\n return corrected_sentences\n\n\ndef correct(input_sentence, max_candidates=1):\n correction_prefix = \"gec: \"\n input_sentence = correction_prefix + input_sentence\n input_ids = correction_tokenizer.encode(input_sentence, return_tensors='pt')\n input_ids = input_ids.to(device)\n\n preds = correction_model.generate(\n input_ids,\n do_sample=True,\n max_length=128,\n top_k=50,\n top_p=0.95,\n # num_beams=7,\n early_stopping=True,\n num_return_sequences=max_candidates)\n\n corrected = set()\n for pred in preds:\n corrected.add(correction_tokenizer.decode(pred, skip_special_tokens=True).strip())\n\n corrected = list(corrected)\n return (corrected[0], 0) # Corrected Sentence, Dummy score\n\n\ndef show_highlights(input_text, corrected_sentence):\n try:\n strikeout = lambda x: '\\u0336'.join(x) + '\\u0336'\n highlight_text = highlight(input_text, corrected_sentence)\n color_map = {'d': '#faa', 'a': '#afa', 'span': '#fea'}\n tokens = re.split(r'(<[das]\\s.*?<\\/[das]>)', highlight_text)\n # print(tokens)\n annotations = [] # ['Sorry i ', ('forgot', 'VERB:TENSE', '#fea'), ' how to write. ', ('Tomorrow', 'SPELL', '#fea'), ' i ', ('remember.', 'VERB', '#fea'), '']\n for token in tokens:\n soup = BeautifulSoup(token, 'html.parser')\n tags = soup.findAll()\n\n if tags:\n _tag = tags[0].name\n _type = tags[0]['type']\n _text = tags[0]['edit']\n _desc = tags[0]['desc']\n _color = color_map[_tag]\n\n if _tag == 'd':\n _text = strikeout(tags[0].text)\n\n annotations.append((_text, _type, _desc))\n else:\n annotations.append(token)\n annotated_text(*annotations)\n\n print(highlight_text)\n\n return highlight_text\n\n except Exception as e:\n print('Some error occured!' + str(e))\n\n\ndef show_edits(input_text, corrected_sentence):\n try:\n edits = get_edits(input_text, corrected_sentence)\n df = pd.DataFrame(edits, columns=['type', 'original word', 'original start', 'original end', 'correct word',\n 'correct start', 'correct end'])\n df = df.set_index('type')\n\n except Exception as e:\n print('Some error occured!' + str(e))\n\n\ndef description(orig, edit, edit_type):\n\n descriptions = {\n \"DET\": 'The article %s may be incorrect. 
You may consider changing it to agree with the beginning sound of the following word and use %s' % (\n orig, edit),\n \"NOUN\": 'Consider changing %s to %s' % (\n orig, edit),\n \"SPELL\": 'The word %s is wrongly spelt. Correct it to %s' % (\n orig, edit),\n \"PUNCT\": 'The article %s may be incorrect. You may consider changing it to agree with the beginning sound of the following word and use %s' % (\n orig, edit),\n \"OTHER\": 'Consider changing %s to %s' % (\n orig, edit),\n \"ORTH\": '%s may be incorrect. Consider changing to %s' % (\n orig, edit),\n \"VERB:FORM\": 'The verb %s may be incorrect. Consider changing to %s' % (\n orig, edit),\n \"NOUN:NUM\": '%s may not agree in number with other words in this phrase. Consider changing to %s' % (\n orig, edit),\n \"VERB:TENSE\": 'The verb tense %s may be incorrect. Consider changing to %s' % (\n orig, edit),\n \"VERB:SVA\": 'The verb %s may be incorrect. Consider changing to %s' % (\n orig, edit),\n\n }\n desc = descriptions[edit_type]\n return desc\n\n\ndef highlight(orig, cor):\n edits = _get_edits(orig, cor)\n orig_tokens = orig.split()\n\n ignore_indexes = []\n\n for edit in edits:\n edit_type = edit[0]\n edit_str_start = edit[1]\n edit_spos = edit[2]\n edit_epos = edit[3]\n edit_str_end = edit[4]\n\n # if no_of_tokens(edit_str_start) > 1 ==> excluding the first token, mark all other tokens for deletion\n for i in range(edit_spos + 1, edit_epos):\n ignore_indexes.append(i)\n\n if edit_str_start == \"\":\n if edit_spos - 1 >= 0:\n new_edit_str = orig_tokens[edit_spos - 1]\n edit_spos -= 1\n\n else:\n new_edit_str = orig_tokens[edit_spos + 1]\n edit_spos += 1\n\n if edit_type == \"PUNCT\":\n timestamp = str(datetime.datetime.timestamp(datetime.datetime.now())).replace('.', '-') + edit_type\n\n st = \"\" + new_edit_str + \"\"\n else:\n timestamp = str(datetime.datetime.timestamp(datetime.datetime.now())).replace('.', '-') + edit_type\n\n st = \"\" + new_edit_str + \"\"\n orig_tokens[edit_spos] = st\n elif edit_str_end == \"\":\n timestamp = str(datetime.datetime.timestamp(datetime.datetime.now())).replace('.', '-') + edit_type\n\n st = \"\" + edit_str_start + \"\"\n orig_tokens[edit_spos] = st\n else:\n timestamp = str(datetime.datetime.timestamp(datetime.datetime.now())).replace('.', '-') + edit_type\n\n edit_desc = description(edit_str_start, edit_str_end, edit_type)\n\n st = \"\" + edit_str_start + \"\"\n\n orig_tokens[edit_spos] = st\n\n for i in sorted(ignore_indexes, reverse=True):\n print(i)\n del (orig_tokens[i])\n\n return (\" \".join(orig_tokens))\n\n\ndef _get_edits(orig, cor):\n orig = annotator.parse(orig)\n cor = annotator.parse(cor)\n alignment = annotator.align(orig, cor)\n edits = annotator.merge(alignment)\n\n if len(edits) == 0:\n return []\n\n edit_annotations = []\n for e in edits:\n e = annotator.classify(e)\n edit_annotations.append((e.type[2:], e.o_str, e.o_start, e.o_end, e.c_str, e.c_start, e.c_end))\n\n if len(edit_annotations) > 0:\n return edit_annotations\n else:\n return []\n\n\ndef get_edits(orig, cor):\n return _get_edits(orig, cor)\n\n# def set_seed(seed):\n# torch.manual_seed(seed)\n# if torch.cuda.is_available():\n# torch.cuda.manual_seed_all(seed)\n#\n# set_seed(1212)\n#\n#\n# gf = Gramformer(models = 1, use_gpu=False) # 1=corrector, 2=detector\n#\n#\n#\n# influent_sentences = [\n# \"He are moving here.\",\n# \"I am doing fine. 
How is you?\",\n# \"How is they?\",\n# \"Matt like fish\",\n# \"the collection of letters was original used by the ancient Romans\",\n# \"We enjoys horror movies\",\n# \"Anna and Mike is going skiing\",\n# \"I walk to the store and I bought milk\",\n# \" We all eat the fish and then made dessert\",\n# \"I will eat fish for dinner and drink milk\",\n# \"what be the reason for everyone leave the company\",\n# ]\n#\n# for influent_sentence in influent_sentences:\n# corrected_sentences = gf.correct(influent_sentence, max_candidates=1)\n# print(\"[Input] \", influent_sentence)\n# for corrected_sentence in corrected_sentences:\n# print(\"[Correction] \",corrected_sentence)\n# print(\"-\" *100)\n","repo_name":"transphere-dev/trans-grammer-api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15973649303","text":"# Hangman Game\nimport random\nstages = ['''\n +---+\n | |\n O |\n /|\ |\n / \ |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\ |\n / |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /|\ |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========\n''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========\n''', '''\n +---+\n | |\n |\n |\n |\n |\n=========\n''']\n\nword_list = ['microsoft', 'google', 'facebook', 'apple', 'twitter']\nchosen_word = random.choice(word_list)\n\ndisplay = []\nfor i in chosen_word:\n display.append('_')\nword_length = len(chosen_word)\n# print('Hint, hint, the chosen word is: ' + chosen_word)\nprint(display)\n\nlives = 6\ni = 0\nwhile i <= 15:\n guess = input(\"Guess a letter of the word: \")\n guess = guess.lower()\n for position in range(word_length):\n letter = chosen_word[position]\n if letter == guess:\n display[position] = letter\n if guess not in chosen_word:\n lives -= 1\n print(stages[lives])\n if lives == 0:\n print('You lose')\n break\n
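 # the player wins once every blank in the word has been revealed\n if '_' not in display:\n print('Congratulations! 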
You won!')\n break\n print(display)\n i += 1","repo_name":"uvjour/Hangman","sub_path":"Hangman/Hangman.py","file_name":"Hangman.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4577910838","text":"from flask import render_template, request, redirect, url_for\nfrom flask_login import current_user\nfrom sqlalchemy.sql import text\n\nfrom application import app, db, login_required\nfrom application.messages.models import Message\nfrom application.messages.forms import MessageForm\nfrom application.auth.models import Role, User\nfrom application.management.forms import CategoryForm\nfrom application.groups.forms import GroupForm, GroupcategoryFrom\nfrom application.groups.models import Groups, GroupCategory, Category\n\n@app.route(\"/mypage\")\n@login_required()\ndef my_page():\n role = current_user.get_role().role\n return render_template(\"management/myPage.html\", role = role)\n\n@app.route(\"/admin/users\")\n@login_required(role = \"ADMIN\")\ndef admin_users():\n users = User.query.all()\n return render_template(\"management/adminUsers.html\", users = users)\n\n@app.route(\"/new/admin/<user_id>\", methods=[\"POST\"])\n@login_required(role = \"ADMIN\")\ndef admin_make(user_id):\n u = User.query.get(user_id)\n u.role_id = db.engine.execute(text(\"SELECT id FROM Role WHERE role = 'ADMIN'\")).first()[0]\n db.session().commit()\n\n return redirect(url_for(\"admin_users\"))\n\n@app.route(\"/admin/categories\", methods=[\"GET\", \"POST\"])\n@login_required(role = \"ADMIN\")\ndef admin_categories():\n categories = Category.query.all()\n if request.method == \"GET\":\n return render_template(\"management/adminCategories.html\", categories = categories, form = CategoryForm())\n\n form = CategoryForm(request.form)\n print(form.validate)\n if not form.validate():\n return render_template(\"management/adminCategories.html\", categories = categories, form = form)\n\n if category_exists(form.name.data):\n error = \"Category with that name already exists\"\n return render_template(\"management/adminCategories.html\", categories = categories, form = form, error = error)\n \n c = Category(form.name.data)\n db.session().add(c)\n db.session().commit()\n\n return redirect(url_for(\"admin_categories\"))\n\n@app.route(\"/categories/<category_id>/delete\", methods=[\"POST\"])\n@login_required(role=\"ADMIN\")\ndef delete_category(category_id):\n c = Category.query.get(category_id)\n gcs = GroupCategory.query.filter_by(category_id = category_id)\n\n for gc in gcs:\n db.session().delete(gc)\n\n db.session().delete(c)\n db.session().commit()\n\n return redirect(url_for(\"admin_categories\"))\n\n@app.route(\"/users/<user_id>/delete\", methods=[\"POST\"])\n@login_required(role=\"ADMIN\")\ndef delete_user(user_id):\n u = User.query.get(user_id)\n if u.get_role().role != \"ADMIN\":\n gs = u.groups\n for g in gs:\n messages = g.messages\n gcs = GroupCategory.query.filter_by(group_id = g.id)\n for message in messages:\n db.session.delete(message)\n for gc in gcs:\n db.session.delete(gc)\n db.session.delete(g)\n \n messages = u.messages\n for message in messages:\n db.session().delete(message)\n\n db.session().delete(u)\n db.session().commit()\n\n return 
False","repo_name":"Etsku520/devops-docker","sub_path":"1.8/Keskustelufoorumi-master/application/management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27606156183","text":"import turtle as t\r\n\r\n\r\ndef draw_polygon(side_amount: int, side_length: int) -> None:\r\n\r\n angle = 360 / side_amount\r\n start_x = -100\r\n start_y = -100\r\n\r\n def turtlePause(duration: int) -> None:\r\n t.penup()\r\n t.hideturtle()\r\n for i in range(duration):\r\n t.speed(1)\r\n t.left(1)\r\n\r\n t.pensize(2)\r\n t.pencolor(\"black\")\r\n t.speed(2)\r\n\r\n t.hideturtle()\r\n t.penup()\r\n t.goto(start_x, start_y)\r\n\r\n t.showturtle()\r\n t.pendown()\r\n\r\n for _ in range(side_amount):\r\n t.forward(side_length)\r\n t.left(angle)\r\n\r\n turtlePause(100)\r\n\r\n\r\ndef main() -> None:\r\n draw_polygon(10, 100)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"bymyselfstudio/CodingSessions_Python","sub_path":"polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4498432036","text":"import control as ct\nimport matplotlib.pyplot as plt\n# from control.matlab import *\n\nA = [[0, 1], [-2, -10]]\nB = [[0], [2]]\nC = [1, 0]\nD = 0\n\nsys = ct.ss(A,B,C,D)\n\nprint(sys)\n\nt, y = ct.step_response(sys)\n\nprint(t.shape)\n\nplt.figure(1)\nplt.title(\"Step Response\")\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Amplitude\")\nplt.plot(t,y)\nplt.grid()\nplt.pause(5)","repo_name":"cychitivav/control_theory","sub_path":"signals/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16926716235","text":"# Randomized for Algorithm Tuning\nimport numpy\nfrom pandas import read_csv\nfrom scipy.stats import uniform\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import RandomizedSearchCV\nfilename = 'pima-indians-diabetes.data.csv'\nnames = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']\ndataframe = read_csv(filename, names=names)\narray = dataframe.values\nX = array[:,0:8]\nY = array[:,8]\nparam_grid = {'alpha': uniform()}\nmodel = Ridge()\nrsearch = RandomizedSearchCV(estimator=model, param_distributions=param_grid, n_iter=100, random_state=7)\nrsearch.fit(X, Y)\nprint(rsearch.best_score_)\nprint(rsearch.best_estimator_.alpha)\n","repo_name":"rupskygill/ML-mastery","sub_path":"ml_with_python_code/16_random_search.py","file_name":"16_random_search.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"61"} +{"seq_id":"35633352834","text":"from random import randint\n\n\nclass Research:\n def __init__(self, path):\n self.path = path\n self.calc = self.Calculations(self.file_reader())\n\n def file_reader(self, has_header=True):\n with open(self.path, \"r\") as f:\n lines = f.readlines()\n lines_len = len(lines)\n if has_header is True:\n if len(lines) < 2:\n raise Exception(\"Incorrect lines count\")\n if len(lines[0].split(\",\")) != 2:\n raise Exception(\"Incorrect header\")\n res = []\n for i in range(1, len(lines)):\n if len(lines[i].split(\",\")) != 2:\n raise Exception(\"Incorrect columns count\")\n splited = lines[i].strip(\"\\n\").split(\",\")\n if (splited[0] != \"0\" and 
splited[0] != \"1\") or (splited[1] != \"0\" and splited[1] != \"1\"):\n raise Exception(\"Incorrect value\")\n if splited[0] == splited[1]:\n raise Exception(\"Incorrect value\")\n res.append([int(splited[0]), int(splited[1])])\n return res\n else:\n if len(lines) < 1:\n raise Exception(\"Incorrect lines count\")\n res = []\n for i in range(len(lines)):\n if len(lines[i].split(\",\")) != 2:\n raise Exception(\"Incorrect columns count\")\n splited = lines[i].strip(\"\\n\").split(\",\")\n if (splited[0] != \"0\" and splited[0] != \"1\") or (splited[1] != \"0\" and splited[1] != \"1\"):\n raise Exception(\"Incorrect value\")\n if splited[0] == splited[1]:\n raise Exception(\"Incorrect value\")\n res.append([int(splited[0]), int(splited[1])])\n return res\n\n class Calculations:\n def __init__(self, data):\n self.data = data\n\n def counts(self):\n heads = sum(i[0] for i in self.data)\n tails = sum(i[1] for i in self.data)\n return heads, tails\n\n def fractions(self, heads_count, tails_count):\n return heads_count / (heads_count + tails_count) * 100, tails_count / (heads_count + tails_count) * 100\n\n\nclass Analitics(Research.Calculations):\n def predict_random(self, number_of_predictions):\n predictions = []\n for i in range(number_of_predictions):\n if randint(0, 1) == 0:\n predictions.append([0, 1])\n else:\n predictions.append([1, 0])\n return predictions\n\n def predict_last(self):\n return self.data[-1]\n\n def save_file(self, data, name, ext):\n with open(name+\".\"+ext, 'w') as f:\n print(data, file=f)\n","repo_name":"welida42/DS_Piscine-42-","sub_path":"day_02/ex05/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":2841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29295293256","text":"import os, sys\n\nfrom dotenv import dotenv_values\nfrom json import loads\nfrom re import compile\nfrom colorama import Fore, Style\n\nRED=Fore.RED\nGREEN=Fore.GREEN\nBLUE=Fore.BLUE\nNC=Style.RESET_ALL\n\ndef validate_path(path: str) -> None:\n if path.startswith(\"~\"):\n path = path.replace(\"~\", os.getenv(\"HOME\"))\n \n if os.path.exists(path) is False:\n print(\"[{R}error{NC}] Failed to find path: \".format(R=RED, NC=NC), path)\n sys.exit(1)\n\nclass Rename:\n def __init__(self, config_path: str):\n self.config_path = config_path\n self.config_map = None\n \n self.show_name = \"\"\n self.season_regex = []\n self.episode_regex = []\n self.dry_run = False\n self.no_seasons = False\n\n validate_path(self.config_path)\n self.parse_config()\n\n def parse_config(self):\n # self.config_map = dotenv_values(self.config_path)\n with open(self.config_path, 'r') as file:\n data = file.read()\n self.config_map = loads(data)\n\n episode_regex = self.config_map.get(\"episode_regex\")\n if episode_regex is None and len(self.season_regex) == 0:\n print(\"[{R}error{NC}] Failed to find episode regular expressions\".format(R=RED, NC=NC))\n sys.exit(1)\n \n self.episode_regex = episode_regex\n\n if self.no_seasons is False:\n season_regex = self.config_map.get(\"season_regex\")\n if season_regex is None and len(self.season_regex) == 0:\n print(\"[{R}error{NC}] Failed to find season regular expressions\".format(R=RED, NC=NC))\n sys.exit(1)\n\n self.season_regex = season_regex\n\n def compile_regex(self):\n ret = []\n for regex in self.episode_regex:\n # print(\"Compiling: \", regex)\n ret.append(compile(regex))\n\n self.episode_regex = ret\n print(self.episode_regex)\n \n ret = []\n for regex in self.season_regex:\n
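 # compile the season patterns once up front, mirroring the episode loop above\n # 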
print(\"Compiling: \", regex)\n ret.append(compile(regex))\n\n self.season_regex = ret\n\n def match_str(self, string: str, regex_list: list):\n ret = None\n\n for regex in regex_list:\n search = regex.search(string)\n if search:\n ret = search\n break\n\n return ret\n\n def info(self, path: str):\n validate_path(path)\n\n def fetch_seasons(self, path: str):\n ret = []\n \n for root, directories, files in os.walk(path):\n for name in directories:\n full_path = os.path.join(root, name)\n\n match = self.match_str(name, self.season_regex)\n if match is not None:\n ret.append(full_path)\n\n return ret\n\n def rename_episodes(self, seasons: list):\n renamed = []\n file_count = 0\n \n for path in seasons:\n for root, directories, files in os.walk(path):\n for name in files:\n full_path = os.path.join(root, name)\n file_count += 1\n\n match = self.match_str(name, self.episode_regex)\n if match is not None:\n file_ext = name.split(\".\")[-1]\n sn = match.group(1)\n en = match.group(2)\n new_file_name = root + \"/\" + \"{n} - S{sn}E{en}\".format(n=self.show_name, sn=sn, en=en) + \".{}\".format(file_ext)\n print(new_file_name)\n if self.dry_run is False:\n os.rename(full_path, new_file_name)\n\n renamed.append(new_file_name)\n\n return renamed, file_count\n\n def walk(self, path: str): \n seasons = self.fetch_seasons(path)\n renamed, file_count = self.rename_episodes(seasons)\n\n return seasons, renamed, file_count","repo_name":"stevezaluk/series-rename","sub_path":"core/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28238359916","text":"\ndef change_maker(amount, denoms, index, memo):\n if (amount, index) in memo.keys():\n return memo[(amount, index)]\n if index == len(denoms) - 1:\n return 1\n\n ways = 0\n demon_amt = denoms[index]\n i = 0\n while (i*demon_amt) <= amount:\n amt_rem = (amount - (i*demon_amt))\n ways += change_maker(amt_rem, denoms, index+1, memo)\n i += 1\n\n memo[(amount, index)]=ways\n return ways \n\ndef coins(amount, memo):\n if amount == 0:\n return 1\n elif amount < 0:\n return 0\n elif memo[amount]:\n return memo[amount]\n else:\n memo[amount] = coins(amount-1, memo) + coins(amount-2, memo) + coins(amount-3, memo) + coins(amount-4, memo)\n return memo[amount]\n\n\ndenoms = [1,2,3,4]\namount = 4\nindex = 0\nmemo = dict()\nprint(change_maker(amount, denoms, index, memo))\nmemo = [None]*(amount+1)\nprint(coins(amount, memo))\n\ndef coin_change(amount, denoms, memo):\n if amount == 0:\n return 1\n elif amount < 0:\n return 0\n elif memo[amount]:\n return memo[amount]\n else:\n ways = 0\n for i in range(0,len(denoms)):\n #if denoms[i] <= amount:\n ways += coin_change(amount-denoms[i],denoms,memo)\n memo[amount] = ways\n return memo[amount]\n\nmemo = [None]*(amount+1)\ndenoms = [1,2,3,4]\namount = 4\nprint(coin_change(amount, denoms, memo))","repo_name":"withinfinitedegreesoffreedom/datastructures-algorithms","sub_path":"misc/makechange.py","file_name":"makechange.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2592532196","text":"import io\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import IO, TYPE_CHECKING, Dict, Iterator, List, Literal, Optional, Union\n\nfrom paradox.interfaces import AcceptsStatements, AlsoParam, InvalidLogic\n\nif TYPE_CHECKING:\n import 
builtins\n\n from paradox.expressions import (\n PanExpr,\n PanIndexAccess,\n PanKeyAccess,\n Pannable,\n PanVar,\n )\n from paradox.generate.statements import (\n ConditionalBlock,\n DictLoopBlock,\n ForLoopBlock,\n TryCatchBlock,\n )\n from paradox.typing import CrossType, FlexiType\n\n\nclass FileWriter:\n def __init__(\n self,\n f: IO[str],\n indentstr: str,\n baseindent: int = 0,\n ) -> None:\n self._f = f\n self._indentstr: str = indentstr\n self._baseindent: int = baseindent\n\n def _wline(self, indent: int, line: str) -> None:\n # when indent is -1, always write with no indent\n indentstr = \"\"\n if indent != -1:\n indentstr = self._indentstr * (indent + self._baseindent)\n self._f.write(indentstr + line + \"\\n\")\n\n def line0(self, line: str) -> None:\n self._wline(0, line)\n\n def line1(self, line: str) -> None:\n self._wline(1, line)\n\n def blank(self) -> None:\n self._f.write(\"\\n\")\n\n def with_more_indent(self) -> \"FileWriter\":\n return FileWriter(self._f, self._indentstr, self._baseindent + 1)\n\n\nclass Script(AcceptsStatements):\n def __init__(self) -> None:\n from paradox.generate.statements import Statements\n\n super().__init__()\n\n # TODO: can we just hold a list of Statements directly? Note this means we would need to\n # implement WantsImports as well\n self._content = Statements()\n self._file_comments: List[str] = []\n self._new_types: Dict[str, NewTypeDetails] = {}\n\n def add_file_comment(self, text: str) -> None:\n \"\"\"Supercedes the old FileSpec.filecomment()\"\"\"\n self._file_comments.append(text)\n\n def add_new_type(self, name: str, base: \"CrossType\", *, tsexport: bool = False) -> None:\n if name in self._new_types:\n raise InvalidLogic(f\"Cannot add two new types named {name!r}\")\n self._new_types[name] = NewTypeDetails(name, base, tsexport=tsexport)\n\n def also(self, stmt: AlsoParam) -> AlsoParam:\n return self._content.also(stmt)\n\n def blank(self) -> None:\n self._content.blank()\n\n def remark(self, text: str) -> None:\n self._content.remark(text)\n\n def alsoImportPy(self, module: str, names: List[str] = None) -> None:\n self._content.alsoImportPy(module, names)\n\n def alsoImportTS(self, module: str, names: List[str] = None) -> None:\n self._content.alsoImportTS(module, names)\n\n def alsoAppend(self, list_: \"Pannable\", value: \"Pannable\") -> None:\n self._content.alsoAppend(list_, value)\n\n def alsoRaise(self, ctor: str = None, *, msg: str = None, expr: \"PanExpr\" = None) -> None:\n self._content.alsoRaise(ctor, msg=msg, expr=expr)\n\n def alsoAssign(\n self,\n var: \"Union[PanVar, PanIndexAccess, PanKeyAccess]\",\n expr: \"Pannable\",\n ) -> None:\n self._content.alsoAssign(var, expr)\n\n def alsoDeclare(\n self,\n target: \"Union[str, PanVar]\",\n type: \"Union[None, FlexiType, Literal['no_type']]\",\n value: \"Union[Pannable, builtins.ellipsis]\" = ...,\n ) -> \"PanVar\":\n return self._content.alsoDeclare(target, type, value)\n\n @contextmanager\n def withTryBlock(self) -> \"Iterator[TryCatchBlock]\":\n with self._content.withTryBlock() as b:\n yield b\n\n @contextmanager\n def withCond(self, expr: \"PanExpr\") -> \"Iterator[ConditionalBlock]\":\n with self._content.withCond(expr) as cond:\n yield cond\n\n @contextmanager\n def withFor(\n self,\n assign: \"PanVar\",\n expr: \"Pannable\",\n ) -> \"Iterator[ForLoopBlock]\":\n with self._content.withFor(assign, expr) as loop:\n yield loop\n\n @contextmanager\n def withDictIter(\n self,\n v_dict: \"PanExpr\",\n v_val: \"PanVar\",\n v_key: \"PanVar\" = None,\n ) -> 
\"Iterator[DictLoopBlock]\":\n with self._content.withDictIter(v_dict, v_val, v_key) as loop:\n yield loop\n\n def write_to_path(\n self,\n target: Path,\n *,\n lang: str,\n indentstr: str = \" \",\n pretty: bool = False,\n phpnamespace: str = None,\n ) -> None:\n # TODO: add a targetversion arg which can be used to do things like choose a target\n # language version (e.g. lang=\"php\", targetversion=\"3.7\")\n with target.open(\"w\") as f:\n writer = FileWriter(f, indentstr=indentstr)\n self._write_to_writer(writer, lang=lang, pretty=pretty, phpnamespace=phpnamespace)\n\n def write_to_handle(\n self,\n handle: IO[str],\n *,\n lang: str,\n indentstr: str = \" \",\n pretty: bool = False,\n phpnamespace: str = None,\n ) -> None:\n # TODO: add a targetversion arg which can be used to do things like choose a target\n # language version (e.g. lang=\"php\", targetversion=\"3.7\")\n writer = FileWriter(handle, indentstr=indentstr)\n\n self._write_to_writer(writer, lang=lang, pretty=pretty, phpnamespace=phpnamespace)\n\n def get_source_code(\n self,\n *,\n lang: str,\n indentstr: str = \" \",\n pretty: bool = False,\n phpnamespace: str = None,\n ) -> str:\n # TODO: add a targetversion arg which can be used to do things like choose a target\n # language version (e.g. lang=\"php\", targetversion=\"3.7\")\n if pretty:\n raise NotImplementedError(\"Cannot prettify in-memory\")\n\n handle = io.StringIO()\n writer = FileWriter(handle, indentstr=indentstr)\n\n self._write_to_writer(writer, lang=lang, pretty=pretty, phpnamespace=phpnamespace)\n\n handle.seek(0)\n return handle.read()\n\n def _write_to_writer(\n self,\n writer: FileWriter,\n *,\n lang: str,\n pretty: bool,\n phpnamespace: Optional[str],\n ) -> None:\n if pretty:\n raise NotImplementedError(\"Prettifying is not yet supported\")\n\n if lang == \"php\":\n from paradox.output import php\n\n write_file_comments = php.write_file_comments\n write_top_imports = php.write_top_imports\n write_custom_types = php.write_custom_types\n\n writer.line0(\" no_slices:\n no_chosen_slice = no_slices\n\n rng = np.random.default_rng(seed)\n idxes = rng.choice(no_slices, size=no_chosen_slice, replace=False)\n\n slice_predictions = scan_df.Prediction.values[idxes]\n scan_prediction = majority_voting(slice_predictions)\n\n data[chosen_ratio][0].append(scan_prediction)\n data[chosen_ratio][1].append(lb)\n\n # print(np.unique(data[chosen_ratio][1], return_counts=True))\n\n for chosen_ratio in self.trials:\n f1 = f1_score(data[chosen_ratio][0], data[chosen_ratio][1], average=\"macro\")\n f1_scores[chosen_ratio].append(f1)\n\n report = classification_report(\n data[chosen_ratio][1],\n data[chosen_ratio][0],\n target_names_dict=target_names_dict,\n )\n reports[chosen_ratio].append(report)\n\n return f1_scores, reports\n\n def evaluate(self):\n \"\"\"Evaluate scan-level performance over n_bootstrap\n number of bootstraps\n \"\"\"\n all_reports = {}\n for chosen_ratio in self.trials:\n all_reports[chosen_ratio] = []\n\n with Pool(processes=NUM_WORKERS) as p:\n max_ = n_bootstrap\n with tqdm(total=max_) as pbar:\n for i, (f1_scores, reports) in enumerate(\n p.imap_unordered(self.evaluate_per_trial, range(n_bootstrap))\n ):\n for chosen_ratio in self.trials:\n self.data[chosen_ratio] += f1_scores[chosen_ratio]\n all_reports[chosen_ratio] += reports[chosen_ratio]\n\n pbar.update()\n\n # # PRINT AVERAGED REPORT\n dict_class = {\"Non\": 4, \"Aterial\": 7, \"Venous\": 10, \"Others\": 13}\n x = {\"Precision\": [], \"Recall\": [], \"F1-Score\": [], \"Support\": []}\n dict_vals = 
{\n \"Non\": {\"Precision\": [], \"Recall\": [], \"F1-Score\": [], \"Support\": []},\n \"Venous\": {\"Precision\": [], \"Recall\": [], \"F1-Score\": [], \"Support\": []},\n \"Aterial\": {\"Precision\": [], \"Recall\": [], \"F1-Score\": [], \"Support\": []},\n \"Others\": {\"Precision\": [], \"Recall\": [], \"F1-Score\": [], \"Support\": []},\n }\n reports = all_reports[self.chosen_ratio]\n\n for report in reports:\n for class_, dict_val in dict_vals.items():\n try:\n report_by_class = report[class_]\n except KeyError:\n report_by_class = {\n \"Precision\": 0,\n \"Recall\": 0,\n \"F1-Score\": 0,\n \"Support\": 0,\n }\n\n for i, metric in enumerate(dict_val):\n value = float(report_by_class[metric])\n dict_val[metric].append(value)\n\n for key, val in dict_class.items():\n metrics = []\n dict_val = dict_vals[key]\n print(f\" ===== {key} =====\")\n for i, metric in enumerate(dict_val):\n mean_metric = np.mean(dict_val[metric])\n mean_metric = f\"{mean_metric:.4f}\"\n print(f\"{metric}: {mean_metric}\")\n\n data = pd.DataFrame(self.data)\n data.to_csv(\"bootstrap.csv\", index=False)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n test_csv = args.prediction_file\n\n evaluator_ = evaluator(test_csv)\n evaluator_.evaluate()\n","repo_name":"vinbigdata-medical/MIDL2021-CT-Classification","sub_path":"study_evaluation/study_evaluate.py","file_name":"study_evaluate.py","file_ext":"py","file_size_in_byte":7642,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"15064878466","text":"'''\r\n파일 저장 명 : 5장_0516_1__최민호.py\r\n작성일 : 2023년 5월 16일\r\n학과 : 컴퓨터 공학부\r\n학번 : 202395032 \r\n이름 : 최민호\r\n설명 : 10개의 정수를 입력 받아 합을 구하는 프로그램을 작성하시오.\r\n 단, 짝수 번째에 입력되는 숫자는 양수를 음수로, 음수는 양수로 바꾸어 합을 구하시오.\r\n[문제 분석]\r\n반복하면서\r\n정수입력\r\n짝수번째 이면\r\n음수 -> 양수\r\n양수 -> 음수\r\n아니면\r\n합계 계산\r\n\r\n변수 : sum, num, count\r\n'''\r\ncount = 1\r\nsum = 0 \r\n\r\nwhile count <= 10 :\r\n num = int(input(str(count) + \"번째 정수를 입력하세요. : \"))\r\n if count % 2 == 0 :\r\n num = num * -1\r\n sum = sum + num\r\n count = count + 1\r\nprint(\"10개 정수의 합 : \", sum)\r\n\r\nprint(\"============================================================\")\r\n\r\ncount = 1\r\nsum = 0\r\nwhile True :\r\n num = int(input(\"{}번째 정수 입력\" .format(count)))\r\n if count % 2 == 0 :\r\n num = -num\r\n sum += num\r\n count += 1\r\n \r\n if count > 10 :\r\n break\r\nprint(\"10개 정수의 합 : {}\".format(sum))","repo_name":"minhokr/S-W-","sub_path":"chapter05/5장_0516_1_최민호.py","file_name":"5장_0516_1_최민호.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34819159778","text":"#! /usr/bin/env python3\n\nimport sys\nif len(sys.argv) != 4:\n sys.exit('This script finds sequences for samples of interest, given the sample names and index sequence in a mapping_oligos file. 
It needs the following inputs: input.fasta, mapping.oligos, input_I1.fastq\\n'\n         'Usage: ./my_script.py input.fasta mapping.oligos input_I1.fastq\\n')\n\nScript, InputFileName1, InputFileName2, InputFileName3 = sys.argv\n\nFASTA = open(InputFileName1, \"r\")\nMAPPING_OLIGOS = open(InputFileName2, \"r\")\nFASTQ = open(InputFileName3, \"r\").readlines()\nFASTQ = [line[:-1] for line in FASTQ]\n\n#creating dictionary\n\nFASTA_dict = {}\nkey = \"\"\n\n#filling dictionary with sequence id and sequence\n\n\nfor line in FASTA:\n    LINE = line.strip()\n    if LINE.startswith(\">\"):\n        key = LINE.strip(\">\")\n    else:\n        FASTA_dict[key] = LINE\n\n#creating dictionary with mapping oligo sequence as key and sample id as values within a list\n\nMAP_OLIS = {}\n\nfor row in MAPPING_OLIGOS:\n    parts = row.split()\n    MAP_OLIS[parts[2]] = [parts[3]]\n\n#adding sequences IDs to MAP_OLIS dictionary\n\nfor seq in MAP_OLIS.keys():\n    index_seq = seq[1:]\n    for line_no in range(0, len(FASTQ), 4):\n        if index_seq in FASTQ[line_no + 1]:\n            MAP_OLIS[seq].append(FASTQ[line_no])\n\n#Create output file with sample name. Add as sequence name sample name plus sequence number (from 1 to X, X = number of sequences for the sample), add fasta sequence.\nfor seq_list in MAP_OLIS.values():\n    for index_no in range(1, len(seq_list)):\n        SampleID = seq_list[0]\n        SeqID = seq_list[index_no]\n        OUTPUT = open(SampleID + \".fasta\", \"a\")\n        if SeqID.strip('@') in FASTA_dict:\n            print(\">\", SampleID, \"._\", index_no, \"\\n\", FASTA_dict[SeqID.strip('@')], sep=\"\", file=OUTPUT)\n        OUTPUT.close()\n","repo_name":"catesval/army_ant_myrmecophiles","sub_path":"extract_samples.py","file_name":"extract_samples.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18600254069","text":"def solution(board, moves):\n    stack = []\n    answer = 0\n    for i in range(len(moves)):\n        answer = pick_up(board, moves[i] - 1, stack, answer)\n\n    return answer\n\n\ndef pick_up(board, pick, stack, answer):\n    temp = 0\n    for i in range(len(board)):\n        if board[i][pick] != 0:\n            temp = board[i][pick]\n            board[i][pick] = 0\n            break\n\n    if temp != 0:\n        if len(stack) > 0 and stack[-1] == temp:\n            stack.pop()\n            answer += 2\n        else:\n            stack.append(temp)\n    return answer","repo_name":"Andrevile/Algorithm","sub_path":"Programmers/크레인 인형뽑기 게임.py","file_name":"크레인 인형뽑기 게임.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22606102477","text":"from src import dataloader\nfrom src import llm\n\ndef sequentialze(org_list, vocab_dict):\n    new_list = []\n    for element in org_list:\n        element_id = vocab_dict[element]\n\n        new_list.append(element_id)\n    return new_list\n\ndef seq2(org_list, vocab_dict):\n\n    return [vocab_dict[i] for i in org_list]\n\nif __name__ == '__main__':\n    train_path = './src/train.conll'\n    dev_path = './src/dev.conll'\n\n    train_dataset = dataloader.DataLoader(train_path, batch_size=10)\n    print(train_dataset)\n    dev_dataset = dataloader.DataLoader(dev_path, batch_size=10)\n\n    sent_word_list = train_dataset.sent_word_list\n    sent_tag_list = train_dataset.sent_tag_list\n    sent_char_list = train_dataset.sent_char_list\n    print(train_dataset.tag_dict)\n\n    # print(sent_word_list)\n    # print(train_dataset.word_dict)\n\n    print(sent_word_list[0])\n    
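# Minimal sketch with a hypothetical two-token vocabulary: both helpers map\n    # a token list onto id space and should agree, e.g.\n    # sequentialze(['a', 'b', 'a'], {'a': 0, 'b': 1}) -> [0, 1, 0]\n    assert seq2(['a', 'b', 'a'], {'a': 0, 'b': 1}) == [0, 1, 0]\n    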
sequentialzed_list = sequentialze(sent_word_list[0], train_dataset.word_dict)\n print(sequentialzed_list)\n print(len(sent_word_list[0]))\n print(len(sequentialzed_list))\n\n print(seq2(sent_word_list[0], train_dataset.word_dict))\n\n print(train_dataset.seqlized_sent_word_list[0])\n\n '''\n '08_AD_遍'\n ('15', 27, (370,))\n '''\n\n\n myllm = llm.LogLinearModel(train_dataset, dev_dataset)\n # myllm.create_feature_space()\n # print(myllm.epsilon)\n # print(len(myllm.epsilon))\n myllm.online_training(epochs=1)","repo_name":"guangyuli-uoe/sulzh","sub_path":"llm_memm/t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23420341391","text":"with open('input.in','r') as f:\n\tlines = f.readlines()\nf.close\n\nf = open('output.out','w')\n\ntmp = []\nfor l in lines:\n\ttry: tmp.append(int(l.strip()))\n\texcept ValueError: tmp.append(l.split())\nlines = tmp\n\nT = lines[0]\n\nfor t in range(T):\n\tC = t*10+1\n\tR1 = lines[C+lines[C]]\n\tR2 = lines[C+5+lines[C+5]]\n\ti = 0\n\tcard = ''\n\tfor c1 in R1:\n\t\tfor c2 in R2:\n\t\t\tif c1 == c2: i += 1; card = c1\n\n\tif i == 0: f.write('Case #'+str(t+1)+': Volunteer cheated!\\n')\n\telif i == 1: f.write('Case #'+str(t+1)+': '+card+'\\n')\n\telse: f.write('Case #'+str(t+1)+': Bad magician!\\n')\n\nf.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/917.py","file_name":"917.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15895819595","text":"import http.client\nimport json\nfrom urllib.parse import quote\n\n\ndef get_teacher_id_by_name(teacher_name: str) -> int:\n encoded_teacher_name = quote(teacher_name).replace(' ', '%20')\n server_address = 'ruz.spbstu.ru'\n request = f'https://ruz.spbstu.ru/api/v1/ruz/search/teachers?&q={encoded_teacher_name}'\n try:\n connection = http.client.HTTPSConnection(server_address)\n connection.request('GET', request)\n response = connection.getresponse()\n ans = response.read()\n connection.close()\n\n ans_dict = json.loads(ans)\n if ans_dict['teachers'][0]['full_name'].lower() == teacher_name.lower():\n return ans_dict['teachers'][0]['id']\n else:\n return -1\n except Exception as _ex:\n print(_ex)\n return -1\n\n\ndef get_teacher_schedule_dict(teacher_id: int, date: str):\n date = date.replace('.', '-')\n server_address = 'ruz.spbstu.ru'\n request = f'https://ruz.spbstu.ru/api/v1/ruz/teachers/{teacher_id}/scheduler?date={date}'\n\n try:\n connection = http.client.HTTPSConnection(server_address)\n connection.request('GET', request)\n response = connection.getresponse()\n ans = response.read()\n connection.close()\n\n ans_dict = json.loads(ans)\n\n return ans_dict\n except:\n return -1\n\n\nclass Teacher__lesson:\n def __init__(\n self,\n subject: str,\n time_start: str,\n time_end: str,\n typeObj: str,\n teacher: str,\n groups: list,\n place: str\n ):\n self.subject = subject\n self.time_start = time_start\n self.time_end = time_end\n self.typeObj = typeObj\n self.teacher = teacher\n self.groups = groups\n self.place = place\n\n def __str__(self):\n subject_line = f'{self.subject} ({self.typeObj})'\n time_line = f'\\U000023F0 {self.time_start}-{self.time_end}'\n teacher_line = f'\\U0001F9D1 {self.teacher}'\n groups_line = f'\\U0001F393 Группы: '\n for group in self.groups:\n groups_line += f'{group}, '\n groups_line = groups_line[:-2] + '.'\n 
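# Worked example with hypothetical group names: the slice above strips the\n        # trailing \", \" left by the loop, e.g.\n        # 'Группы: 153/1, 153/2, ' -> 'Группы: 153/1, 153/2.'\n        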
place_line = f'\\U0001F3EB {self.place}'\n\n lesson = f'{time_line} {subject_line}\\n{teacher_line}\\n{place_line}\\n{groups_line}\\n\\n'\n\n return lesson\n\n\ndef teacher_lesson_from_dict(dict: dict, date: str) -> str:\n schedule = 'В этот день у преподавателя нет пар!'\n try:\n for day in dict[\"days\"]:\n if date.split('.')[2] in day[\"date\"].split('-')[2]:\n schedule = f'\\U0001F4C6 {day[\"date\"]} \\U0001F4C6\\n\\n'\n for lesson in day[\"lessons\"]:\n try:\n subject = lesson[\"subject\"]\n except:\n subject = f\"Изучение темной стороны силы \\U0001F608\"\n try:\n time_start = lesson[\"time_start\"]\n time_end = lesson[\"time_end\"]\n except:\n time_start = f\"\\U0001F608 00:00\"\n time_end = f\"23:59 \\U0001F608\"\n try:\n typeObj = lesson[\"typeObj\"][\"name\"]\n except:\n typeObj = f\"Что-то очень интересное \\U0001F608\"\n try:\n teacher = lesson[\"teachers\"][0][\"full_name\"]\n except:\n teacher = f\"Дарт-Вейдер \\U0001F608\"\n try:\n groups = []\n for group in lesson[\"groups\"]:\n groups.append(group[\"name\"])\n except:\n groups = [\"Юнлинги \\U0001F608\"]\n try:\n place = f'{lesson[\"auditories\"][0][\"building\"][\"name\"]}, ауд. {lesson[\"auditories\"][0][\"name\"]}'\n except:\n place = f\"Звезда смерти \\U0001F608\"\n\n lesson_str = Teacher__lesson(\n subject=subject,\n time_start=time_start,\n time_end=time_end,\n typeObj=typeObj,\n teacher=teacher,\n groups=groups,\n place=place\n )\n\n schedule += str(lesson_str)\n\n break\n except:\n schedule = 'В этот день у преподавателя нет пар!'\n\n return schedule\n\n","repo_name":"BakhmetievStanislav/Polykek_bot_fo_free_server","sub_path":"teacher_schedule_function.py","file_name":"teacher_schedule_function.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15392022879","text":"import cv2\nimport mediapipe as mp\n\nfrom mediapipe.framework.formats import landmark_pb2\nmp_drawing = mp.solutions.drawing_utils\nmp_drawing_styles = mp.solutions.drawing_styles\nmp_hands = mp.solutions.hands\nmp_pose = mp.solutions.pose\n\n#info on NormalizedLandmarkList manipulation https://github.com/google/mediapipe/issues/2031\ndef combine_hand_pose_lms(handlms,poselms):\n lm_list = []\n if handlms is not None:\n for hlm in handlms.landmark:\n lm_list.append(hlm)\n if poselms is not None:\n for plm in poselms.landmark:\n lm_list.append(plm)\n\n #hand_pose_lms = landmark_pb2.NormalizedLandmarkList(\n #landmark = [\n #lm_list\n #]\n #) \n if lm_list:\n hand_pose_lms = landmark_pb2.NormalizedLandmarkList(\n landmark = lm_list\n ) \n return hand_pose_lms\n else:\n return None\n \n\n\n#from https://google.github.io/mediapipe/solutions/hands.html\n# For webcam input:\ncap = cv2.VideoCapture(0)\nwith mp_hands.Hands(\n model_complexity=0,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5) as hands:\n #set up pose\n pose = mp_pose.Pose(min_detection_confidence=0.5,min_tracking_confidence=0.5)\n #while cap.isOpened()\n while cap.isOpened():\n success, image = cap.read()\n if not success:\n print(\"Ignoring empty camera frame.\")\n # If loading a video, use 'break' instead of 'continue'.\n continue\n\n # To improve performance, optionally mark the image as not writeable to\n # pass by reference.\n image.flags.writeable = False\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n results = hands.process(image)\n poseresults = pose.process(image)\n \n #print(\"hands\")\n #print(results.multi_hand_landmarks)\n #print(\"pose\")\n 
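# Aside, a minimal sketch relying on documented MediaPipe landmark counts:\n    # Hands reports 21 landmarks per detected hand and Pose reports 33, so a\n    # combined list for one hand should hold 21 + 33 = 54 entries, e.g.\n    # if results.multi_hand_landmarks and poseresults.pose_landmarks:\n    #     combined = combine_hand_pose_lms(results.multi_hand_landmarks[0],\n    #                                      poseresults.pose_landmarks)\n    #     assert len(combined.landmark) == 21 + 33\n    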
#print(type(poseresults.pose_landmarks))\n # Draw the hand annotations on the image.\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n if results.multi_hand_landmarks:\n for hand_landmarks in results.multi_hand_landmarks:\n lms = combine_hand_pose_lms(hand_landmarks,poseresults.pose_landmarks)\n if lms is not None:\n mp_drawing.draw_landmarks(\n image,\n lms,\n #mp_hands.HAND_CONNECTIONS,\n #mp_drawing_styles.get_default_hand_landmarks_style(),\n #mp_drawing_styles.get_default_hand_connections_style()\n )\n # Flip the image horizontally for a selfie-view display.\n cv2.imshow('MediaPipe Hands', cv2.flip(image, 1))\n if cv2.waitKey(5) & 0xFF == 27:\n break\n\ncap.release()","repo_name":"rosiehiggins/Metaverse-NVC-research-project","sub_path":"python-scripts/demos/mpHandsPoseDemo.py","file_name":"mpHandsPoseDemo.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35986964221","text":"import pygame, random, sys\nfrom pygame.locals import *\n\n\ndef collide(x1, x2, y1, y2, w1, w2, h1, h2): #Collision function\n if x1+w1>x2 and x1y2 and y1= 2:\n\n #Check if the snake collides with itself\n if collide(xs[0], xs[i], ys[0], ys[i], 20, 20, 20, 20):\n die(display, score)\n i-= 1\n\n #If Snake collides with the fruit, add a segment\n if collide(xs[0], fruit_pos[0], ys[0], fruit_pos[1], 20, 10, 20, 10):\n score+=(1*scoremultiplier)\n xs.append(700)\n ys.append(700)\n fruit_pos=(random.randint(0,590),random.randint(0,590))\n\n #If Snake collides with special fruit, give points and slow/speed game\n if collide(xs[0], fastfruit_pos[0], ys[0], fastfruit_pos[1], 20, 10, 20, 10):\n scoremultiplier=scoremultiplier*2\n fastfruit_pos=(random.randint(0,590),random.randint(0,590))\n slowfruit_pos=(random.randint(0,590),random.randint(0,590))\n\n if collide(xs[0], discofruit_pos[0], ys[0], discofruit_pos[1], 20, 10, 20, 10):\n scoremultiplier=scoremultiplier*2\n \n \n\n if collide(xs[0], slowfruit_pos[0], ys[0], slowfruit_pos[1], 20, 10, 20, 10):\n scoremultiplier=scoremultiplier/2\n fastfruit_pos=(random.randint(0,590),random.randint(0,590))\n slowfruit_pos=(random.randint(0,590),random.randint(0,590))\n #If snake hits the borders\n if xs[0] < 0 or xs[0] > 580 or ys[0] < 0 or ys[0] > 580:\n die(display, score)\n\n i = len(xs)-1\n while i >= 1:\t\t\t #Shift the segments position\n xs[i] = xs[i-1]\n ys[i] = ys[i-1]\n i -= 1\n if direction==0: xs[0] += 20\t #move the first segment in the direction chosen\n elif direction==1: ys[0] -= 20\n elif direction==2: xs[0] -= 20\n elif direction==3: ys[0] += 20\n display.fill((255, 228, 122))\t #background\n for i in range(0, len(xs)): #print segments to screen\n display.blit(seg, (xs[i], ys[i]))\n display.blit(fruit_image, fruit_pos)\n display.blit(fastfruit_image, fastfruit_pos)\n display.blit(slowfruit_image, slowfruit_pos)\n text=font.render(str(score), True, (0, 0, 0))\n display.blit(text, (10, 10))\n pygame.display.update()\n\nif __name__=='__main__':\n pygame.init()\n game()\n","repo_name":"pnaquila/Snake-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13754477376","text":"from collections import namedtuple, OrderedDict\nimport yaml\nimport torch\nfrom torch import nn\nimport math\nimport numpy as np\nfrom torchvision.ops.boxes import batched_nms\nimport os\nimport cv2\nimport 
uuid\nimport webcolors\nfrom typing import Union\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay\n\ndef create_namedtuple_from_dict(obj):\n if isinstance(obj, dict):\n fields = sorted(obj.keys())\n namedtuple_type = namedtuple(\n typename='GenericObject',\n field_names=fields,\n rename=True,\n )\n field_value_pairs = OrderedDict(\n (str(field), create_namedtuple_from_dict(obj[field]))\n for field in fields\n )\n try:\n return namedtuple_type(**field_value_pairs)\n except TypeError:\n # Cannot create namedtuple instance so fallback to dict (invalid attribute names)\n return dict(**field_value_pairs)\n elif isinstance(obj, (list, set, tuple, frozenset)):\n return [create_namedtuple_from_dict(item) for item in obj]\n else:\n return obj\n\nclass parameters_read:\n def __init__(self, params_path):\n self.params = yaml.safe_load(open(params_path, encoding='utf-8').read())\n\n def update(self, dictionary):\n self.params = dictionary\n\n def __getattr__(self, item):\n return self.params.get(item, None)\n\nclass Dict2Class(object):\n def __init__(self, my_dict):\n for key in my_dict:\n setattr(self, key, my_dict[key])\n\n\ndef calc_iou(a, b):\n # a(anchor) [boxes, (y1, x1, y2, x2)]\n # b(gt, coco-style) [boxes, (x1, y1, x2, y2)]\n\n area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])\n iw = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 2]) - torch.max(torch.unsqueeze(a[:, 1], 1), b[:, 0])\n ih = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 3]) - torch.max(torch.unsqueeze(a[:, 0], 1), b[:, 1])\n iw = torch.clamp(iw, min=0)\n ih = torch.clamp(ih, min=0)\n ua = torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1) + area - iw * ih\n ua = torch.clamp(ua, min=1e-8)\n intersection = iw * ih\n IoU = intersection / ua\n\n return IoU\n\nclass BBoxTransform(nn.Module):\n def forward(self, anchors, regression):\n \"\"\"\n decode_box_outputs adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py\n\n Args:\n anchors: [batchsize, boxes, (y1, x1, y2, x2)]\n regression: [batchsize, boxes, (dy, dx, dh, dw)]\n\n Returns:\n\n \"\"\"\n dim = len(anchors.shape) - 1\n\n y_centers_a = (anchors[..., 0] + anchors[..., 2]) / 2\n x_centers_a = (anchors[..., 1] + anchors[..., 3]) / 2\n ha = anchors[..., 2] - anchors[..., 0]\n wa = anchors[..., 3] - anchors[..., 1]\n\n w = regression[..., 3].exp() * wa\n h = regression[..., 2].exp() * ha\n\n y_centers = regression[..., 0] * ha + y_centers_a\n x_centers = regression[..., 1] * wa + x_centers_a\n\n ymin = y_centers - h / 2.\n xmin = x_centers - w / 2.\n ymax = y_centers + h / 2.\n xmax = x_centers + w / 2.\n\n return torch.stack([xmin, ymin, xmax, ymax], dim=dim)\n\ndef ciou(bboxes1, bboxes2):\n bboxes1 = torch.sigmoid(bboxes1)\n bboxes2 = torch.sigmoid(bboxes2)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n cious = torch.zeros((rows, cols))\n if rows * cols == 0:\n return cious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n cious = torch.zeros((cols, rows))\n exchange = True\n w1 = torch.exp(bboxes1[:, 2])\n h1 = torch.exp(bboxes1[:, 3])\n w2 = torch.exp(bboxes2[:, 2])\n h2 = torch.exp(bboxes2[:, 3])\n area1 = w1 * h1\n area2 = w2 * h2\n center_x1 = bboxes1[:, 0]\n center_y1 = bboxes1[:, 1]\n center_x2 = bboxes2[:, 0]\n center_y2 = bboxes2[:, 1]\n\n inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)\n inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)\n inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)\n inter_b = 
torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)\n inter_area = torch.clamp((inter_r - inter_l), min=0) * torch.clamp((inter_b - inter_t), min=0)\n\n c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)\n c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)\n c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)\n c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)\n\n inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2\n c_diag = torch.clamp((c_r - c_l), min=0)**2 + torch.clamp((c_b - c_t), min=0)**2\n\n union = area1+area2-inter_area\n u = (inter_diag) / c_diag\n iou = inter_area / union\n v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w2 / h2) - torch.atan(w1 / h1)), 2)\n with torch.no_grad():\n S = (iou>0.5).float()\n alpha = S*v/(1-iou+v)\n\n cious = iou - u - alpha * v\n cious = torch.clamp(cious, min=-1.0, max=1.0)\n if exchange:\n cious = cious.T\n return torch.mean(1 - cious)\n # return torch.sum(1 - cious)\n\ndef diou(bboxes1, bboxes2):\n bboxes1 = torch.sigmoid(bboxes1)\n bboxes2 = torch.sigmoid(bboxes2)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n cious = torch.zeros((rows, cols))\n if rows * cols == 0:\n return cious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n cious = torch.zeros((cols, rows))\n exchange = True\n w1 = torch.exp(bboxes1[:, 2])\n h1 = torch.exp(bboxes1[:, 3])\n w2 = torch.exp(bboxes2[:, 2])\n h2 = torch.exp(bboxes2[:, 3])\n area1 = w1 * h1\n area2 = w2 * h2\n center_x1 = bboxes1[:, 0]\n center_y1 = bboxes1[:, 1]\n center_x2 = bboxes2[:, 0]\n center_y2 = bboxes2[:, 1]\n\n inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)\n inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)\n inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)\n inter_b = torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)\n inter_area = torch.clamp((inter_r - inter_l),min=0) * torch.clamp((inter_b - inter_t),min=0)\n\n c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)\n c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)\n c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)\n c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)\n\n inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2\n c_diag = torch.clamp((c_r - c_l),min=0)**2 + torch.clamp((c_b - c_t),min=0)**2\n\n union = area1+area2-inter_area\n u = (inter_diag) / c_diag\n iou = inter_area / union\n dious = iou - u\n dious = torch.clamp(dious,min=-1.0,max = 1.0)\n if exchange:\n dious = dious.T\n # return torch.sum(1 - dious)\n return torch.mean(1 - dious)\n\n\ndef giou(gt_bboxes, pr_bboxes, reduction='mean'):\n \"\"\"\n gt_bboxes: tensor (-1, 4) xyxy\n pr_bboxes: tensor (-1, 4) xyxy\n loss proposed in the paper of giou\n \"\"\"\n gt_area = (gt_bboxes[:, 2]-gt_bboxes[:, 0])*(gt_bboxes[:, 3]-gt_bboxes[:, 1])\n pr_area = (pr_bboxes[:, 2]-pr_bboxes[:, 0])*(pr_bboxes[:, 3]-pr_bboxes[:, 1])\n\n # iou\n lt = torch.max(gt_bboxes[:, :2], pr_bboxes[:, :2])\n rb = torch.min(gt_bboxes[:, 2:], pr_bboxes[:, 2:])\n TO_REMOVE = 1\n wh = (rb - lt + TO_REMOVE).clamp(min=0)\n inter = wh[:, 0] * wh[:, 1]\n union = gt_area + pr_area - inter\n iou = inter / union\n # enclosure\n lt = torch.min(gt_bboxes[:, :2], pr_bboxes[:, :2])\n rb = torch.max(gt_bboxes[:, 2:], pr_bboxes[:, 2:])\n wh = (rb - lt + TO_REMOVE).clamp(min=0)\n enclosure = wh[:, 0] * wh[:, 1]\n\n giou = iou - (enclosure-union)/enclosure\n loss = 1. 
- giou\n if reduction == 'mean':\n loss = loss.mean()\n elif reduction == 'sum':\n loss = loss.sum()\n elif reduction == 'none':\n pass\n return loss\n\nclass ClipBoxes(nn.Module):\n\n def __init__(self):\n super(ClipBoxes, self).__init__()\n\n def forward(self, boxes, img):\n batch_size, num_channels, height, width = img.shape\n\n boxes[:, :, 0] = torch.clamp(boxes[:, :, 0], min=0)\n boxes[:, :, 1] = torch.clamp(boxes[:, :, 1], min=0)\n\n boxes[:, :, 2] = torch.clamp(boxes[:, :, 2], max=width - 1)\n boxes[:, :, 3] = torch.clamp(boxes[:, :, 3], max=height - 1)\n\n return boxes\n\ndef postprocess(x, anchors, regression, classification, regressBoxes, clipBoxes,\n score_threshold, nms_threshold, nms_method='hard_nms',\n co_coef=0, pyramid_sup='p7'):\n\n transformed_anchors = regressBoxes(anchors, regression)\n transformed_anchors = clipBoxes(transformed_anchors, x)\n\n scores = torch.max(classification, dim=2, keepdim=True)[0]\n scores_over_thresh = (scores > score_threshold)[:, :, 0]\n out = []\n for i in range(x.shape[0]):\n if scores_over_thresh[i].sum() == 0:\n out.append({\n 'rois': np.array(()),\n 'class_ids': np.array(()),\n 'scores': np.array(()),\n })\n continue\n\n if co_coef == 0:\n p3_idx = 36864\n p4_idx = 9216\n p5_idx = 2304\n p6_idx = 576\n elif co_coef == 1:\n p3_idx = 57600\n p4_idx = 14400\n p5_idx = 3600\n p6_idx = 900\n elif co_coef == 2:\n p3_idx = 82944\n p4_idx = 20736\n p5_idx = 5184\n p6_idx = 1296\n elif co_coef == 3:\n p3_idx = 112896\n p4_idx = 28224\n p5_idx = 7056\n p6_idx = 1764\n\n if pyramid_sup == 'p7':\n sup_list = p3_idx + p4_idx + p5_idx + p6_idx\n elif pyramid_sup == 'p6_7':\n sup_list = p3_idx + p4_idx + p5_idx\n elif pyramid_sup == 'p5_6_7':\n sup_list = p3_idx + p4_idx\n\n sup_anchor = transformed_anchors[i][:sup_list]\n sup_class = classification[i][:sup_list]\n sup_score = scores[i][:sup_list]\n sup_score_tsh = scores_over_thresh[i][:sup_list]\n\n classification_per = torch.unsqueeze(sup_class, 0)[i, torch.unsqueeze(sup_score_tsh, 0)[i, :], ...].permute(1, 0)\n\n # transformed_anchors_per = torch.unsqueeze(sup_anchor, 0)[i, torch.unsqueeze(sup_score_tsh, 0)[i, :], ...]\n # scores_per = torch.unsqueeze(sup_score, 0)[i, torch.unsqueeze(sup_score_tsh, 0)[i, :], ...]\n\n transformed_anchors_per = torch.unsqueeze(sup_anchor, 0)[i, torch.unsqueeze(sup_score_tsh, 0)[i, :], ...].cuda()\n scores_per = torch.unsqueeze(sup_score, 0)[i, torch.unsqueeze(sup_score_tsh, 0)[i, :], ...].cuda()\n\n if len(classification_per[i]) < 1:\n return 'no_result'\n\n scores_, classes_ = classification_per.max(dim=0)\n\n if nms_method == 'hard_nms':\n anchors_nms_idx = batched_nms(transformed_anchors_per, scores_per[:, 0],\n classes_, nms_threshold)\n\n\n if anchors_nms_idx.shape[0] != 0:\n classes_ = classes_[anchors_nms_idx]\n scores_ = scores_[anchors_nms_idx]\n boxes_ = transformed_anchors_per[anchors_nms_idx, :]\n\n out.append({\n # 'rois': boxes_.cpu().numpy(),\n # 'class_ids': classes_.cpu().numpy(),\n # 'scores': scores_.cpu().numpy(),\n 'rois': boxes_.cpu().detach().numpy(),\n 'class_ids': classes_.cpu().detach().numpy(),\n 'scores': scores_.cpu().detach().numpy(),\n })\n else:\n out.append({\n 'rois': np.array(()),\n 'class_ids': np.array(()),\n 'scores': np.array(()),\n })\n\n return out\n\ndef display(preds, imgs, obj_list, imshow=True, imwrite=False):\n for i in range(len(imgs)):\n if len(preds[i]['rois']) == 0:\n continue\n\n imgs[i] = imgs[i].copy()\n\n for j in range(len(preds[i]['rois'])):\n (x1, y1, x2, y2) = preds[i]['rois'][j].astype(np.int)\n obj = 
obj_list[preds[i]['class_ids'][j]]\n score = float(preds[i]['scores'][j])\n\n plot_one_box(imgs[i], [x1, y1, x2, y2], label=obj, score=score,\n color=color_list[get_index_label(obj, obj_list)])\n if imshow:\n cv2.imshow('img', imgs[i])\n cv2.waitKey(0)\n\n if imwrite:\n os.makedirs('test/', exist_ok=True)\n cv2.imwrite(f'test/{uuid.uuid4().hex}.jpg', imgs[i])\n\ndef plot_one_box(img, coord, label=None, score=None, color=None, line_thickness=None):\n tl = line_thickness or int(round(0.001 * max(img.shape[0:2]))) # line thickness\n color = color\n c1, c2 = (int(coord[0]), int(coord[1])), (int(coord[2]), int(coord[3]))\n cv2.rectangle(img, c1, c2, color, thickness=tl)\n if label:\n tf = max(tl - 2, 1) # font thickness\n s_size = cv2.getTextSize(str('{:.0%}'.format(score)), 0, fontScale=float(tl) / 3, thickness=tf)[0]\n t_size = cv2.getTextSize(label, 0, fontScale=float(tl) / 3, thickness=tf)[0]\n c2 = c1[0] + t_size[0] + s_size[0] + 15, c1[1] - t_size[1] - 3\n cv2.rectangle(img, c1, c2, color, -1) # filled\n cv2.putText(img, '{}: {:.0%}'.format(label, score), (c1[0], c1[1] - 2), 0, float(tl) / 3, [0, 0, 0],\n thickness=tf, lineType=cv2.FONT_HERSHEY_SIMPLEX)\n\ndef get_index_label(label, obj_list):\n index = int(obj_list.index(label))\n return index\n\ndef standard_to_bgr(list_color_name):\n standard = []\n for i in range(len(list_color_name) - 36): # -36 used to match the len(obj_list)\n standard.append(from_colorname_to_bgr(list_color_name[i]))\n return standard\n\ndef from_colorname_to_bgr(color):\n rgb_color = webcolors.name_to_rgb(color)\n result = (rgb_color.blue, rgb_color.green, rgb_color.red)\n return result\n\n\nSTANDARD_COLORS = [\n 'LawnGreen', 'Chartreuse', 'Aqua', 'Beige', 'Azure', 'BlanchedAlmond', 'Bisque',\n 'Aquamarine', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',\n 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',\n 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',\n 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',\n 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',\n 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',\n 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',\n 'Lavender', 'LavenderBlush', 'AliceBlue', 'LemonChiffon', 'LightBlue',\n 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',\n 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',\n 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',\n 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',\n 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',\n 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',\n 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',\n 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',\n 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',\n 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',\n 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',\n 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',\n 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',\n 'WhiteSmoke', 'Yellow', 'YellowGreen'\n]\n\ncolor_list = standard_to_bgr(STANDARD_COLORS)\n\ndef resize_img(image, img_size):\n height, width, _ = image.shape\n if height > width:\n scale = img_size / height\n resized_height = img_size\n 
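# Worked example with hypothetical sizes: a 1000x500 (HxW) input and\n        # img_size=512 give scale = 512/1000 = 0.512, so the image is resized\n        # to 512x256 and then padded into a 512x512 canvas below.\n        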
resized_width = int(width * scale)\n    else:\n        scale = img_size / width\n        resized_height = int(height * scale)\n        resized_width = img_size\n\n    image = cv2.resize(image, (resized_width, resized_height), interpolation=cv2.INTER_LINEAR)\n\n    new_image = np.zeros((img_size, img_size, 3))\n    new_image[0:resized_height, 0:resized_width] = image\n    return {'img': torch.from_numpy(new_image).to(torch.float32), 'scale': scale}\n\ndef invert_affine(metas: Union[float, list, tuple], preds):\n    for i in range(len(preds)):\n        if len(preds[i]['rois']) == 0:\n            continue\n        else:\n            if isinstance(metas, float):\n                preds[i]['rois'][:, [0, 2]] = preds[i]['rois'][:, [0, 2]] / metas\n                preds[i]['rois'][:, [1, 3]] = preds[i]['rois'][:, [1, 3]] / metas\n            else:\n                new_w, new_h, old_w, old_h, padding_w, padding_h = metas[i]\n                preds[i]['rois'][:, [0, 2]] = preds[i]['rois'][:, [0, 2]] / (new_w / old_w)\n                preds[i]['rois'][:, [1, 3]] = preds[i]['rois'][:, [1, 3]] / (new_h / old_h)\n    return preds\n\ndef confusion_matrix_plot(true, predict, normalize=False):\n    true = true.flatten()\n    predict = predict.flatten()\n    if normalize:\n        conf_mat = confusion_matrix(true, predict, normalize='true')\n    else:\n        conf_mat = confusion_matrix(true, predict)\n    return conf_mat\n\ndef resize(input,\n           size=None,\n           scale_factor=None,\n           mode='nearest',\n           align_corners=None):\n\n    if isinstance(size, torch.Size):\n        size = tuple(int(x) for x in size)\n    return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)","repo_name":"lycaoduong/acne_segmentation","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5259352466","text":"def do_band_curvature ( data_controller ):\n  '''\n  Calculate the Gradient of the k-space Hamiltonian, 'Hksp'\n  Requires 'Hksp' and 'pksp'\n  Yields 'd2Hksp'\n\n  Arguments:\n    None\n\n  Returns:\n    None\n  '''\n\n  from .do_d2Hd2k import do_d2Hd2k_ij\n  import numpy as np\n  from .perturb_split import perturb_split\n\n  ary,attr = data_controller.data_dicts()\n  bnd = attr['bnd'] \n  nawf = attr['nawf'] \n  E_k = ary['E_k'] \n\n  # not really the inverse mass tensor..it's actually tksp\n  # but we are calling it d2Ed2k for now to save memory.\n  d2Ed2k,dvec_list = do_d2Hd2k_ij(ary['Hksp'],ary['Rfft'],attr['alat'],\n                                  attr['npool'],ary['v_k'],\n                                  bnd,ary['degen'])\n\n  \n  # d2Ed2k is only the 6 unique components of the curvature \n  # (inverse effective mass ) tensor. 
This is one to save memory.\n ij_ind = np.array([[0,0],[1,1],[2,2],[0,1],[0,2],[1,2]],dtype=int)\n E_temp = np.zeros((bnd,nawf),order=\"C\")\n\n #----------------------\n # for d2E/d2k_ij\n #----------------------\n for ispin in range(d2Ed2k.shape[3]):\n for ik in range(d2Ed2k.shape[1]): \n\n # tksp_ij = \n\n # ij component of second derivative of the energy is:\n # tksp_ij + sum_i( (pksp_i*pksp_j.T + pksp_j*pksp_i.T)/(E_i-E_j) )\n E_temp = ((E_k[ik,:,ispin]-E_k[ik,:,ispin][:,None])[:,:]).T\n E_temp[np.where(np.abs(E_temp)<1.e-5)]=np.inf\n\n for ij in range(ij_ind.shape[0]):\n ipol = ij_ind[ij,0]\n jpol = ij_ind[ij,1]\n\n # to avoid a zero in the denominator when E_i=E_j\n if dvec_list[ij][ispin][ik].size:\n v_k=dvec_list[ij][ispin][ik]\n else:\n v_k=ary['v_k'][ik,:,:,ispin]\n \n pksp_i=np.conj(v_k.T).dot(ary['dHksp'][ik,ipol,:,:,ispin]).dot(v_k)\n pksp_j=np.conj(v_k.T).dot(ary['dHksp'][ik,jpol,:,:,ispin]).dot(v_k)\n\n # this is where d2Ed2k becomes the actual curvature tensor\n d2Ed2k[ij,ik,:,ispin] += np.sum((((pksp_i*pksp_j.T +\\\n pksp_j*pksp_i.T) / E_temp).real),axis=1)[:bnd]\n\n ary['d2Ed2k']=d2Ed2k\n","repo_name":"marcobn/PAOFLOW","sub_path":"src/defs/do_band_curvature.py","file_name":"do_band_curvature.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"40882226157","text":"import os\nimport sys\nfrom typing import List, Tuple\nimport yaml\n\nclass _Configuration:\n name: str\n highscores: List[Tuple[str, int]]\n location: str\n\n def __init__(self, name, highscores, location=\"\"):\n self.name = name\n self.highscores = highscores\n self.location = location\n\n def register_highscore(self, name, new_score):\n for i, score in enumerate(self.highscores):\n if score[1] < new_score:\n self.highscores.insert(i, (name, new_score))\n break\n else:\n self.highscores.append((name, new_score))\n if len(self.highscores) > 10:\n self.highscores = self.highscores[:10]\n\n def is_highest_score(self, score):\n if not self.highscores:\n return True\n return self.highscores[0][1] <= score\n\n def is_highscore(self, score):\n if not self.highscores:\n return True\n return self.highscores[-1][1] < score or len(self.highscores) < 10\n\n @property\n def highest_score(self):\n if not self.highscores:\n return None\n return self.highscores[0]\n\n def to_dict(self):\n return {'name': self.name, 'highscores': self.highscores}\n\n def __iter__(self):\n yield from self.to_dict().items()\n\n @classmethod\n def load_from(cls, location):\n with open(location, 'r') as file:\n data = yaml.load(file.read())\n return cls(location=location, **data)\n\n def save(self, location=\"\"):\n if not (location or self.location):\n raise ValueError(\"No location provided nor load location stored for configuration file to save in\")\n\n with open(location or self.location, 'w') as file:\n file.write(yaml.dump(self.to_dict()))\n\ntry:\n Configuration = _Configuration.load_from('rsc/data.yaml')\nexcept:\n Configuration = _Configuration(None, [], location='rsc/data.yaml')","repo_name":"mrRachar/Tracta","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70388380674","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom itertools import count\nfrom numbers import Number\nfrom sys import float_info\nfrom typing import List, Optional\n\nimport numpy as np\nfrom 
sklearn.utils.validation import check_is_fitted\nfrom statsmodels.regression.linear_model import RegressionResultsWrapper\nfrom statsmodels.regression.quantile_regression import QuantReg\nfrom statsmodels.tsa.stattools import adfuller\n\nfrom fforma.base import Naive\n\n\ndef embed(x: np.array, p: int) -> np.array:\n \"\"\"Embeds the time series x into a low-dimensional Euclidean space.\n\n Parameters\n ----------\n x: numpy array\n Time series.\n p: int\n Embedding dimension.\n\n Notes\n -----\n [1] embed(x, p) = embed(x, [0, 1, ..., p - 1])\n\n References\n ----------\n [1] https://www.rdocumentation.org/packages/stats/versions/3.6.2/topics/embed\n \"\"\"\n is_p_int = isinstance(p, int)\n\n if is_p_int and p == 0:\n raise Exception('Embedding dimension should not be 0')\n\n rolls = range(p) if is_p_int else p\n min_p = p - 1 if is_p_int else np.max(p)\n\n x = np.transpose(np.vstack(list((np.roll(x, k) for k in rolls))))\n x = x[min_p:]\n\n return x\n\nclass QuantileAutoRegression:\n \"\"\"\n Perform Quantile Regression on a time series using lags.\n y_t = c + a_1 y_{t - n1} + a_2 + y_{t - n2} + ...\n Where n1, n2, ... are indexes provided by the user.\n A Dicky-Fuller test is performed to decide if the process\n is stationary. If not, the time series is differentiated\n as many times as needed (max number of differences can be\n controlled by the user).\n\n Parameters\n ----------\n tau: float\n Quantile to predict between (0, 1).\n ar_terms: list[int]\n List of autorregresive terms to add.\n add_constant: bool\n Wheter add + c to the model.\n max_diffs: int\n Max number of differences to apply.\n adjust_ar_terms: bool\n If some ar term results in a constant column\n adjust_ar_terms = True removes this ar_term in the\n analysis. If adjust_ar_terms = False raises an Exception.\n add_trend: bool\n Adds linear trend to design matrix.\n If True, Dicky-Fuller test is not performed.\n naive_forecasts: bool\n Predicts seasonal naive using fitted values.\n First ar_term used as seasonality.\n\n Notes\n -----\n [1] To avoid Dicky-Fuller test just use max_diffs = 0.\n [2] Be cautious when the time series is too short.\n [3] Setting tau = 0.5 equals to optimize for MAE.\n [4] If y is constant, returns Naive model.\n\n Examples\n --------\n For 90 percentile (over-estimate) for daily data:\n model = QuantileAutoRegression(0.9, ar_terms=[7, 14])\n \"\"\"\n\n def __init__(self, tau: float,\n ar_terms: List[int],\n add_constant: bool = True,\n max_diffs: int = 10,\n adjust_ar_terms: bool = True,\n add_trend: bool = False,\n naive_forecasts: bool = False):\n self.tau = tau\n self.ar_terms = ar_terms\n self.add_constant = add_constant\n self.max_diffs = max_diffs\n self.adjust_ar_terms = adjust_ar_terms\n self.add_trend = add_trend\n self.naive_forecasts = naive_forecasts\n\n self.min_ar, self.max_ar = np.min(ar_terms), np.max(ar_terms)\n\n self.differences: int\n self.is_constant: bool\n self.last_y_train: Number\n self.last_len_y: int\n self.y_train: np.ndarray\n self.model_: RegressionResultsWrapper\n\n def _check_X(self, X):\n \"\"\"\n Checks if ar-matrix X has constant columns. 
If yes, removes it.\n \"\"\"\n if self.is_constant:\n return X\n\n idx, = np.where(X.std(0) == 0)\n\n if not self.adjust_ar_terms and idx:\n raise Exception(f'AR terms [{\", \".join([str(i) for i in idx])}] '\n 'generate constant '\n 'columns; try removing this terms or '\n 'using others.')\n\n X = np.delete(X, idx, 1)\n self.ar_terms = np.delete(self.ar_terms, idx)\n\n return X\n\n def fit(self, X: np.ndarray, y: np.ndarray) -> 'QuantileAutoRegression':\n y = y.copy()\n self.last_y_train = y[-1]\n self.last_len_y = len(y)\n self.is_constant = np.var(y) == 0\n\n if self.is_constant:\n self.model_ = Naive().fit(None, y)\n\n return self\n\n # Convert y to an stationary process\n self.differences = 0\n if not self.add_trend:\n for _ in range(self.max_diffs):\n _, pval, *_ = adfuller(y)\n if pval < 0.05:\n break\n y = np.diff(y, 1)\n self.differences += 1\n\n design_mat = embed(y, [0] + self.ar_terms)\n self.y_train, X_train = design_mat[:, 0], design_mat[:, 1:]\n\n X_train = self._check_X(X_train)\n\n if self.add_constant:\n X_train = np.hstack([X_train, np.ones((len(X_train), 1))])\n\n if self.add_trend:\n trend = np.arange(self.last_len_y - len(X_train),\n self.last_len_y).reshape(-1, 1)\n X_train = np.hstack([X_train, trend])\n\n if np.linalg.cond(X_train) > 1 / float_info.epsilon:\n raise Exception('X matrix is ill-conditioned '\n 'try reducing number of ar_terms '\n 'or setting add_constant=False.')\n\n self.model_ = QuantReg(self.y_train, X_train).fit(self.tau)\n\n return self\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n check_is_fitted(self)\n\n if self.is_constant:\n return self.model_.predict(X)\n\n horizon = len(X)\n\n if self.naive_forecasts:\n seasonality = self.ar_terms[0]\n repetitions = int(np.ceil(horizon / seasonality))\n y_hat = self.model_.fittedvalues[-seasonality:]\n y_hat = np.tile(y_hat, repetitions)\n y_hat = y_hat[:horizon]\n\n else:\n y_hat = self.y_train\n len_train = self.y_train.size\n forecast_size = len_train + horizon\n\n counter = 0\n while y_hat.size < forecast_size:\n y_hat_placeholder = np.zeros(self.min_ar)\n y_hat = np.concatenate([y_hat, y_hat_placeholder])\n\n X_test = embed(y_hat, self.ar_terms)[-self.min_ar:]\n\n if self.add_constant and not self.is_constant:\n X_test = np.hstack([X_test, np.ones((len(X_test), 1))])\n\n if self.add_trend and not self.is_constant:\n delta = self.max_ar * counter\n trend = np.arange(self.last_len_y + delta,\n self.last_len_y + len(X_test) + delta).reshape(-1, 1)\n X_test = np.hstack([X_test, trend])\n\n y_hat[-self.min_ar:] = self.model_.predict(X_test)\n counter += 1\n\n y_hat = y_hat[len_train:forecast_size]\n\n if self.differences:\n for _ in range(self.differences): y_hat = y_hat.cumsum()\n y_hat += self.last_y_train\n\n return y_hat\n","repo_name":"FedericoGarza/fforma","sub_path":"fforma/base/_quantile_models.py","file_name":"_quantile_models.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"5776905643","text":"from RGT.XML.SVG.baseScriptNode import BaseScriptNode\r\nfrom types import StringType\r\nfrom RGT.XML.SVG.basicSvgNode import BasicSvgNode\r\n\r\n\r\nclass StyleNode(BaseScriptNode):\r\n svgNodeType = BasicSvgNode.SVG_STYLE_NODE\r\n\r\n ATTRIBUTE_TYPE = 'type'\r\n ATTRIBUTE_MEDIA = 'media'\r\n ATTRIBUTE_TITLE = 'title'\r\n\r\n\r\n def __init__(self, ownerDoc):\r\n BaseScriptNode.__init__(self, ownerDoc, 'style')\r\n\r\n def setType(self, data):\r\n if data is not None:\r\n if type(data) is not 
StringType:\r\n                data = str(data)\r\n            self._setNodeAttribute(self.ATTRIBUTE_TYPE, data)\r\n\r\n    def setMedia(self, data):\r\n        if data is not None:\r\n            if type(data) is not StringType:\r\n                data = str(data)\r\n            self._setNodeAttribute(self.ATTRIBUTE_MEDIA, data)\r\n\r\n    def setTitle(self, data):\r\n        if data is not None:\r\n            if type(data) is not StringType:\r\n                data = str(data)\r\n            self._setNodeAttribute(self.ATTRIBUTE_TITLE, data)\r\n\r\n    def getType(self):\r\n        node = self._getNodeAttribute(self.ATTRIBUTE_TYPE)\r\n        if node is not None:\r\n            return node.nodeValue\r\n        return None\r\n\r\n    def getMedia(self):\r\n        node = self._getNodeAttribute(self.ATTRIBUTE_MEDIA)\r\n        if node is not None:\r\n            return node.nodeValue\r\n        return None\r\n\r\n    def getTitle(self):\r\n        node = self._getNodeAttribute(self.ATTRIBUTE_TITLE)\r\n        if node is not None:\r\n            return node.nodeValue\r\n        return None","repo_name":"danrg/RGT-tool","sub_path":"src/RGT/XML/SVG/styleNode.py","file_name":"styleNode.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"20460407836","text":"from interactions.utils.statistic_element import ConditionalActionRestriction, ConditionalInteractionAction, ProgressBarAction\n\nclass ConditionGroup:\n    __qualname__ = 'ConditionGroup'\n\n    def __init__(self, conditions, conditional_action):\n        self._conditions = conditions\n        self._conditional_action = conditional_action\n        self._satisfied = False\n        self._on_satisfied_callback = None\n\n    def __iter__(self):\n        return iter(self._conditions)\n\n    def __len__(self):\n        return len(self._conditions)\n\n    def __bool__(self):\n        return bool(self._conditions)\n\n    def __getitem__(self, key):\n        return self._conditions[key]\n\n    def __str__(self):\n        return '\\n'.join(str(cg) for cg in self._conditions)\n\n    @property\n    def conditional_action(self):\n        return self._conditional_action\n\n    @property\n    def satisfied(self):\n        return self._satisfied\n\n    def attach(self, owner, on_satisfied_callback):\n        self._on_satisfied_callback = on_satisfied_callback\n        for condition in self:\n            condition.attach_to_owner(owner, self._on_condition_satisfied_callback)\n\n    def detach(self, owner, exiting=False):\n        self._on_satisfied_callback = None\n        for condition in self:\n            condition.detach_from_owner(owner, exiting=exiting)\n\n    def _on_condition_satisfied_callback(self, *args, **kwargs):\n        if self.satisfied:\n            return\n        for condition in self:\n            while not condition.satisfied:\n                return\n        self._satisfied = True\n        if self._on_satisfied_callback is not None:\n            self._on_satisfied_callback(self)\n\nclass ConditionalActionManager:\n    __qualname__ = 'ConditionalActionManager'\n\n    def __init__(self):\n        self._condition_groups = []\n        self._callback = None\n        self._attached = False\n\n    def __iter__(self):\n        return iter(self._condition_groups)\n\n    def __len__(self):\n        return len(self._condition_groups)\n\n    def __bool__(self):\n        return bool(self._condition_groups)\n\n    def __getitem__(self, key):\n        return self._condition_groups[key]\n\n    def __repr__(self):\n        return 'ConditionalActionManager: {} conditions attached'.format(len(self._condition_groups))\n\n    def _condition_group_satisfied_callback(self, condition_group):\n        if not self._attached:\n            return\n        self._callback(condition_group)\n\n    def callback_will_trigger_immediately(self, owner, conditional_actions, interaction=None, situation=None):\n        satisfied = False\n\n        def callback(_):\n            nonlocal satisfied\n            satisfied = True\n\n        self.attach_conditions(owner, 
conditional_actions, callback, interaction=interaction, situation=situation)\n self.detach_conditions(owner, exiting=True)\n return satisfied\n\n def attach_conditions(self, owner, conditional_actions, callback, interaction=None, situation=None):\n self._callback = callback\n if interaction is not None:\n resolver = interaction.get_resolver()\n is_user_directed = interaction.is_user_directed\n for conditional_action in conditional_actions:\n conditions = []\n if interaction is not None:\n tests = conditional_action.tests\n if tests is not None and not tests.run_tests(resolver):\n pass\n restrictions = conditional_action.restrictions\n if restrictions:\n restrict_to_user_directed = restrictions == ConditionalActionRestriction.USER_DIRECTED_ONLY\n if is_user_directed != restrict_to_user_directed:\n pass\n for condition_factory in conditional_action.conditions:\n condition = condition_factory(interaction=interaction, situation=situation)\n conditions.append(condition)\n condition_group = ConditionGroup(conditions, conditional_action)\n self._condition_groups.append(condition_group)\n condition_group.attach(owner, self._condition_group_satisfied_callback)\n self._attached = True\n satisfied_groups = set(group for group in self if group.satisfied)\n for group in satisfied_groups:\n self._callback(group)\n\n def detach_conditions(self, owner, exiting=False):\n for condition_group in self:\n condition_group.detach(owner, exiting=exiting)\n self._condition_groups = []\n self._callback = None\n self._attached = False\n\n def get_percent_rate_for_best_exit_conditions(self, interaction):\n group_time = None\n for condition_group in self:\n progress_bar_action = condition_group.conditional_action.progress_bar_action\n if progress_bar_action == ProgressBarAction.IGNORE_CONDITION:\n pass\n action = condition_group.conditional_action.interaction_action\n if action != ConditionalInteractionAction.GO_INERTIAL and action != ConditionalInteractionAction.EXIT_NATURALLY and progress_bar_action == ProgressBarAction.NO_ACTION:\n pass\n individual_time = None\n for condition in condition_group:\n (current_time, percent, rate_change) = condition.get_time_until_satisfy(interaction)\n if current_time is None:\n individual_time = None\n break\n if current_time <= 0:\n pass\n while individual_time is None or individual_time < current_time:\n individual_time = current_time\n individual_percent = percent\n individual_rate_change = rate_change\n if progress_bar_action == ProgressBarAction.FORCE_USE_CONDITION:\n return (individual_percent, individual_rate_change)\n if individual_time is None:\n pass\n while group_time is None or group_time > individual_time:\n group_time = individual_time\n group_percent = individual_percent\n group_rate_change = individual_rate_change\n if group_time is not None:\n return (group_percent, group_rate_change)\n return (None, None)\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"simulation/interactions/utils/exit_condition_manager.py","file_name":"exit_condition_manager.py","file_ext":"py","file_size_in_byte":6312,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"12695908578","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom bajoo.index import IndexTree\nfrom bajoo.index.base_node import BaseNode\nfrom bajoo.index.file_node import FileNode\nfrom bajoo.index.folder_node import FolderNode\n\n\nclass MyNode(BaseNode):\n pass\n\n\ndef _make_tree(node_def, default_sync=False):\n \"\"\"Quick helper 
to generate a BaseNode hierarchy.\n\n Args:\n Tuple[str, List[Tuple], bool]: node definition, of the form:\n ('node name', [children definitions], sync). Both part 2 and 3\n of the tuple are optional.\n default_sync (bool, optional): default value to set the 'sync' flag\n Returns:\n BaseNode: node built from the definition.\n \"\"\"\n name = node_def[0]\n children = node_def[1] if len(node_def) > 1 else []\n sync_flag = node_def[2] if len(node_def) > 2 else default_sync\n node = BaseNode(name)\n node.sync = sync_flag\n for child_def in children:\n node.add_child(_make_tree(child_def, default_sync))\n return node\n\n\nclass TestBrowseIndexTree(object):\n \"\"\"Test of the IndexTree.browse_all_non_sync_nodes() method.\"\"\"\n\n def test_browse_empty_tree_will_return_empty_generator(self):\n tree = IndexTree()\n gen = tree.browse_all_non_sync_nodes()\n assert list(gen) == []\n\n def test_browse_clean_tree_returns_empty_generator(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [\n ('A', [('1',), ('2',)]),\n ('B', [('1',), ('2',)]),\n ]), default_sync=True)\n\n gen = tree.browse_all_non_sync_nodes()\n assert list(gen) == []\n\n def test_browse_dirty_tree_returns_only_non_sync_nodes(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [\n ('A', [('A1', [], False), ('A2',)]),\n ('B', [('B1',), ('B2', [], False)], False),\n ('C', [('C1', [], False), ('C2', [], False)]),\n ]), default_sync=True)\n\n non_sync_nodes = []\n for node in tree.browse_all_non_sync_nodes():\n non_sync_nodes.append(node.name)\n node.task = None\n node.sync = True\n\n expected_non_sync_node = sorted(['A1', 'B', 'B2', 'C1', 'C2'])\n assert sorted(non_sync_nodes) == expected_non_sync_node\n\n def test_browse_skip_nodes_with_task(self):\n \"\"\"Ensures browse_all_non_sync_nodes() never returns a node with task.\n\n When a node has a task associated, it must not be yielded by\n browse_all_non_sync_nodes(). 
Such a node must be skipped until there is\n no longer a task on it.\n \"\"\"\n tree = IndexTree()\n tree._root = _make_tree(('root', [\n ('A', [('A1', [], False), ('A2',)]),\n ('B', [('B1',), ('B2', [], False)], False),\n ('C', [('C1', [], False), ('C2', [], False)]),\n ]), default_sync=True)\n\n tree._root.children['A'].children['A1'].task = True\n tree._root.children['B'].children['B1'].task = True\n tree._root.children['B'].children['B2'].task = True\n tree._root.children['C'].children['C2'].task = True\n\n non_sync_nodes = []\n for node in tree.browse_all_non_sync_nodes():\n if node is IndexTree.WAIT_FOR_TASK:\n break\n non_sync_nodes.append(node.name)\n # node.task is automatically set to True\n\n expected_non_sync_node = sorted(['B', 'C1'])\n assert sorted(non_sync_nodes) == expected_non_sync_node\n\n def test_browse_set_node_task_to_true(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [\n ('A1', [])\n ]))\n\n nodes = []\n for node in tree.browse_all_non_sync_nodes():\n if node is IndexTree.WAIT_FOR_TASK:\n break\n nodes.append(node)\n node.sync = True\n\n assert nodes[0].task is True\n assert nodes[1].task is True\n assert len(nodes) is 2\n\n def test_browse_until_all_is_clean(self):\n \"\"\"browse must loop over all nodes many times until they're clean.\n\n A node non-sync can be yielded over and over again until it's synced.\n Then generator must stop only when all nodes are fully sync (when the\n whole tree is marked not dirty).\n \"\"\"\n tree = IndexTree()\n tree._root = _make_tree(('node A', [('node B',)], True))\n node_a = tree._root\n node_b = tree._root.children['node B']\n # At start: A is sync, but not B\n\n gen = tree.browse_all_non_sync_nodes()\n\n # return node B until it's sync.\n non_sync_nodes = []\n for i in range(3):\n node = next(gen)\n node.task = None # By default browse() set node.task to True\n non_sync_nodes.append(node)\n assert non_sync_nodes == [node_b, node_b, node_b]\n\n node_a.sync = False\n node_b.sync = True\n\n # return node A until it's sync.\n non_sync_nodes = []\n for i in range(3):\n node = next(gen)\n node.task = None\n non_sync_nodes.append(node)\n assert non_sync_nodes == [node_a, node_a, node_a]\n\n node_b.sync = True\n\n # will yield until there is no remaining non-sync nodes.\n non_sync_nodes = []\n for i in range(15):\n node = next(gen)\n node.task = None\n non_sync_nodes.append(node)\n assert len(non_sync_nodes) is 15\n\n node_a.sync = True\n node_b.sync = True\n\n # Tree is clean: Nothing to yield.\n assert len(list(gen)) is 0\n\n def test_browse_pauses_when_all_non_sync_nodes_have_task(self):\n \"\"\"Check browse_all_non_sync_nodes() handles when all nodes have task.\n\n When all remaining nodes (meaning: non sync nodes) have a task\n associated, the generator has no node to return, but the iteration is\n not over.\n In this situation, it must return the special value\n `BROWSE_WAIT_FOR_TASK`.\n \"\"\"\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)], True),\n default_sync=False)\n tree._root.children['A'].task = True\n tree._root.children['B'].task = True\n\n gen = tree.browse_all_non_sync_nodes()\n\n # All nodes are reserved by tasks.\n assert next(gen) is IndexTree.WAIT_FOR_TASK\n assert next(gen) is IndexTree.WAIT_FOR_TASK\n\n tree._root.children['A'].task = None\n assert next(gen) is tree._root.children['A']\n tree._root.children['A'].sync = True\n assert next(gen) is IndexTree.WAIT_FOR_TASK\n\n tree._root.children['B'].sync = True\n # Although B is still reserved by a task, it's no longer 
dirty.\n assert next(gen, None) is None # Iterator is empty\n\n\nclass TestGetNodeFromIndexTree(object):\n \"\"\"Tests about node access methods of IndexTree.\"\"\"\n\n def test_get_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A', [('A1',)]),\n ('B', [('B1',), ('B2',)])]))\n node_a1 = tree.get_node_by_path('A/A1')\n node_b = tree.get_node_by_path('B')\n assert node_a1 and node_a1.name == 'A1'\n assert node_b and node_b.name == 'B'\n\n def test_get_root_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n assert tree.get_node_by_path('.') is tree._root\n\n def test_get_missing_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n assert tree.get_node_by_path('A/ghost') is None\n\n def test_get_missing_root_node_by_path(self):\n tree = IndexTree()\n assert tree.get_node_by_path('.') is None\n\n def test_get_node_by_path_with_missing_folder(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n assert tree.get_node_by_path('A/B/C/ghost') is None\n\n def test_get_or_create_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A', [('A1',)]),\n ('B', [('B1',), ('B2',)])]))\n node_a1 = tree.get_or_create_node_by_path('A/A1', None)\n node_b = tree.get_or_create_node_by_path('B', None)\n assert node_a1 and node_a1.name == 'A1'\n assert node_b and node_b.name == 'B'\n\n def test_get_or_create_root_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n assert tree.get_or_create_node_by_path('.', None) is tree._root\n\n def test_get_or_create_missing_node_by_path(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n node = tree.get_or_create_node_by_path('A/ghost', MyNode)\n assert isinstance(node, MyNode)\n\n def test_get_or_create_node_by_path_without_root(self):\n tree = IndexTree()\n node = tree.get_or_create_node_by_path('A/b/c', MyNode)\n assert isinstance(node, MyNode)\n\n def test_get_or_create_node_by_path_with_missing_folder(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A',), ('B',)]))\n node = tree.get_or_create_node_by_path('A/B/C/ghost', MyNode)\n assert isinstance(node, MyNode)\n\n def test_set_tree_node_sync(self):\n tree = IndexTree()\n tree._root = _make_tree(('root', [('A', [('A1',)]),\n ('B', [('B1',), ('B2',)])]))\n tree.set_tree_not_sync()\n assert tree._root.sync is False\n assert tree._root.children['A'].sync is False\n assert tree._root.children['B'].sync is False\n assert tree._root.children['A'].children['A1'].sync is False\n assert tree._root.children['B'].children['B1'].sync is False\n assert tree._root.children['B'].children['B2'].sync is False\n\n def test_set_empty_tree_node_not_sync(self):\n tree = IndexTree()\n # should do nothing\n tree.set_tree_not_sync()\n\n\nclass TestSaveAndLoadIndexTree(object):\n\n def test_load_from_legacy_empty_tree(self):\n tree = IndexTree()\n tree.load({})\n assert tree._root is None\n\n def test_load_from_legacy_flat_tree(self):\n tree = IndexTree()\n tree.load({\n u'file1': ('hash1', 'hash2'),\n u'file2': ('hash3', None),\n u'file3': ('hash4', None),\n u'file4': (None, None)\n })\n assert tree._root is not None\n for path in ('file1', 'file2', 'file3', 'file4'):\n assert isinstance(tree.get_node_by_path(path), FileNode)\n assert tree.get_node_by_path('file5') is None\n\n def test_load_from_legacy_nested_tree(self):\n tree = IndexTree()\n tree.load({\n u'deep/nested/file': 
('123546', 'abcdef')\n })\n assert tree._root is not None\n assert isinstance(tree.get_node_by_path('deep/nested'), FolderNode)\n assert isinstance(tree.get_node_by_path('deep/nested/file'), FileNode)\n\n def test_load_from_legacy_should_correctly_set_hashes(self):\n tree = IndexTree()\n tree.load({\n u'root_file': ('3f4855158eb3266a74cf3a5d78b361cc',\n '1e4c2746ef98ebb5fe703723ecc3b8fd'),\n u'nested/file': ('148fff4717a87b8ddacb8a4b1fd18531',\n '02d70d1f6d522c454883d6114c7f315f')\n })\n assert tree._root is not None\n root_file = tree.get_node_by_path('root_file')\n assert root_file.state == {\n 'local_hash': '3f4855158eb3266a74cf3a5d78b361cc',\n 'remote_hash': '1e4c2746ef98ebb5fe703723ecc3b8fd'}\n nested_file = tree.get_node_by_path('nested/file')\n assert nested_file.state == {\n 'local_hash': '148fff4717a87b8ddacb8a4b1fd18531',\n 'remote_hash': '02d70d1f6d522c454883d6114c7f315f'}\n\n def test_root_node_should_be_named_dot_after_load(self):\n tree = IndexTree()\n tree.load({\n 'version': 2,\n 'root': {\n 'type': 'FOLDER',\n 'state': None,\n }\n })\n assert tree._root is not None\n assert tree._root.name == u'.'\n\n def test_root_node_should_be_named_dot_after_legacy_load(self):\n tree = IndexTree()\n tree.load({u'x': (None, None)})\n assert tree._root is not None\n assert tree._root.name == u'.'\n\n def test_load_tree_should_set_default_state_to_none(self):\n tree = IndexTree()\n tree.load({\n 'version': 2,\n 'root': {\n 'type': 'FOLDER',\n }\n })\n assert tree._root.state is None\n\n def test_load_tree_should_set_all_node_names(self):\n tree = IndexTree()\n tree.load({\n 'version': 2,\n 'root': {\n 'type': \"FOLDER\",\n 'children': {\n u'file1': {\n 'type': \"FILE\",\n 'state': {'local_hash': 'hash1',\n 'remote_hash': 'hash2'}\n },\n u'file2': {\n 'type': \"FILE\",\n 'state': {'local_hash': 'hash3',\n 'remote_hash': 'hash4'},\n },\n u'file3': {\n 'type': \"FILE\",\n 'state': {'local_hash': 'hash5',\n 'remote_hash': 'hash6'}\n },\n u'file4': {'type': \"FILE\"},\n u'nested': {\n 'type': \"FOLDER\",\n 'children': {\n u'child.txt': {\n 'type': \"FILE\",\n }\n }\n }\n }\n }\n })\n assert tree._root is not None\n for path in ('file1', 'file2', 'file3', 'file4'):\n node = tree.get_node_by_path(path)\n assert node.name == path\n node = tree.get_node_by_path('nested/child.txt')\n assert node.name == 'child.txt'\n\n def test_export_index_should_be_in_format_version_2(self):\n tree = IndexTree()\n data = tree.export_data()\n assert data.get('version') == 2\n\n def test_export_index_tree_without_root_node(self):\n tree = IndexTree()\n data = tree.export_data()\n assert data.get('version') == 2\n\n def test_export_index_tree_with_only_root_node(self):\n tree = IndexTree()\n tree._root = FolderNode('.')\n data = tree.export_data()\n root_def = data.get('root')\n assert root_def['type'] == \"FOLDER\"\n assert len(root_def.get('children', {})) == 0\n\n def test_export_index_tree_with_nested_nodes(self):\n tree = IndexTree()\n tree._root = FolderNode('.')\n tree._root.add_child(FolderNode('A'))\n tree._root.children['A'].add_child(FileNode('A1'))\n tree._root.children['A'].add_child(FolderNode('A2'))\n tree._root.add_child(FolderNode('B'))\n tree._root.children['B'].add_child(FileNode('B1'))\n\n data = tree.export_data()\n assert data['root']['type'] == \"FOLDER\"\n node_a_def = data['root']['children']['A']\n node_b_def = data['root']['children']['B']\n assert node_a_def['type'] == \"FOLDER\"\n assert node_b_def['type'] == \"FOLDER\"\n assert node_a_def['children']['A1']['type'] == \"FILE\"\n assert 
node_a_def['children']['A2']['type'] == \"FOLDER\"\n assert node_b_def['children']['B1']['type'] == \"FILE\"\n\n def test_export_index_tree_returns_states(self):\n tree = IndexTree()\n tree._root = FolderNode('.')\n node_folder = FolderNode('folder')\n node_child = FileNode('child')\n tree._root.add_child(node_folder)\n node_folder.add_child(node_child)\n\n tree._root.state = {'local_hash': 1, 'remote_hash': 2}\n node_folder.state = {'local_hash': 3, 'remote_hash': 4}\n node_child.state = {'local_hash': 5, 'remote_hash': 6}\n\n data = tree.export_data()\n root_node_def = data['root']\n assert root_node_def['state'] == {'local_hash': 1, 'remote_hash': 2}\n folder_node_def = root_node_def['children']['folder']\n assert folder_node_def['state'] == {'local_hash': 3, 'remote_hash': 4}\n child_node_def = folder_node_def['children']['child']\n assert child_node_def['state'] == {'local_hash': 5, 'remote_hash': 6}\n\n\nclass TestIndexTree(object):\n \"\"\"Other IndexTree tests that don't fit in other classes.\"\"\"\n\n def test_get_remote_hash_of_empty_tree(self):\n tree = IndexTree()\n assert tree.get_remote_hashes() == {}\n\n def test_get_remote_hash_of_tree_containing_only_folders(self):\n tree = IndexTree()\n tree._root = FolderNode('.')\n node_folder = FolderNode('folder')\n tree._root.add_child(node_folder)\n node_folder.add_child(FolderNode('nested folder'))\n\n assert tree.get_remote_hashes() == {}\n\n def test_get_remote_hash_of_tree_with_files(self):\n tree = IndexTree()\n file_a1 = FileNode('A1')\n file_a1.state = {'local_hash': 'abcd', 'remote_hash': 1234}\n file_b1 = FileNode('B1')\n file_b1.state = {'local_hash': 'ef01', 'remote_hash': 5678}\n\n tree._root = FolderNode('.')\n tree._root.add_child(FolderNode('A'))\n tree._root.children['A'].add_child(file_a1)\n tree._root.children['A'].add_child(FolderNode('A2'))\n tree._root.add_child(FolderNode('B'))\n tree._root.children['B'].add_child(file_b1)\n\n data = tree.get_remote_hashes()\n assert data == {\n u'A/A1': 1234,\n u'B/B1': 5678,\n }\n\n def test_empty_tree_is_not_dirty(self):\n tree = IndexTree()\n assert tree.is_dirty() is False\n\n def test_tree_is_dirty_if_root_node_is_dirty(self):\n tree = IndexTree()\n tree._root = MyNode('.')\n assert tree.is_dirty() is True\n\n def test_tree_is_not_dirty_if_root_node_is_not_dirty(self):\n tree = IndexTree()\n tree._root = MyNode('.')\n tree._root.sync = True\n assert tree.is_dirty() is False\n","repo_name":"Bajoo/client-pc","sub_path":"tests/unit_tests/index/index_tree_test.py","file_name":"index_tree_test.py","file_ext":"py","file_size_in_byte":18273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22484232158","text":"# Display\nMASTER_VOLUMEN = 0.1\n#WIDTH = 1920\n#HEIGHT = 1080\nWIDTH = 1280\nHEIGHT = 720\nASPECT_RELATION_PLAYER = 2.4\nASPECT_RELATION_CHAT = 0.6\nWIDTH_PLAYER_SPRITE = 100\nHEIGHT_CHAT = 680\nWIDTH_OBJECT = 100\nWIDTH_OBJECT_ICON = 20\n\nPLAYER_SURFACE = (WIDTH_PLAYER_SPRITE, int(WIDTH_PLAYER_SPRITE * ASPECT_RELATION_PLAYER))\nCHAT_SURFACE = (int(HEIGHT_CHAT * ASPECT_RELATION_CHAT), HEIGHT_CHAT)\nOBJECT_SURFACE = (WIDTH_OBJECT, WIDTH_OBJECT)\nOBJECT_SURFACE_ICON = (WIDTH_OBJECT_ICON, WIDTH_OBJECT_ICON)\n\n\n# Menu locations\nPLAY_BUTTON = (430, 325)\nPLAY_STORY_MODE_BUTTON = (800, 310)\nPLAY_SURIVAL_MODE_BUTTON = (800, 415)\nHOW_TO_PLAY_BUTTON = (430, 550)\nCREDITS_BUTTON = (430, 435)\nBACK_BUTTON = (800, 530)\nBACK_BUTTON_HTP = (640, 600)\n\nNEXT_CHAT_BUTTON = (int(WIDTH * (4 / 5)), int(HEIGHT * (4 / 
5)))\nSTART_FINISH_BUTTON = (int(WIDTH / 2), int(HEIGHT / 2))\n\n\n\n# Level config\nLEVEL_TIME = 120\nFALL_SPEED = 0.5\nPLAYER_SPEED = 0.5\nLEFT_LIMIT = 30\nRIGHT_LIMIT = WIDTH - LEFT_LIMIT\nGROUND_LEVEL = HEIGHT - 175\nBOTTOM_LIMIT = HEIGHT + 100\nINITIAL_HEALTH = 30\nFRAME_PER_SPRITE = 5\n\nICON_LOCATIONS = [(int(WIDTH / 2) - 95, 30), (int(WIDTH / 2) + 90, 30), (int(WIDTH / 2) + 190, 30)]\nCOUNTER_LOCATIONS = [(int(WIDTH / 2) - 150, 30), (int(WIDTH / 2) + 150, 30), (int(WIDTH / 2) + 210, 30)]\n\n# HUD\nTIMER_LOCATION = (int(WIDTH / 2), 30)\nOBJECT_1_ICON_LOCATION = (int(WIDTH / 2) - 95, 30)\nOBJECT_1_COUNTER_LOCATION = (int(WIDTH / 2) - 150, 30)\nOBJECT_2_ICON_LOCATION = (int(WIDTH / 2) + 90, 30)\nOBJECT_2_COUNTER_LOCATION = (int(WIDTH / 2) + 150, 30)\nHEALTH_BAR_PORTION_SIZE = (int(WIDTH / (2 * INITIAL_HEALTH)), 15)\nHEALTH_LOCATION = (int(WIDTH / 2), HEIGHT - 20)","repo_name":"CimaDeLosVientos/corona_game","sub_path":"src/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43394185882","text":"\"\"\"Module to print multiplication table of a given number\"\"\"\n\n# Import modules.\n\nimport sys\nimport custom_module_clear_screen\n\n# Define functions.\n\ndef print_multiplication_table(fn_num):\n \"\"\"Function to print multiplication table of fn_num number\"\"\"\n print(f\"Multiplication table of {fn_num} is:\")\n for i in range(1,11,1):\n product = fn_num * i\n print(f\"{product}\")\n\ndef main():\n \"\"\"First function to be called\"\"\"\n custom_module_clear_screen.clear_screen()\n print(\"This script accepts a number & prints its multiplication table.\\n\")\n try:\n num = int(input(\"Enter number: \"))\n except ValueError:\n print(\"Invalid input. Please enter an integer only. 
Exiting script!\\n\")\n sys.exit(1)\n print()\n print_multiplication_table(num)\n print()\n\n# Call main() when the script is executed explicitly.\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"inderpal2406/python-practice-2023","sub_path":"PYnative/02_loop_exercises/ex04_multiplication_table.py","file_name":"ex04_multiplication_table.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11260064523","text":"\r\nimport ast\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\n\r\ndef make_dictionary(soup):\r\n lib = {}\r\n lib['platform'] = 'glints'\r\n \r\n for pekerjaan in soup.find_all('div', {\"class\":\"TopFoldsc__JobOverViewTitle-kklg8i-3 gaBsxq\"}):\r\n lib['pekerjaan'] = pekerjaan.text\r\n \r\n for perusahaan in soup.find_all('div', {\"class\":\"TopFoldsc__JobOverViewCompanyName-kklg8i-5 ldguEd\"}):\r\n lib['perusahaan'] = perusahaan.text\r\n for loc in soup.find_all('div', {\"class\":\"TopFoldsc__JobOverViewCompanyLocation-kklg8i-6 bQlTwv\"}):\r\n lib['lokasi'] = loc.text\r\n \r\n for pos in soup.find_all('span', {\"class\":\"TopFoldsc__PostedAt-kklg8i-11 cRnvgg\"}):\r\n lib['posted_at'] = pos.text\r\n \r\n result = soup.select('div.eWKiJR')\r\n if len(result) == 3:\r\n lib['pengalaman'] = result[-1].text\r\n lib['industri'] = result[0].text\r\n lib['tipe'] = result[1].text\r\n elif len(result) == 4:\r\n lib['gaji'] = result[0].text\r\n lib['industri'] = result[1].text\r\n lib['tipe'] = result[2].text\r\n lib['pengalaman'] = result[3].text\r\n elif len(result) == 2:\r\n lib['industri'] = result[0].text\r\n lib['tipe'] = result[1].text\r\n skil = []\r\n#list = ['Motion Graphics, Creative Concept, ... 
']\r\n for skill in soup.find_all('div', {\"class\":\"TagStyle__TagContainer-sc-66xi2f-1 cPvXJd aries-tag Skillssc__TagOverride-sc-11imayw-3 bssJoZ\"}):\r\n skil.append(skill.text)\r\n lib['skill'] = skil\r\n for jobdesc in soup.find_all('div', {\"class\":\"JobDescriptionsc__DescriptionContainer-sc-1jylha1-2 gpAMiw\"}):\r\n # print(jobdesc.text)\r\n lib['jobdesc'] = jobdesc.text\r\n return lib\r\n\r\n\r\na_list = [] \r\nwith open('one_list.txt', 'r') as f:\r\n mylist = ast.literal_eval(f.read())\r\n\r\n# mylist = ['https://glints.com/id/opportunities/jobs/social-media-specialist-content-creator/80d9e237-de42-472a-b336-5727eea6235b', 'https://glints.com/id/opportunities/jobs/staff-sales/0b1e486d-a05c-4c42-becf-40ae922e4f90','https://glints.com/id/opportunities/jobs/finance-control-manager/3c349df1-e855-4164-a3dd-bd55f8618701']\r\nprint(\"JobStart\")\r\nnum = 0\r\nfor link in mylist:\r\n html_doc = requests.get(link).text\r\n soup = BeautifulSoup(html_doc, 'lxml')\r\n kamus=make_dictionary(soup)\r\n dictionary_copy = kamus.copy()\r\n a_list.append(dictionary_copy)\r\n num+=1\r\n print(num)\r\n# print(a_list)\r\n\r\nwith open('glints.json', 'w') as fp:\r\n json.dump(a_list, fp)\r\nprint(\"Job Finished\")","repo_name":"aqilahaz/get-vacancy-glints","sub_path":"get_details.py","file_name":"get_details.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39297474463","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport pandas as pd\nfrom Kalman import KalmanFilter\nfrom conversion import accelerometer_to_attitude, euler_to_quaternion, quoternion_to_euler_angles, gyro_transition_matrix, normalize_quaternion\nfrom matplotlib import pyplot as plt\nfrom time import sleep\nfrom serial import Serial\nfrom filter import applyLowPass\n\nimport socket\nUDP_IP = \"127.0.0.1\"\nUDP_PORT = 5005\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nSEND = False\n\n# Config\nPORT = \"/dev/ttyACM0\"\nSAMPLE_SIZE = 600\nBAUD_RATE = 115200\nTIME_STEP = 0.01\n\ndelta_t = 0.01 # Time step [s], 100Hz\nPLOT = False\n\n# Initialize covariance matrices\nQ = np.array([[10 ** -4, 0, 0, 0],\n [0, 10 ** -4, 0, 0], \n [0, 0, 10 ** -4, 0], \n [0, 0, 0, 10 ** -4]])\n\nR = np.array([[10, 0, 0, 0],\n [0, 10, 0, 0],\n [0, 0, 10, 0],\n [0, 0, 0, 10]])\n\n \narduino = None\n\ndef getData():\n global arduino\n \n if not arduino:\n arduino = Serial(PORT, BAUD_RATE, timeout=0.1)\n print(\"Opened\", arduino.name)\n sleep(3)\n arduino.readline() # Flush input\n\n ser_bytes = arduino.readline()\n decoded_bytes = ser_bytes[0:len(ser_bytes)-2].decode(\"utf-8\", errors='ignore') # remove trailing characters (\\r\\n)\n \n if not \"Data:\" in decoded_bytes:\n return None\n\n vals = decoded_bytes.replace(\"Data:\", \"\").strip().split(',')\n if len(vals) != 6:\n return None\n vals = [float(i) for i in vals]\n return vals\n\n\ndef main():\n global SEND, PLOT\n\n x0 = np.array(euler_to_quaternion(0,0,0))\n F = np.identity(4)\n H = np.identity(4)\n P = np.eye(4)\n\n kalman = KalmanFilter(x0, F, H, P, Q, R)\n\n # Collect Data\n # df = pd.read_excel(\"data/simulated_data.xlsx\", engine=\"openpyxl\")\n # accelerometer_data = np.array([df[\"Accel X\"], df[\"Accel Y\"], df[\"Accel Z\"]], ndmin=2).transpose()\n # gyro_data = np.array([(df[\"Gyro Phi\"]), (df[\"Gyro Theta\"]), (df[\"Gyro Omega\"])], ndmin=2).transpose()\n\n time = np.linspace(0, 200.1, num=20001)\n kalman_corrected_phi = []\n kalman_corrected_theta = []\n 
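# These lists accumulate the Kalman-corrected Euler angles; they are consumed only by the optional PLOT branch below.\n    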
kalman_corrected_omega = []\n i = 0\n\n buffers_accel = np.zeros((3,6))\n buffers_gyros = np.zeros((3,6))\n buffers = np.zeros((6,6))\n\n while True:\n # for accelerometer_measurement, gyro_measurement in zip(accelerometer_data, gyro_data):\n # accelerometer_measurement = data[0:3] \n # gyro_measurement = data[3:6]\n\n data = getData()\n if data is not None:\n \n filtered_data = np.zeros((1, 6))\n\n for index, val in enumerate(data):\n buffers[index], filtered_data[0, index] = applyLowPass(buffers[index], val)\n\n accelerometer_measurement = filtered_data[0, 0:3] \n gyro_measurement = filtered_data[0, 3:6]\n \n F = gyro_transition_matrix(gyro_measurement[0], gyro_measurement[1], gyro_measurement[2], delta_t)\n kalman.update_state_transition(F)\n\n kalman.predict()\n\n z = euler_to_quaternion(*accelerometer_to_attitude(accelerometer_measurement[0], accelerometer_measurement[1], accelerometer_measurement[2]))\n\n x = kalman.correct(z)\n x = normalize_quaternion(*x)\n kalman.normalize_x(x)\n\n phi, theta, omega = quoternion_to_euler_angles(*x)\n\n SEND = True\n\n if SEND:\n # Euler angles are okay\n message = b\"y%.4fyp%.4fpr%.4fr\" % (omega, theta, phi)\n\n # Quaternions are better\n q_message = b\"w%.4fwa%.4fab%.4fbc%.4fc\" % tuple(x[:])\n\n sock.sendto(message, (UDP_IP, UDP_PORT))\n sleep(0.003)\n\n PLOT = False\n\n if PLOT:\n kalman_corrected_phi.append(phi)\n kalman_corrected_theta.append(theta)\n kalman_corrected_omega.append(omega)\n\n if PLOT:\n dictionary = {\n \"Time\": time,\n \"Phi\": kalman_corrected_phi,\n \"Theta\": kalman_corrected_theta,\n \"Omega\": kalman_corrected_omega,\n }\n kalman_data = pd.DataFrame(data=dictionary)\n kalman_data.plot(x=\"Time\", y=[\"Phi\"])\n kalman_data.plot(x=\"Time\", y=[\"Theta\"])\n kalman_data.plot(x=\"Time\", y=[\"Omega\"])\n plt.show()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Silverlined/Kalman-Quaternion-Rotation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"61"} +{"seq_id":"70691123074","text":"\"\"\"\nA program's ability to access information about itself is called\nintrospection.\nAccessing the caller's identity is handled by inspecting the call stack\n(inspect.stack()), stepping to the previous frame ([1], since the current\nframe is [0]), and then reading that frame's third element, which holds\nthe function name.\nArguments passed to a function are accessed through the introspective\nruntime structure locals().\nIn Python, locals() is a function that returns a dictionary representing\nthe current local symbol table.\n\"\"\"\nimport sys, re, operator, string, inspect\n\n\ndef read_stop_words():\n if inspect.stack()[1][3] != 'extract_words':\n return None\n\n with open('../stop_words.txt') as f:\n stop_words = f.read().split(',')\n stop_words.extend(list(string.ascii_lowercase))\n return stop_words\n\n\ndef extract_words(path_to_file):\n with open(locals()['path_to_file']) as f:\n str_data = f.read()\n pattern = re.compile('[\\W_]+')\n word_list = pattern.sub(' ', str_data).lower().split()\n stop_words = read_stop_words()\n return [w for w in word_list if not w in stop_words]\n\n\ndef frequencies(word_list):\n word_freqs = {}\n for w in locals()['word_list']:\n if w in word_freqs:\n word_freqs[w] += 1\n else:\n word_freqs[w] = 1\n return word_freqs\n\n\ndef sort(word_freqs):\n return sorted(\n locals()['word_freqs'].items(),\n key=operator.itemgetter(1),\n reverse=True)\n\n\ndef main():\n word_freqs = sort(frequencies(extract_words(sys.argv[1])))\n for (w, c) in word_freqs[0:25]:\n print(f'{w} - {c}')\n\n\nif __name__ == '__main__':\n    
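# Entry point: prints the 25 most frequent terms of the file passed as sys.argv[1].\n    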
main()\n","repo_name":"angelxtry/TIL","sub_path":"Python/ExercisesInProgrammingStyle/code/16.Introsepctive.py","file_name":"16.Introsepctive.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22190392635","text":"from Game.Game import Game\nfrom InterestingStuff.Game import Player\nfrom InterestingStuff.Game.CantTurnPacman import CantTurnPacman\nfrom InterestingStuff.Game.CrossroadSmartPacman import CrossroadSmartPacman\nfrom InterestingStuff.Game.DoublePacman import DoublePacman\nfrom InterestingStuff.Game.GhostsPlayer import GhostsPlayer\nfrom InterestingStuff.Game.PacmanPlayer import PacmanPlayer\nfrom InterestingStuff.Solver.MinimaxSolver import *\nimport time\n\n\nclass GameContainer:\n def __init__(self, game, pacmanPlayer, ghostPlayer, minimax_depth=3):\n self.pacman_player = pacmanPlayer\n self.ghost_player = ghostPlayer\n self.solver = MinimaxSolver(game, pacmanPlayer, ghostPlayer, minimax_depth)\n\n def play(self):\n for i in self.solver.steps_generator():\n print(i.g_map)\n time.sleep(.2)\n\n\nprint('Hello, please choose map, enter 1 for bigger map or 2 for smaller one')\nm = input()\nm = 'Map1' if m == '1' else 'Map2'\n\nprint('Please choose one of following configurations: ')\nprint('1 - Classic game: 4 ghosts vs 1 pacman using minimax with depth 4')\nprint('2 - 4 ghosts vs 1 Cant turn pacman using minimax with depth 4')\nprint('3 - 4 ghosts vs 2 pacmans using minimax with depth 3')\nval = input('Enter config: ')\n\nif val == '1':\n g = Game(\"data/\"+m+\"_Solo.txt\")\n pacman = PacmanPlayer(g, g.g_map.pacman)\n ghosts = GhostsPlayer(g, g.g_map.Blinky, g.g_map.Speedy, g.g_map.Clyde, g.g_map.Inky)\n game1 = GameContainer(g, pacman, ghosts)\n game1.play()\nelif val == '2':\n g = Game(\"data/\"+m+\"_Solo.txt\")\n pacman = CantTurnPacman(g, g.g_map.pacman)\n ghosts = GhostsPlayer(g, g.g_map.Blinky, g.g_map.Speedy, g.g_map.Clyde, g.g_map.Inky)\n game1 = GameContainer(g, pacman, ghosts)\n game1.play()\nelse:\n g = Game(\"data/\"+m+\"_Duo.txt\")\n pacman = DoublePacman(g, g.g_map.pacman)\n ghosts = GhostsPlayer(g, g.g_map.Blinky, g.g_map.Speedy, g.g_map.Clyde, g.g_map.Inky)\n game1 = GameContainer(g, pacman, ghosts)\n game1.play()\n","repo_name":"solcmich/FIT_CVUT","sub_path":"Bachelors/ZUM/Semestral/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"72255899073","text":"\r\n\r\n\r\n# set the matplotlib backend so figures can be saved in the background\r\nimport matplotlib\r\nmatplotlib.use(\"Agg\")\r\n\r\n# import the necessary packages\r\nimport keras\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport argparse\r\nimport os\r\nimport cv2\r\nimport random\r\nimport shutil\r\n# import Augmentor\r\n\r\n#from dataPreprocessor import dataProvider\r\nfrom train_utils_bci import (NDStandardScaler, subject_specific, leave1out, importseveralsubjects, pad_with_zeros, pad_by_duplicating, generator)\r\nfrom data_pooler_bci import data_pooler\r\n\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.preprocessing.image import save_img\r\nfrom keras.callbacks import LearningRateScheduler\r\nfrom keras.optimizers import RMSprop\r\nfrom keras.utils import np_utils\r\nfrom keras.models import load_model\r\nfrom keras import models\r\nfrom keras import layers\r\nfrom keras import optimizers\r\nfrom sklearn.metrics import 
classification_report\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import roc_curve\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom imutils import paths\r\nfrom PIL import Image\r\nfrom keras.optimizers import Adagrad\r\n\r\n'''import tensorflow as tf \r\nfrom keras.backend.tensorflow_backend import set_session \r\nconfig = tf.ConfigProto() \r\nconfig.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU \r\nconfig.log_device_placement = True # to log device placement (on which device the operation ran) \r\n # (nothing gets printed in Jupyter, only if you run it standalone)\r\nsess = tf.Session(config=config) \r\nset_session(sess) # set this TensorFlow session as the default session for Keras \r\n'''\r\n#FUNCTIONS TO IMPORT DIFFERENT MODELS\r\n\r\n\r\ndef func1(shape):\r\n\tfrom keras.applications import ResNet50\r\n\tBS = 16\r\n\tconv_base = ResNet50(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func2(shape):\r\n\tfrom keras.applications import ResNet101\r\n\tBS = 16\r\n\tconv_base = ResNet101(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func3(shape):\r\n\tfrom keras.applications import ResNet152\r\n\tBS = 8\r\n\tconv_base = ResNet152(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func4(shape):\r\n\tfrom keras.applications import DenseNet121\r\n\tBS = 32\r\n\tconv_base = DenseNet121(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func5(shape):\r\n\tfrom keras.applications import DenseNet201\r\n\tBS = 8\r\n\tconv_base = DenseNet201(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func6(shape):\r\n\tfrom keras.applications import Xception\r\n\tBS = 32\r\n\tconv_base = Xception(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func7(shape):\r\n\tfrom keras.applications import InceptionV3\r\n\tBS = 32\r\n\tconv_base = InceptionV3(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func8(shape):\r\n\tfrom keras.applications import VGG16\r\n\tBS = 16\r\n\tconv_base = VGG16(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\ndef func9(shape):\r\n\tfrom keras.applications import VGG19\r\n\tBS = 16\r\n\tconv_base = VGG19(weights = 'imagenet',\r\n \tinclude_top = False,\r\n \tinput_shape = (shape[0],shape[1],3))\r\n\treturn BS,conv_base\r\n\r\n\r\n\r\ni = 1\r\nii = 0\r\nk = 1 #FOR SEPARATE FIGURES\r\n\r\nfor ss in range(8): ## FOR EACH SUBJECT SEPARATELY\r\n\r\n\r\n\tNUM_EPOCHS = 50\r\n\t#INIT_LR = 1e-2\r\n\r\n\tmodel_names = ['Xception']\r\n\r\n\tmodel_names1 = [\r\n\t\t\t\t\t'DenseNet121',\r\n\t\t\t\t\t'DenseNet201',\r\n\t\t\t\t\t'InceptionV3',\r\n\t\t\t\t\t'Xception',\r\n\t\t\t\t\t'ResNet50',\r\n\t\t\t\t\t'ResNet101',\r\n\t\t\t\t\t'ResNet152',\r\n\t\t\t\t\t'VGG16',\r\n\t\t\t\t\t'VGG19'\r\n\t\t\t\t\t]\r\n\tfor name in model_names:\r\n\t\ttrain_norm, y_train, val_subjects_norm, test_subjects_norm = data_pooler(dataset_name='TenHealthyData', subIndexTest = ss)\r\n\t\tif (name == 'InceptionV4') or (name =='InceptionV3') or 
(name =='InceptionV2') or (name =='Xception'):\r\n\t\t\tshape = [299,299]\r\n\t\telse:\r\n\t\t\tshape = [224,224]\r\n\r\n\t\tif (name == 'ResNet50'): BS,conv_base = func1(shape)\r\n\t\telif (name == 'ResNet101'): BS,conv_base = func2(shape)\r\n\t\telif (name == 'ResNet152'): BS,conv_base = func3(shape)\r\n\t\telif (name == 'DenseNet121'): BS,conv_base = func4(shape)\r\n\t\telif (name == 'DenseNet201'): BS,conv_base = func5(shape)\r\n\t\telif (name == 'Xception'): BS,conv_base = func6(shape)\r\n\t\telif (name == 'InceptionV3'): BS,conv_base = func7(shape)\r\n\t\telif (name == 'VGG16'): BS,conv_base = func8(shape)\r\n\t\telif (name == 'VGG19'): BS,conv_base = func9(shape)\r\n\r\n\t\t#x_train, y_train, x_valid, y_valid, x_test, y_test = dataProvider(dataset_name = 'EPFL', batch_size=BS)\r\n\t\t#print(train_norm.shape)\r\n\r\n\r\n\r\n\t\ttrain_gen = generator(train_norm,\r\n\t y_train, \r\n\t min_index=0,\r\n\t max_index=None,\r\n\t batch_size=BS,\r\n\t desired_size = shape[0], #************WARNING************\r\n\t #color_mode=\"grayscale\",\r\n\t shuffle=True) #see what None does\r\n\r\n\t\t'''val_gen = generator(val_norm,\r\n y_val,\r\n min_index=0,\r\n max_index=None,\r\n batch_size=BS,\r\n desired_size = shape[0], #************WARNING************\r\n #color_mode=\"grayscale\",\r\n shuffle=True)'''\r\n\t\ttotalVal = 0\r\n\t\tfor jj in range(len(val_subjects_norm)):\r\n\t\t\ttotalVal += len(val_subjects_norm[jj]['ytrain'])\r\n\t\t\tval_gen = generator(val_subjects_norm[jj]['xtrain'],\r\n val_subjects_norm[jj]['ytrain'],\r\n min_index=0,\r\n max_index=None,\r\n batch_size=BS,\r\n desired_size = shape[0],\r\n shuffle = True \t\t #************WARNING************\r\n #color_mode=\"grayscale\",\r\n )\r\n\r\n\t\ttotalTrain = len(y_train)\r\n\t\tclassWeight = [np.sum(y_train == 0), np.sum(y_train == 1)]\r\n\r\n\r\n\r\n\r\n\t\t#Feature extraction with data augmentation\r\n\t\tmodel = models.Sequential()\r\n\t\tmodel.add(conv_base)\r\n\t\tmodel.add(layers.Flatten())\r\n\t\tmodel.add(layers.Dropout(0.2))\r\n\t\tmodel.add(layers.Dense(256, activation = 'relu'))\r\n\t\tmodel.add(layers.Dense(1, activation = 'sigmoid'))\r\n\r\n\t\tconv_base.trainable = True\r\n\t\t'''set_trainable = False\r\n\t\tprint(\"\\n\\n\\n\"+str(conv_base.layers))'''\r\n\t\tcount = 0\r\n\t\tfor layer in conv_base.layers:\r\n\t\t\t\t#if(count > 39):\r\n\t\t\t\t\tlayer.trainable = True\r\n\t\t\t\t#count = count + 1\r\n\t\tmodel.summary()\r\n\t\t\t\r\n\t\tmodel.compile(loss = 'binary_crossentropy',\r\n\t \t\toptimizer = optimizers.RMSprop(learning_rate=0.0001, rho=0.9),\r\n\t \t\tmetrics = ['acc'])\r\n\r\n\r\n\t\t#es = EarlyStopping(monitor='val_accuracy', mode='max', min_delta=5)\r\n\t\tmcp_save = keras.callbacks.ModelCheckpoint(str(i)+\"Test-subject-\"+str(ss)+\"_bci_Best_model#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".h5\",monitor='val_acc', verbose=0, \r\n\t\t\tsave_best_only=True, save_weights_only=False, mode='max', period=1)\r\n\r\n\t\tH = model.fit_generator(\r\n\t\t\ttrain_gen,\r\n\t\t\tsteps_per_epoch=totalTrain // BS,\r\n\t\t\tvalidation_data=val_gen,\r\n\t\t\tvalidation_steps=totalVal // BS,\r\n\t\t\tclass_weight=classWeight,\r\n\t\t\tepochs=NUM_EPOCHS,\r\n\t\t\tcallbacks = [mcp_save])\r\n\r\n\r\n\t\t\r\n\t\t# save model and architecture to single file\r\n\t\t#model.save(\"noaug_model#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".h5\")\r\n\t\tmodel.save(str(i)+\"Test-subject-\"+str(ss)+\"_BCI_model#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".h5\")\r\n\t\tprint(\"Saved model to disk\")\r\n\r\n\t\t\r\n\r\n\r\n\t\tmodel = 
load_model(str(i)+\"Test-subject-\"+str(ss)+\"_bci_Best_model#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".h5\")\r\n\r\n\t\tfname1 = str(i)+\"Test-subject-\"+str(ss)+\"_img_AllLayer_modelHistory#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".txt\"\r\n\t\tf = open(fname1,\"w+\")\r\n\r\n\t\tfor jj in range(len(test_subjects_norm)):\r\n\t\t\tprint(\"hi!\")\r\n\t\t\ttotalTest = len(test_subjects_norm[jj]['ytrain'])\r\n\t\t\ttest_gen = generator(test_subjects_norm[jj]['xtrain'],\r\n test_subjects_norm[jj]['ytrain'],\r\n min_index=0,\r\n max_index=None,\r\n batch_size=BS,\r\n desired_size = shape[0] #************WARNING************\r\n #color_mode=\"grayscale\",\r\n )\r\n\t\t\tpredIdxs = model.predict_generator(test_gen,verbose=2,\r\n\t\t\t\t\t\tsteps=(totalTest // BS))\r\n\t\t\t# for each image in the testing set we need to find the index of the\r\n\t\t\t# label with corresponding largest predicted probability\r\n#\t\t\tpredIdxs = np.argmax(predIdxs, axis=1)\r\n\t\t\tpredIdxs = np.where(predIdxs >= 0.5, 1, 0)\r\n\t\t\tpredIdxs = predIdxs.reshape(len(predIdxs),)\r\n\t\t\tpredIdxs = np.float32(predIdxs)\r\n #print(classification_report(test_gen.classes, predIdxs, target_names=y_test))\r\n\t\t\t#test_gen.reset()\r\n\t\t\tauc = roc_auc_score(test_subjects_norm[jj]['ytrain'][0:len(predIdxs)],predIdxs)\r\n\t\t\tcm = confusion_matrix(test_subjects_norm[jj]['ytrain'][0:len(predIdxs)], predIdxs) #CHANGED EVERY testGen to valGen\r\n\t\t\ttotal = sum(sum(cm))\r\n\t\t\tacc = (cm[0, 0] + cm[1, 1]) / total\r\n\t\t\tsensitivity0 = cm[0, 0] / (cm[0, 0] + cm[0, 1])\r\n\t\t\tsensitivity1 = cm[1, 1] / (cm[1, 1] + cm[1, 0])\r\n\t\t\t#sensitivity2 = cm[2, 2] / (cm[2, 2] + cm[2, 0] + cm[2,1] + cm[2,3])\r\n\t\t\t#sensitivity3 = cm[3, 3] / (cm[3, 3] + cm[3, 0] + cm[3,1] + cm[3,2])\r\n\t\t\tsensitivity = (sensitivity1+sensitivity0)/2\r\n\t\t\tf.write(\"Test subject \\n\" + str(ss) + \"\\nValidation acc: \" + str(round(H.history['val_acc'][NUM_EPOCHS-1],4)) + \"; Training acc: \" + str(round(H.history['acc'][NUM_EPOCHS-1],4)) + \r\n\t\t\t\t\"\\nAUC: \" + str(round(auc,4)) + \"; Sensitivity (avg): \" + str(round(sensitivity,4)) + \"; Test acc: \" + str(round(acc,4)))\r\n\t\t\tf.write(\"\\nSensitivity0: {:.4f}\".format(sensitivity0))\r\n\t\t\tf.write(\"\\nSensitivity1: {:.4f}\".format(sensitivity1))\r\n\t\t\t#f.write(\"\\nSensitivity2: {:.4f}\".format(sensitivity2))\r\n\t\t\t#f.write(\"\\nSensitivity3: {:.4f}\".format(sensitivity3))\r\n\t\t\t#f.write(\"\\n\\n\" + classification_report(testGen.classes, predIdxs,\r\n\t\t\t#\t\t\t\t\t\ttarget_names=testGen.class_indices.keys()))\r\n\t\t\tf.write(\"\\n\" + str(cm))\r\n\r\n\t\t\tdel predIdxs\r\n# e1 = \"noaug_modelHistory#\"+str(NUM_EPOCHS)+'_'+ str(name) + \".txt\"\r\n\t\tf.close()\r\n \r\n\r\n# =============================================================================\r\n# \t\tprint(cm)\r\n# \t\tprint(\"test acc: {:.4f}\".format(acc))\r\n# \t\tprint(\"sensitivity0: {:.4f}\".format(sensitivity0))\r\n# \t\tprint(\"sensitivity1: {:.4f}\".format(sensitivity1))\r\n# \t\tprint(\"sensitivity2: {:.4f}\".format(sensitivity2))\r\n# \t\tprint(\"sensitivity3: {:.4f}\".format(sensitivity3))\r\n# \t\tprint(\"sensitivity (avg): {:.4f}\".format(sensitivity))\r\n# =============================================================================\r\n\r\n\t\tplt.figure(k)\r\n\t\tplt.plot(H.history['acc'])\r\n\t\tplt.plot(H.history['val_acc'])\r\n\t\tplt.title('model accuracy')\r\n\t\tplt.ylabel('accuracy')\r\n\t\tplt.xlabel('epoch')\r\n\t\tplt.legend(['train', 'val'], loc='upper 
left')\r\n\t\t#plt.savefig('noaug_modelHistory#'+str(NUM_EPOCHS)+'_'+ str(name)+'_accuracy.png')\r\n\t\tplt.savefig(str(i)+\"Test-subject-\"+str(ss)+'_img_AllLayer_modelHistory#'+str(NUM_EPOCHS)+'_'+ str(name)+'_accuracy.png')\r\n\r\n\t\tplt.figure(k+1)\r\n\t\tplt.plot(H.history['loss'])\r\n\t\tplt.plot(H.history['val_loss'])\r\n\t\tplt.title('model loss')\r\n\t\tplt.ylabel('loss')\r\n\t\tplt.xlabel('epoch')\r\n\t\tplt.legend(['loss', 'val_loss'], loc='upper left')\r\n\t\t#plt.savefig('noaug_modelHistory#'+str(NUM_EPOCHS)+'_'+ str(name)+'_loss.png')\r\n\t\tplt.savefig(str(i)+\"Test-subject-\"+str(ss)+'_img_AllLayer_modelHistory#'+str(NUM_EPOCHS)+'_'+ str(name)+'_loss.png')\r\n\t\t\r\n\t\t#k = k + 2\r\n\t\t\r\n\ti = i + 1\r\n\r\n\tii = ii + 4\r\n\tdel train_norm, y_train, val_subjects_norm, test_subjects_norm","repo_name":"dazhigulov/BCI-Transfer-Learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12293,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"30347282260","text":"#!/usr/bin/env python\nimport math\n#import fractions\nimport Rat\nimport itertools\n\ndef old_convert_integers_by_ratio(ratio, num_inputs, src_offset=0, dest_offset=0):\n \"\"\"Given an out:in ratio and a number of inputs, give the reassignments\n that will result in the correct number of output frames taken linearly from\n input frames.\n\n You can also specify a src_offset or dest_offset, which are added to the\n source or dest numbers.\n\n >>> list(convert_integers_by_ratio(0.5, 4))\n [(1, 0), (3, 1)]\n >>>\n \"\"\"\n max_taken = -1\n for in_frame in xrange(0, num_inputs):\n out_frame = int(math.floor((in_frame + 1) * ratio)) - 1\n if out_frame > max_taken:\n for copy in xrange(max_taken + 1, out_frame + 1):\n yield (in_frame + src_offset, copy + dest_offset)\n max_taken = out_frame\n\ndef convert_integers_by_ratio(ratio, num_inputs, src_offset=0, dest_offset=0):\n return convert_integers_by_iterator_ratio(ratio,\n xrange(src_offset, src_offset + num_inputs),\n dest_offset=dest_offset)\n\ndef argh(ratio, source, dest_offset=0):\n \"\"\"Given a source of frame numbers and a ratio, gives the reassignments\n that will result in the best assignment of input to output frames.\"\"\"\n\n if len(source) == 0:\n raise StopIteration()\n\n first_frame = source[0]\n\n for in_frame in source:\n rel_in_frame = in_frame - first_frame\n\n first_rel_out_frame = int(math.floor(rel_in_frame * ratio))\n bound_rel_out_frame = int(math.floor((rel_in_frame + 1) * ratio))\n\n # Confusing, argh:\n # Uncomment this to let the program use e.g. even-numbered frames when \n # the ratio is 1/2. Unfortunately, this breaks everything else! See \n # testSkipRatioBad in test_rateconverter.py. 
You'll also need to use\n # take_last_assignment down in convert_integers_by_iterator_ratio.\n #bound_rel_out_frame = max(\n #int(math.floor((rel_in_frame + 1) * ratio)),\n #first_rel_out_frame + 1)\n\n for rel_out_frame in xrange(first_rel_out_frame, bound_rel_out_frame):\n yield (in_frame, rel_out_frame + dest_offset)\n\ndef convert_integers_by_iterator_ratio(ratio, source, dest_offset=0):\n #return take_last_assignment(argh(ratio, source, dest_offset))\n return argh(ratio, source, dest_offset)\n\ndef take_last_assignment(source):\n first = True\n last = None\n for assn in source:\n if first:\n last = assn\n first = False\n if assn[1] != last[1]:\n yield last\n last = assn\n if last is not None:\n yield last\n\n \n\n\ndef expected_number(ratio, num_inputs):\n return math.floor(ratio * num_inputs)\n\ndef ratio_for_number(num_inputs, num_outputs):\n return Rat.rat(num_outputs, num_inputs)\n\ndef frames_in_range(bounds):\n return bounds[1] + 1 - bounds[0]\n\ndef convert_range_to_range(in_bounds, out_bounds):\n num_outs = frames_in_range(out_bounds)\n num_ins = frames_in_range(in_bounds)\n ratio = ratio_for_number(num_ins, num_outs)\n return convert_integers_by_ratio(ratio, num_ins,\n src_offset=in_bounds[0],\n dest_offset=out_bounds[0])\n\n","repo_name":"rjmoggach/python-moshion","sub_path":"moshion/extractframes/rateconverter.py","file_name":"rateconverter.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"20916282135","text":"filelist=[]\nwith open('train.txt','r') as ta:\n\treadlist=ta.readlines()\n\tfor line in readlist:\n\t\tline=line.strip().split()\n\t\tline=line.pop(0)\n\t\tfilelist.append(line)\nwith open('train1.txt','w+') as ta1:\n\tfor readline in filelist:\n\t\tta1.write(readline+'\\n')\n\n","repo_name":"huguofengcool/python","sub_path":"remove_label.py","file_name":"remove_label.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31330724805","text":"from django.test import TestCase\r\nfrom django.test.client import Client\r\nfrom django.core.management import call_command\r\n\r\nfrom mainapp.models import Card, CardCategory\r\n\r\n\r\nclass TestMainappSmoke(TestCase):\r\n def setUp(self):\r\n call_command('flush', '--noinput')\r\n call_command('loaddata', 'test_db.json')\r\n self.client = Client()\r\n\r\n def test_mainapp_urls(self):\r\n response = self.client.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n response = self.client.get('/contacts/')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n # response = self.client.get('/card/7/')\r\n # self.assertEqual(response.status_code, 200)\r\n\r\n response = self.client.get('/category/0/1/')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n for category in CardCategory.objects.all():\r\n response = self.client.get(f'/category/{category.pk}/1/')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n for card in Card.objects.all():\r\n response = self.client.get(f'/card/{card.pk}/')\r\n self.assertEqual(response.status_code, 200)\r\n\r\n def tearDown(self):\r\n call_command('sqlsequencereset', 'mainapp', 'authapp', 'ordersapp', 'basketapp')\r\n","repo_name":"Volhen/geekshop","sub_path":"mainapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"10180268869","text":"from selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path=\"C:\\\\Users\\\\amit_pc\\\\Documents\\\\Study materials\\\\python\\\\Seleium\\\\chromedriver.exe\")\n\ndriver.get(\"https://the-internet.herokuapp.com/windows\")\n\ndriver.find_element_by_link_text(\"Click Here\").click()\n\nchildWindow = driver.window_handles[1] #get all the window opened in the browser\n\n#(\"ParentID\", \"ChildId\") # driver.window_handles == gets the list of Id of all the windows\n\n# Switch to a new window\ndriver.switch_to.window(childWindow)\n\n# Printing the Text from parent Window\nprint(driver.find_element_by_tag_name(\"h3\").text)\n\ndriver.close()\n\n# Printing the text from child window\ndriver.switch_to.window(driver.window_handles[0])\n\nprint(driver.find_element_by_tag_name(\"h3\").text)\n\nassert \"Opening a new window\" == driver.find_element_by_tag_name(\"h3\").text\n\ndriver.quit()","repo_name":"amits0003/Selenium_Study_Files","sub_path":"SeleniumTest/handle_new_child_window.py","file_name":"handle_new_child_window.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38506479966","text":"from ceramatch.controller.cgraph_nodes.node import Node\n\nfrom ceramatch.utils.fnc_drawing import (render_drawing, get_profile_data)\n\nfrom deposit.utils.fnc_serialize import (try_numeric)\nfrom deposit import DGeometry\n\nfrom PySide2 import (QtWidgets, QtCore, QtGui, QtSvg)\nimport numbers\n\nclass SampleNode(Node):\n\t\n\tdef __init__(self, cgraph, node_id, label, drawing_data, picture = None):\n\t\t\n\t\tNode.__init__(self, cgraph, node_id, label)\n\t\t\n\t\tself._drawing_data = drawing_data\n\t\t\n\t\tif picture is not None:\n\t\t\tself._picture = picture\n\t\telse:\n\t\t\tself._picture = QtGui.QPicture()\n\t\t\tpainter = QtGui.QPainter(self._picture)\n\t\t\trender_drawing(\n\t\t\t\tself._drawing_data, painter, self.cgraph.LINE_WIDTH, \n\t\t\t\tscale = self.cgraph.SCALE_DRAWINGS, color = QtCore.Qt.black,\n\t\t\t)\n\t\t\tpainter.end()\n\t\t\n\t\tself._selection_polygon = QtGui.QPolygonF(\n\t\t\tQtCore.QRectF(self._picture.boundingRect().marginsAdded(\n\t\t\t\tQtCore.QMargins(3, 3, 3, 3)\n\t\t\t))\n\t\t)\n\t\tself._selection_shape = QtGui.QPainterPath()\n\t\tself._selection_shape.addPolygon(self._selection_polygon)\n\t\n\tdef copy(self):\n\t\t\n\t\treturn SampleNode(\n\t\t\tself.cgraph, self.node_id, self.label, \n\t\t\tself._drawing_data, self._picture,\n\t\t)\n\t\n\tdef get_drawing_data(self):\n\t\t\n\t\treturn self._drawing_data.copy()\n\t\n\tdef set_drawing_data(self, data):\n\t\t\n\t\tself._drawing_data = data\n\t\n\tdef get_profile_data(self):\n\t\t# returns (coords, radius)\n\t\t\n\t\tcoords, radius = get_profile_data(self._drawing_data)\n\t\t\n\t\treturn (coords, radius)\n\t\n\tdef boundingRect(self):\n\t\t\n\t\tscale = self.cgraph.get_scale_factor()\n\t\tif scale < self.cgraph.SCALE_CUTOFF:\n\t\t\treturn QtCore.QRectF()\n\t\t\n\t\treturn self._selection_polygon.boundingRect()\n\t\n\tdef center(self):\n\t\t\n\t\treturn self.boundingRect().center()\n\t\n\tdef shape(self):\n\t\t\n\t\treturn self._selection_shape\n\t\n\tdef paint(self, painter, option, widget):\n\t\t\n\t\tscale = self.cgraph.get_scale_factor()\n\t\tif scale < self.cgraph.SCALE_CUTOFF:\n\t\t\treturn\n\t\t\n\t\tselected = False\n\t\tcolor = QtCore.Qt.black\n\t\tif option.state & QtWidgets.QStyle.State_Selected:\n\t\t\tcolor = QtCore.Qt.red\n\t\t\tselected = True\n\t\t\n\t\trect = 
self.boundingRect()\n\t\tbgcolor = QtGui.QColor(\"white\")\n\t\tbgcolor.setAlphaF(0.8)\n\t\tpainter.setBrush(QtGui.QBrush(bgcolor))\n\t\tpainter.setPen(QtGui.QPen(bgcolor, 1))\n\t\tpainter.drawRect(rect)\n\t\t\n\t\tpainter.setBrush(QtGui.QBrush(color))\n\t\tpainter.setPen(QtGui.QPen(color, 1))\n\t\tpainter.drawPicture(0, 0, self._picture)\n\t\t\n\t\tif selected:\n\t\t\tpainter.setBrush(QtGui.QBrush())\n\t\t\tpainter.drawRect(rect)\n\t\n\tdef update_tooltip(self):\n\t\t\n\t\tbuffer = QtCore.QBuffer()\n\t\tbuffer.open(QtCore.QIODevice.WriteOnly)\n\t\tscale = self.cgraph.SCALE_TOOLTIP / self.cgraph.SCALE_DRAWINGS\n\t\tgen = QtSvg.QSvgGenerator()\n\t\tgen.setOutputDevice(buffer)\n\t\tpainter = QtGui.QPainter(gen)\n\t\tpainter.scale(scale, scale)\n\t\trect = self.boundingRect().marginsAdded(QtCore.QMargins(10, 10, 10, 10))\n\t\trect = QtCore.QRectF(rect.x(), rect.y(), rect.width()*0.8, rect.height())\n\t\tpainter.setPen(QtGui.QPen(QtCore.Qt.white, 0))\n\t\tpainter.setBrush(QtGui.QBrush(QtCore.Qt.white))\n\t\tpainter.drawRect(rect)\n\t\tpainter.setPen(QtGui.QPen(QtCore.Qt.black, 0.5))\n\t\tpainter.drawPicture(0, 0, self._picture)\n\t\tpainter.drawText(rect, QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom, self.label)\n\t\tpainter.end()\n\t\tself._tool_tip = \"<img src='data:image/svg+xml;base64,%s'>\" % (bytes(buffer.data().toBase64()).decode())\n\t\n\tdef on_hover(self, state):\n\t\t# state: True = Enter, False = Leave\n\t\t\n\t\tif self._tool_tip is None:\n\t\t\tself.update_tooltip()\n\t\t\n\t\tself.cgraph.on_hover(self, state)\n\n","repo_name":"demjanp/CeraMatch","sub_path":"src/ceramatch/controller/cgraph_nodes/sample_node.py","file_name":"sample_node.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"7875525286","text":"\"\"\"Codewars: Sort Out The Men From Boys\n7 kyu\n\nURL: https://www.codewars.com/kata/5af15a37de4c7f223e00012d/train/python\n\nScenario\nNow that the competition gets tough it will Sort out the men from the boys.\nMen are the Even numbers and Boys are the odd !\n\nTask\nGiven an array/list [] of n integers , Separate The even numbers from the \nodds , or Separate the men from the boys !\n\nNotes\nReturn an array/list where Even numbers come first then odds\nSince , Men are stronger than Boys , Then Even numbers in ascending order \nWhile odds in descending .\nArray/list size is at least *4* .\nArray/list numbers could be a mixture of positives , negatives .\nHave no fear , It is guaranteed that no Zeroes will exists .\nRepetition of numbers in the array/list could occur , So \n(duplications are not included when separating).\n\nInput >> Output Examples:\nmenFromBoys ({7, 3 , 14 , 17}) ==> return ({14, 17, 7, 3}) \nExplanation:\nSince , { 14 } is the even number here , So it came first , then the odds \nin descending order {17 , 7 , 3} .\n\"\"\"\n\n\ndef men_from_boys(arr):\n arr = sorted(set(arr))\n lst_even = []\n lst_odd = []\n for d in arr:\n if d % 2 == 0:\n lst_even.append(d)\n else:\n lst_odd.append(d)\n return lst_even + sorted(lst_odd, reverse = True)\n # equivalent alternative: return lst_even + lst_odd[::-1]\n\n\ndef men_from_boys(arr):\n arr = sorted(set(arr))\n arr_r = sorted(arr, reverse = True)\n lst = []\n for d in arr:\n if d % 2 == 0:\n lst.append(d)\n for d_r in arr_r:\n if d_r not in lst:\n lst.append(d_r)\n return lst\n\n\ndef main():\n # Output: [-28,2,76,88,63,-57,-85]\n # arr = [63,-57,76,-85,88,2,-28]\n # print(men_from_boys(arr))\n\n assert men_from_boys([7,3,14,17]) == [14,17,7,3]\n assert 
men_from_boys([2,43,95,90,37]) == [2,90,95,43,37]\n assert men_from_boys([20,33,50,34,43,46]) == [20,34,46,50,43,33]\n assert men_from_boys([82,91,72,76,76,100,85]) == [72,76,82,100,91,85]\n assert men_from_boys([2,15,17,15,2,10,10,17,1,1]) == [2,10,17,15,1]\n assert men_from_boys([-32,-39,-35,-41]) == [-32,-35,-39,-41]\n assert men_from_boys([-64,-71,-63,-66,-65]) == [-66,-64,-63,-65,-71]\n assert men_from_boys([-94,-99,-100,-99,-96,-99]) == [-100,-96,-94,-99]\n assert men_from_boys([-53,-26,-53,-27,-49,-51,-14]) == [-26,-14,-27,-49,-51,-53]\n assert men_from_boys([-17,-45,-15,-33,-85,-56,-86,-30]) == [-86,-56,-30,-15,-17,-33,-45,-85]\n assert men_from_boys([12,89,-38,-78]) == [-78,-38,12,89]\n assert men_from_boys([2,-43,95,-90,37]) == [-90,2,95,37,-43]\n assert men_from_boys([82,-61,-87,-12,21,1]) == [-12,82,21,1,-61,-87]\n assert men_from_boys([63,-57,76,-85,88,2,-28]) == [-28,2,76,88,63,-57,-85]\n assert men_from_boys([49,818,-282,900,928,281,-282,-1]) == [-282,818,900,928,281,49,-1]\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"yiyinghsieh/python-algorithms-data-structures","sub_path":"cw_sort_out_the_men_from_boys.py","file_name":"cw_sort_out_the_men_from_boys.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4663159572","text":"import os\nimport mod.extract_values as extract\nimport mod.details as det\nimport mod.manage_data as mani\nfrom mod.figparams import *\nimport numpy as np\n\n\ndef process(info, fov, align_to_step, cycles_start_on_step):\n folder, condition, date, flowcell = info\n details = det.import_experimental_details(folder)\n data = {}\n for line in fov:\n area, R, del_beads = line\n print(area, end=' ')\n # ---\n path = os.path.join(folder, date,'data_corrected', f'results_{R}_PAR_corr.dat')\n data[area] = mani.align_and_classify_data(path, del_beads, align_to_step, cycles_start_on_step)\n # ---\n details = det.save_experimental_details(info, data, line, details)\n # ---\n print('done.')\n print('Finished processing.')\n return data, details\n\n\ndef get_bin_central_points(bins_edges):\n bins_center = []\n idx_max = len(bins_edges[:-1])\n for i in range(0, idx_max):\n mean = (bins_edges[i + 1] + bins_edges[i]) / 2\n bins_center += [mean]\n return bins_center\n\n\ndef find_coord_hist_maxima(bins_center, counts):\n y_peak = counts.max()\n x_peak = bins_center[counts==y_peak][0]\n return x_peak, y_peak\n\n\n\"\"\"\n\ndef get_contribution_percentage(all_counts, bw): # NOT BEING USED \n counts, counts_extended, counts_bridged = all_counts\n area=sum(counts*bw)\n distribution_all = int(area*100)\n distribution_extended = int(sum(counts_extended*bw)*100)\n distribution_bridged = int(sum(counts_bridged*bw)*100)\n\"\"\"","repo_name":"Moreno-HerreroLab/MTDataProcessing","sub_path":"DeBragança_CellReports_2023/mod/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1612847519","text":"# Built-in modules\nimport asyncio\nimport io\nfrom datetime import datetime, timedelta\nfrom functools import partial\nfrom typing import Union\n\n# External modules\nimport discord\nimport pytz\nimport youtube_dl\nfrom discord.ext import commands\nfrom tenacity import retry, retry_if_exception_type\n\nTZ = pytz.timezone(\"Europe/Paris\")\n\n# Suppress noise about console usage from errors\nyoutube_dl.utils.bug_reports_message = 
lambda: \"\"\nytdl_format_options = {\n \"format\": \"bestaudio/best\",\n \"outtmpl\": \"%(extractor)s-%(id)s-%(title)s.%(ext)s\",\n \"restrictfilenames\": True,\n \"noplaylist\": True,\n \"nocheckcertificate\": True,\n \"ignoreerrors\": False,\n \"logtostderr\": False,\n \"quiet\": True,\n \"no_warnings\": True,\n \"default_search\": \"auto\",\n \"retries\": 10,\n \"source_address\": \"0.0.0.0\", # nosec\n}\nffmpeg_options = {\n \"executable\": \"/usr/bin/ffmpeg\",\n \"options\": \"-vn\",\n}\nytdl = youtube_dl.YoutubeDL(ytdl_format_options)\n\n\nclass YTDLSource(discord.PCMVolumeTransformer):\n \"\"\"Create a discord.PCMVolumeTransformer using youtube_dl.\"\"\"\n\n def __init__(self, source: Union[str, io.BufferedIOBase], data: dict, requester: str):\n super().__init__(source)\n self.webpage_url: str = data.get(\"webpage_url\", \"\")\n self.requester: str = requester\n self.title: str = data.get(\"title\", \"\")\n self.duration: str = str(timedelta(seconds=data.get(\"duration\", 0)))\n self.duration_sec: int = int(data.get(\"duration\", 0))\n self.thumbnail: str = data.get(\"thumbnail\", \"\")\n # YTDL info dicts (data) have other useful information you might want\n # https://github.com/rg3/youtube-dl/blob/master/README.md\n\n def __getitem__(self, item: str):\n \"\"\"Allows us to access attributes similar to a dict.\n\n This is only useful when you are NOT downloading.\n \"\"\"\n return self.__getattribute__(item)\n\n @classmethod\n async def create_source(cls, ctx: commands.Context, url: str, loop: asyncio.AbstractEventLoop) -> dict:\n \"\"\"Add `search` url to queue.\"\"\"\n loop = loop or asyncio.get_event_loop()\n to_run = partial(ytdl.extract_info, url=url, download=False)\n data = await loop.run_in_executor(None, to_run)\n if \"entries\" in data:\n # take first item from a playlist\n data = data[\"entries\"][0]\n title = data.get(\"title\", \"no_title\")\n _url = data.get(\"webpage_url\", \"no_url\")\n duration = str(timedelta(seconds=data.get(\"duration\", 0)))\n duration_sec: int = int(data.get(\"duration\", 0))\n thumbnail = data.get(\"thumbnail\", \"no_thumbnail\")\n embed = discord.Embed(\n title=f\"Music added by {ctx.author}\",\n description=f\"[{title}]({_url}) - {duration}\",\n color=discord.Color.blue(),\n )\n embed.set_thumbnail(url=thumbnail)\n embed.timestamp = datetime.now(tz=TZ)\n await ctx.send(embed=embed)\n return {\n \"url\": _url,\n \"requester\": ctx.author,\n \"title\": title,\n \"duration\": duration,\n \"duration_sec\": duration_sec,\n \"thumbnail\": thumbnail,\n }\n\n @classmethod\n @retry(retry=retry_if_exception_type(Exception))\n async def regather_stream(cls, data: dict, loop: asyncio.AbstractEventLoop) -> \"YTDLSource\":\n \"\"\"Used for preparing a stream.\n\n Since Youtube Streaming links expire.\n \"\"\"\n loop = loop or asyncio.get_event_loop()\n requester = data.get(\"requester\", \"no_requester\")\n to_run = partial(ytdl.extract_info, url=data.get(\"url\", \"no_url\"), download=False)\n data = await loop.run_in_executor(None, to_run)\n return cls(\n source=discord.FFmpegPCMAudio(data.get(\"url\", \"no_url\")),\n data=data,\n requester=requester,\n )\n","repo_name":"razy69/rbot","sub_path":"rbot/utils/yt_player.py","file_name":"yt_player.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70030565316","text":"\"\"\" \nLicensed under GNU GPL-3.0-or-later\n\nThis file is part of RS Companion.\n\nRS Companion is free software: you can redistribute 
it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nRS Companion is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with RS Companion. If not, see .\n\nAuthor: Phillip Riskin\nAuthor: Nathan Rogers\nDate: 2020\nProject: Companion App\nCompany: Red Scientific\nhttps://redscientific.com/index.html\n\"\"\"\n\nfrom RSCompanionAsync.Model.app_defs import LangEnum, app_name, company_name\nfrom enum import Enum, auto\n\n\nclass StringsEnum(Enum):\n TITLE = auto()\n CLOSE_TITLE = auto()\n CLOSE_APP_CONFIRM = auto()\n SF_TIMES = auto()\n HDR = auto()\n CREATE = auto()\n END = auto()\n START = auto()\n STOP = auto()\n\n\nenglish = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"Close \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"Close app? Any unsaved progress will be lost!\",\n StringsEnum.SF_TIMES: \"times_\",\n StringsEnum.HDR: \"timestamp, event, condition name, block #\",\n StringsEnum.CREATE: \"created\",\n StringsEnum.END: \"ended\",\n StringsEnum.START: \"started\",\n StringsEnum.STOP: \"stopped\",\n }\n\n# TODO: Verify translations\n# Dutch strings\ndutch = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"Dicht \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"App sluiten? Alle niet-opgeslagen voortgang gaat verloren!\",\n StringsEnum.SF_TIMES: \"keer_\",\n StringsEnum.HDR: \"tijdstempel, evenement\",\n StringsEnum.CREATE: \"creëer\",\n StringsEnum.END: \"einde\",\n StringsEnum.START: \"begin\",\n StringsEnum.STOP: \"hou op\",\n }\n\n# French strings\nfrench = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"Fermer \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"Fermer l'application? Tout progrès non enregistré sera perdu!\",\n StringsEnum.SF_TIMES: \"fois_\",\n StringsEnum.HDR: \"horodatage, événement\",\n StringsEnum.CREATE: \"créer\",\n StringsEnum.END: \"fin\",\n StringsEnum.START: \"début\",\n StringsEnum.STOP: \"arrêtez\",\n }\n\n# German strings\ngerman = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"Schließen \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"App schließen? Jeder nicht gespeicherte Fortschritt geht verloren!\",\n StringsEnum.SF_TIMES: \"mal_\",\n StringsEnum.HDR: \"Zeitstempel, Veranstaltung\",\n StringsEnum.CREATE: \"erstellen\",\n StringsEnum.END: \"ende\",\n StringsEnum.START: \"anfang\",\n StringsEnum.STOP: \"halt\",\n }\n\n# Russian strings\nrussian = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"закрывать \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"Закрыть приложение? Любой несохраненный прогресс будет потерян!\",\n StringsEnum.SF_TIMES: \"раз_\",\n StringsEnum.HDR: \"отметка времени, меропри��тие\",\n StringsEnum.CREATE: \"Создайте\",\n StringsEnum.END: \"Конец\",\n StringsEnum.START: \"Начало\",\n StringsEnum.STOP: \"стоп\",\n }\n\n# Spanish strings\nspanish = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"Cerrar \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"¿Cerrar app? 
¡Cualquier progreso no guardado se perderá!\",\n StringsEnum.SF_TIMES: \"veces_\",\n StringsEnum.HDR: \"marca de tiempo, evento\",\n StringsEnum.CREATE: \"crear\",\n StringsEnum.END: \"final\",\n StringsEnum.START: \"comienzo\",\n StringsEnum.STOP: \"detener\",\n }\n\n# Chinese (simplified) strings\nchinese = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"关闭 \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"关闭应用程式? 任何未保存的进度将丢失!\",\n StringsEnum.SF_TIMES: \"次_\",\n StringsEnum.HDR: \"时间戳记, 事件\",\n StringsEnum.CREATE: \"创建\",\n StringsEnum.END: \"结束\",\n StringsEnum.START: \"开始\",\n StringsEnum.STOP: \"停\",\n }\n\n# Japanese strings\njapanese = {StringsEnum.TITLE: app_name,\n StringsEnum.CLOSE_TITLE: \"閉じる \" + company_name,\n StringsEnum.CLOSE_APP_CONFIRM: \"アプリを閉じますか? 保存されていない進行状況は失われます!\",\n StringsEnum.SF_TIMES: \"回_\",\n StringsEnum.HDR: \"タイムスタンプ, 出来事\",\n StringsEnum.CREATE: \"作成する\",\n StringsEnum.END: \"終わり\",\n StringsEnum.START: \"開始\",\n StringsEnum.STOP: \"やめる\",\n }\n\nstrings = {LangEnum.ENG: english,\n LangEnum.DUT: dutch,\n LangEnum.FRE: french,\n LangEnum.GER: german,\n LangEnum.RUS: russian,\n LangEnum.SPA: spanish,\n LangEnum.CHI: chinese,\n LangEnum.JPN: japanese}\n","repo_name":"USnark772/RSCompanionV3","sub_path":"RSCompanionAsync/Resources/Strings/main_window_strings.py","file_name":"main_window_strings.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7527998873","text":"from .models import Article\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\n# Articulos\ndef any_article_id(article_name):\n\ttry:\n\t\tarticle = Article.objects.filter(name=article_name).first()\n\texcept ObjectDoesNotExist:\n\t\tarticle = None\n\treturn article.id if article is not None else None\n\n\ndef get_article_by_id(_id):\n\ttry:\n\t\tarticle = Article.objects.get(id=int(_id))\n\texcept ObjectDoesNotExist:\n\t\tarticle = None\n\n\treturn article\n","repo_name":"DCC-CC4401/2018-1-0xA-T3","sub_path":"tarea3_isw/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71109977153","text":"from AthenaConfiguration.AccumulatorCache import AccumulatorCache\nfrom TriggerMenuMT.HLT.Config.MenuComponents import MenuSequenceCA, SelectionCA, InEventRecoCA\nfrom AthenaConfiguration.ComponentFactory import CompFactory\nfrom TrigEDMConfig.TriggerEDMRun3 import recordable\nfrom TrigMuonHypo.TrigMuonHypoConfig import TrigMuonEFMSonlyHypoToolFromDict\nfrom TrigMuonHypo.TrigMuonHypoMonitoring import TrigMuonTLAHypoMonitoring\nfrom .MuonRecoSequences import muonNames\n\ndef getMuonCollections (chainPart):\n muNames = muonNames().getNames('RoI')\n muonName = muNames.EFCBName\n if 'msonly' in chainPart['msonlyInfo']:\n muonName = muNames.EFSAName\n\n return muonName\n\n@AccumulatorCache\ndef MuonTLASequenceCfg(flags, muons):\n \n ## add the InputMaker (event context) \n tlaMuonInputMakerAlg = CompFactory.InputMakerForRoI(\"IMTLAMuons\"+muons)\n tlaMuonInputMakerAlg.mergeUsingFeature = True\n tlaMuonInputMakerAlg.RoITool = CompFactory.ViewCreatorPreviousROITool()\n recoAcc = InEventRecoCA(\"MuonTLARecoSeq_\"+ muons,inputMaker=tlaMuonInputMakerAlg)\n \n sequenceOut = recordable(muons+\"_TLA\")\n # add the hypo\n hypo = CompFactory.TrigMuonTLAHypoAlg(\"TrigMuonTLAHypoAlg_\"+muons) \n hypo.TLAOutputName = sequenceOut \n hypo.MonTool = 
TrigMuonTLAHypoMonitoring(flags, \"TrigMuonTLAHypoAlg/\")\n\n    selAcc = SelectionCA(\"TrigMuonTLAMainSeq_\"+muons)\n    selAcc.mergeReco(recoAcc)\n    selAcc.addHypoAlgo(hypo)\n    return selAcc\n\ndef MuonTLAMenuSequenceCfg( flags, muChainPart):\n    muonsIn = getMuonCollections(muChainPart) \n    selAcc=MuonTLASequenceCfg(flags, muons=muonsIn)\n\n    return MenuSequenceCA( flags,\n                           selAcc,\n                           HypoToolGen = TrigMuonEFMSonlyHypoToolFromDict\n                           )\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Trigger/TriggerCommon/TriggerMenuMT/python/HLT/Muon/MuonTLASequenceConfig.py","file_name":"MuonTLASequenceConfig.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72963744194","text":"\nimport pendulum\nfrom airflow import DAG\nfrom airflow.operators.bash import BashOperator\nfrom airflow.operators.trigger_dagrun import TriggerDagRunOperator\n\nwith DAG(\n    dag_id=\"15_trigger_dag_run_operator\",\n    description=\"triggers 03_python_operator\",\n    schedule=None,\n    start_date=pendulum.datetime(2023, 8, 1, tz=\"Asia/Seoul\"),\n    catchup=False,\n    tags=[\"trigger\"],\n) as dag:\n    \n    task_start = BashOperator(\n        task_id=\"task_start\",\n        bash_command='echo \"START\"'\n    )\n    \n    task_trigger_dag_run = TriggerDagRunOperator(\n        task_id=\"task_trigger_dag_run\",\n        trigger_dag_id=\"03_python_operator\",\n        trigger_run_id=None,\n        execution_date=\"{{ data_interval_start }}\",\n        reset_dag_run=True,\n        wait_for_completion=False,\n        poke_interval=60,\n        allowed_states=[\"success\"],\n        failed_states=None,\n    )\n    \n    task_start >> task_trigger_dag_run","repo_name":"kida0/airflow-master","sub_path":"utils/dags/15_trigger_dag_run_operator.py","file_name":"15_trigger_dag_run_operator.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43381731058","text":"# 461. 
Hamming Distance\n\nclass Solution:\n    def hammingDistance(self, x: int, y: int) -> int:\n        # XOR keeps only the differing bits; count them with Kernighan's trick.\n        n = x ^ y\n        i = 0\n        while n != 0:\n            n &= (n - 1)\n            i += 1\n\n        return i","repo_name":"yunnyang/Practice_Code","sub_path":"Easy/leetCode461.py","file_name":"leetCode461.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24552266368","text":"import os\nimport sys\nfrom glob import glob\nimport logging\n\nimport numpy as np\nimport pandas as pd\n\nfrom src.evaluation import evaluate_timestep_melody, evaluate_duration_melody, SEQUENCE_LENGTHS, EVALUATION_METRICS\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogFormatter = logging.Formatter('%(levelname)7s - %(message)s')\n\nconsoleHandler = logging.StreamHandler(sys.stdout)\nconsoleHandler.setFormatter(logFormatter)\nlogger.addHandler(consoleHandler)\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nsrc_path = os.path.join(dir_path, '..', '..')\n\n\nif __name__ == \"__main__\":\n    metrics = {}\n    encoding_type = 'duration'\n\n    folder = os.path.join(src_path, 'data', 'encoded', encoding_type, 'mono')\n    filepaths = [y for x in os.walk(folder) for y in glob(os.path.join(x[0], '*.csv'))]\n\n    corpus_sequences = None\n\n    # logger.info('Calculate corpus sequences')\n    do_duration = False\n    # corpus_sequences = {}\n    # for l in SEQUENCE_LENGTHS:\n    #     corpus_sequences[l] = {}\n    #     for filepath in filepaths:\n    #         df = pd.read_csv(filepath, index_col=0)\n    #         df = df[df['type'] == 'improvised']\n    #         df = df.dropna()\n    #\n    #         for i in range(df.shape[0] - l + 1):\n    #             pitch = tuple(df.iloc[i:i + l, 2].astype(int).values)\n    #\n    #             if do_duration:\n    #                 duration = tuple(df.iloc[i:i + l, 3].astype(int).values)\n    #\n    #                 seq = (pitch, duration)\n    #             else:\n    #                 seq = pitch\n    #\n    #             corpus_sequences[l][seq] = True\n\n    logger.info('Calculate metrics')\n    for filepath in filepaths:\n        if encoding_type == 'timestep':\n            metrics[filepath] = evaluate_timestep_melody(filepath, corpus_sequences)\n        elif encoding_type == 'duration':\n            metrics[filepath] = evaluate_duration_melody(filepath, corpus_sequences)\n\n    metrics_df = pd.DataFrame().from_dict(metrics).T\n    metrics_df['HC-m'] = metrics_df['HC'].apply(np.mean)\n\n    for metric in EVALUATION_METRICS:\n        if metric in metrics_df.columns:\n            logger.info(f'{metric} - {metrics_df[metric].mean():5.3f} - {metrics_df[metric].std():5.3f}')\n        else:\n            logger.error(f'{metric} has not been calculated')\n\n    metrics_df.to_csv(os.path.join(folder, '..', 'metrics.csv'))\n","repo_name":"HikariNoMJ14/cool-cat-ai","sub_path":"src/scripts/evaluate_corpus.py","file_name":"evaluate_corpus.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21407314868","text":"\"\"\"Tools to perform combinatorial rationalization in Fairseq.\"\"\"\nimport math\nimport torch\nfrom fairseq import utils\n\nimport pdb\n\n\ndef decode_single_word(model, word_index, source=False):\n    if source:\n        dictionary = model.task.source_dictionary\n    else:\n        dictionary = model.task.target_dictionary\n    if model.bpe is None:\n        word_str = dictionary.symbols[word_index]\n        if word_index == 2:\n            return \"\"\n        elif word_index < 2 or word_index == 3:\n            return \"\"\n        else:\n            return word_str\n    return model.bpe.decode(dictionary.string([word_index]))\n\n\ndef decode_sequence(model, tokens, source=False):\n    return [decode_single_word(model, token, source) for token in 
tokens]\n\n@torch.no_grad()\ndef rationalize_occupation_model(model,\n                                input_ids,\n                                years,\n                                educations=None,\n                                ethnicities=None,\n                                genders=None,\n                                locations=None,\n                                verbose=False,\n                                max_steps=1024,\n                                start_step=0,\n                                max_tokens_per_batch=4096):\n  all_rationales = []\n  log = {}\n  num_tokens = len(input_ids)\n\n  input_text = decode_sequence(model, input_ids, source=False)\n  input_years_text = model.task._year_dictionary.string(years).split(\" \")\n  log['input_ids'] = list(input_ids.cpu().numpy())\n  log['input_text'] = input_text\n  log['input_years'] = input_years_text\n  log['rationalization'] = []\n\n  if verbose:\n    print(\"All tokens: {}\".format(input_text))\n    print(\"All years: {}\".format(input_years_text))\n  \n  all_positions = utils.make_positions(\n    input_ids[None], model.model.decoder.padding_idx)\n  \n  # Perform greedy rationalization for each token in the sequence, starting\n  # from `start_step`.\n  for prev_token in range(start_step, num_tokens - 1):\n    goal_word_text = input_text[prev_token + 1]\n    token_log = {}\n    token_log['target_position'] = prev_token + 1\n    token_log['goal_job'] = goal_word_text\n    token_log['log'] = []\n    \n    # Initialize the rationale. The rationale must always include the most\n    # recent token.\n    rationale = [prev_token]\n\n    if verbose:\n      print(\"Currently rationalizing token {}: '{}'\".format(\n        prev_token + 1, goal_word_text))\n    \n    for rationale_size in range(1, min(max_steps + 1, prev_token + 2)):\n      if rationale_size == 1:\n        # A rationale of size 1 can only include the most recent target token.\n        decoder_out = model.model.decoder(\n          prev_output_tokens=input_ids[prev_token:(prev_token + 1)][None],\n          years=years[prev_token:(prev_token + 1)][None],\n          educations=educations[prev_token:(prev_token + 1)][None],\n          ethnicities=ethnicities[None],\n          genders=genders[None],\n          locations=locations[None],\n          position_ids=all_positions[:, prev_token:(prev_token + 1)])\n        best_probs = model.model.get_normalized_probs(\n          decoder_out, log_probs=True, \n          two_stage=model.model.decoder.args.two_stage, \n          prev_tokens=input_ids[prev_token:(prev_token + 1)][None])[0, -1].exp()\n        added_token_text = input_text[prev_token]\n        added_token_position = prev_token\n        added_year_text = input_years_text[prev_token]\n        if verbose:\n          added_token_string = (\"Adding previous token to sequence: \"\n                                \"'{}' (year: {})\".format(added_token_text,\n                                                         added_year_text))\n      else:\n        # Consider the current rationale + each target token\n        candidates = [sorted(rationale + [x]) for x in range(prev_token + 1) \n                      if x not in rationale]\n        # Index with the list of candidate subsets to gather one row per candidate.\n        candidate_input_ids = input_ids[candidates]\n        candidate_years = years[candidates]\n        candidate_educations = educations[candidates]\n        candidate_ethnicities = ethnicities[None].repeat([len(candidates), 1])\n        candidate_genders = genders[None].repeat([len(candidates), 1])\n        candidate_locations = locations[None].repeat([len(candidates), 1])\n        candidate_position_ids = all_positions[0, candidates]\n\n        # Divide the candidates into batches, since all possible subsets may\n        # not fit in memory if we pass them to the model at once.\n        num_candidates, seq_len = candidate_input_ids.shape\n        batch_size = math.floor(max_tokens_per_batch / seq_len)\n        num_batches = math.ceil(num_candidates / batch_size)\n        best_prob = -float(\"inf\")\n        for batch_ind in range(num_batches):\n          batch_start_ind = batch_ind * batch_size\n          batch_end_ind = (batch_ind + 1) * batch_size\n          batch_input_ids = candidate_input_ids[batch_start_ind:batch_end_ind]\n          batch_years = candidate_years[batch_start_ind:batch_end_ind]\n          
batch_educations = candidate_educations[\n            batch_start_ind:batch_end_ind]\n          batch_ethnicities = candidate_ethnicities[\n            batch_start_ind:batch_end_ind]\n          batch_genders = candidate_genders[batch_start_ind:batch_end_ind]\n          batch_locations = candidate_locations[batch_start_ind:batch_end_ind]\n          batch_position_ids = candidate_position_ids[\n            batch_start_ind:batch_end_ind]\n          batch_decoder_out = model.model.decoder(\n            batch_input_ids,\n            years=batch_years,\n            educations=batch_educations,\n            ethnicities=batch_ethnicities,\n            genders=batch_genders,\n            locations=batch_locations,\n            position_ids=batch_position_ids)\n          batch_probs = model.model.get_normalized_probs(\n            batch_decoder_out, log_probs=True,\n            two_stage=model.model.decoder.args.two_stage,\n            prev_tokens=batch_input_ids).exp()[:, -1]\n          true_token_probs = batch_probs[:, input_ids[prev_token + 1]]\n          # Keep the candidate that assigns the true token its highest\n          # probability seen so far across batches.\n          if true_token_probs.max() > best_prob:\n            best_prob = true_token_probs.max()\n            best_token = true_token_probs.argmax() + batch_start_ind\n            best_probs = batch_probs[true_token_probs.argmax()]\n        \n        best_token_position = set(candidates[best_token]) - set(rationale)\n        best_token_position = best_token_position.pop()\n        rationale.append(best_token_position)\n        added_token = input_text[best_token_position]\n        added_year_text = input_years_text[best_token_position]\n        added_token_string = \"Adding token: '{}' (year: {})\".format(\n          added_token, added_year_text)\n        added_token_text = input_text[best_token_position]\n        added_token_position = best_token_position\n      \n      predicted_word_id = best_probs.argmax().item()\n      predicted_word_prob = best_probs.max().item()\n      predicted_word_text = decode_single_word(\n        model, predicted_word_id, source=False)\n      top_2_word_id = best_probs.topk(2).indices[1].item()\n      top_2_word_prob = best_probs.topk(2).values[1].item()\n      top_2_word_text = decode_single_word(model, top_2_word_id, source=False)\n\n      true_token_prob = best_probs[input_ids[prev_token + 1]].item()\n      token_log['log'].append({\n        \"rationale_size\": rationale_size,\n        \"added_token_position\": added_token_position,\n        \"added_token_text\": added_token_text,\n        \"added_year_text\": added_year_text,\n        \"prediction\": predicted_word_text,\n        \"prediction_prob\": predicted_word_prob,\n        \"top_2_prediction\": top_2_word_text,\n        \"top_2_prediction_prob\": top_2_word_prob,\n        \"true_token_prob\": true_token_prob,\n      })\n      if verbose:\n        print(\"{}. This makes the top predicted job: '{}', and the second \"\n              \"most-likely job: '{}'. 
P('{}') = {:.3f}\".format(\n added_token_string, predicted_word_text, top_2_word_text,\n goal_word_text, true_token_prob))\n # Our combinatorial optimization is complete when the predicted token is\n # the true token or the second-most-likely token.\n if (torch.argmax(best_probs) == input_ids[prev_token + 1] or \n top_2_word_id == input_ids[prev_token + 1]):\n if verbose:\n print(\"When predicting: '{}'\".format(goal_word_text))\n print(\" The rationale is: {}\".format(\n ', '.join([input_text[x] + \" [\" + input_years_text[x] + \"]\" \n for x in sorted(rationale)])))\n print(\"Finished with {} tokens.\".format(rationale_size))\n print(\"..........\")\n break\n # When we've finished rationalizing, add the rationale to the complete \n # rationale list.\n all_rationales.append(rationale)\n token_log['rationale'] = rationale\n reached_argmax = (predicted_word_id == input_ids[prev_token + 1] or \n top_2_word_id == input_ids[prev_token + 1])\n token_log['reached_argmax'] = reached_argmax.item()\n log['rationalization'].append(token_log)\n \n log['all_rationales'] = all_rationales\n return all_rationales, log\n","repo_name":"keyonvafa/career-code","sub_path":"fairseq/rationalization.py","file_name":"rationalization.py","file_ext":"py","file_size_in_byte":9040,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"29919211858","text":"import sys\nimport numpy as np, numpy.random as nr\nfrom six import StringIO, b\nfrom util import softmax_prob\nfrom gym import utils\nimport discrete_env\n\n \nLEFT = 0\nDOWN = 1\nRIGHT = 2\nUP = 3\n\nMAPS = {\n \"4x4\": [\n \"SFFF\",\n \"FHFH\",\n \"FFFH\",\n \"HFFG\"\n ],\n \"8x8\": [\n \"SFFFFFFF\",\n \"FFFFFFFF\",\n \"FFFHFFFF\",\n \"FFFFFHFF\",\n \"FFFHFFFF\",\n \"FHHFFFHF\",\n \"FHFFHFHF\",\n \"FFFHFFFG\"\n ],\n}\n\nclass FrozenLakeEnv(discrete_env.DiscreteEnv):\n \"\"\"\n Winter is here. 
You and your friends were tossing around a frisbee at the park\n when you made a wild throw that left the frisbee out in the middle of the lake.\n The water is mostly frozen, but there are a few holes where the ice has melted.\n If you step into one of those holes, you'll fall into the freezing water.\n At this time, there's an international frisbee shortage, so it's absolutely imperative that\n you navigate across the lake and retrieve the disc.\n However, the ice is slippery, so you won't always move in the direction you intend.\n The surface is described using a grid like the following\n\n SFFF\n FHFH\n FFFH\n HFFG\n\n S : starting point, safe\n F : frozen surface, safe\n H : hole, fall to your doom\n G : goal, where the frisbee is located\n\n The episode ends when you reach the goal or fall in a hole.\n You receive a reward of 1 if you reach the goal, and zero otherwise.\n\n \"\"\"\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def __init__(self, desc=None, map_name=\"4x4\",is_slippery=True):\n if desc is None and map_name is None:\n raise ValueError('Must provide either desc or map_name')\n elif desc is None:\n desc = MAPS[map_name]\n self.desc = desc = np.asarray(desc,dtype='c')\n self.nrow, self.ncol = nrow, ncol = desc.shape\n\n nA = 4\n nS = nrow * ncol\n\n isd = np.array(desc == b'S').astype('float64').ravel()\n isd /= isd.sum()\n\n P = {s : {a : [] for a in range(nA)} for s in range(nS)}\n\n def to_s(row, col):\n return row*ncol + col\n def inc(row, col, a):\n if a==0: # left\n col = max(col-1,0)\n elif a==1: # down\n row = min(row+1,nrow-1)\n elif a==2: # right\n col = min(col+1,ncol-1)\n elif a==3: # up\n row = max(row-1,0)\n return (row, col)\n\n for row in range(nrow):\n for col in range(ncol):\n s = to_s(row, col)\n for a in range(4):\n li = P[s][a]\n letter = desc[row, col]\n if letter in b'GH':\n li.append((1.0, s, 0, True))\n else:\n if is_slippery:\n for b in [(a-1)%4, a, (a+1)%4]:\n newrow, newcol = inc(row, col, b)\n newstate = to_s(newrow, newcol)\n newletter = desc[newrow, newcol]\n done = bytes(newletter) in b'GH'\n rew = float(newletter == b'G')\n li.append((0.8 if b==a else 0.1, newstate, rew, done))\n else:\n newrow, newcol = inc(row, col, a)\n newstate = to_s(newrow, newcol)\n newletter = desc[newrow, newcol]\n done = bytes(newletter) in b'GH'\n rew = float(newletter == b'G')\n li.append((1.0, newstate, rew, done))\n\n super(FrozenLakeEnv, self).__init__(nS, nA, P, isd)\n\n def _render(self, mode='human', close=False):\n if close:\n return\n outfile = StringIO() if mode == 'ansi' else sys.stdout\n\n row, col = self.s // self.ncol, self.s % self.ncol\n desc = self.desc.tolist()\n desc = [[c.decode('utf-8') for c in line] for line in desc]\n desc[row][col] = utils.colorize(desc[row][col], \"red\", highlight=True)\n if self.lastaction is not None:\n outfile.write(\" ({})\\n\".format([\"Left\",\"Down\",\"Right\",\"Up\"][self.lastaction]))\n else:\n outfile.write(\"\\n\")\n outfile.write(\"\\n\".join(''.join(line) for line in desc)+\"\\n\")\n\n return outfile\n\n def demonstrate(self):\n print(\" Let's look at a random episode...\")\n self.reset()\n for t in range(100):\n self.render()\n a = self.action_space.sample()\n ob, rew, done, _ = self.step(a)\n if done:\n break\n assert done\n self.render()\n\n print(\" In the episode above, the agent falls into a hole after two timesteps.\")\n print(\" Also note the stochasticity -- on the first step, the DOWN action is\")\n print(\" selected, but the agent moves to the right.\")\n print(\"\\n\")\n \n print(\"Let 
us look at the transition model of the Frozen Lake Problem now.\\n\")\n print(\"env.P is a two-level dict where the first key is the state and the second key is the action.\")\n print(\"The 2D grid cells are associated with indices [0, 1, 2, ..., 15] from left to right and top to down, as in\")\n print(np.arange(16).reshape(4,4))\n print(\"env.P[state][action] is a list of tuples (probability, nextstate, reward).\\n\")\n print(\"For example, state 0 is the initial state, and the transition information for s=0, a=0 is \\nP[0][0] =\", self.P[0][0], \"\\n\")\n print(\"As another example, state 5 corresponds to a hole in the ice, which transitions to itself with probability 1 and reward 0.\")\n print(\"P[5][0] =\", self.P[5][0], '\\n')\n print(\"\\n\")\n\n\nclass Policy(object):\n def step(self, o):\n \"\"\"\n Return dict including\n\n required: \n a : actions\n optional:\n pa : specifies probability distribution that 'a' was sampled from\n [whatever else your learning algorithm will need]\n \"\"\"\n raise NotImplementedError\n \nclass RandomDiscreteActionChooser(Policy):\n def __init__(self, n_actions):\n self.n_actions = n_actions\n def step(self, observation):\n return {\"action\":np.array([nr.randint(0, self.n_actions)])}\n \nclass FrozenLakeTabularPolicy(Policy):\n def __init__(self, n_states):\n self.n_states = n_states\n self.n_actions = n_actions = 4 \n self.f_sa = np.zeros((n_states, n_actions))\n\n def step(self, s_n):\n f_na = self.f_sa[s_n]\n prob_nk = softmax_prob(f_na)\n acts_n = discrete_env.cat_sample(prob_nk)\n return {\"action\": acts_n,\n \"pdist\" : f_na}\n\n def compute_pdists(self, s_n):\n return self.f_sa[s_n]\n\n def compute_entropy(self, f_na):\n prob_nk = softmax_prob(f_na)\n return discrete_env.cat_entropy(prob_nk)\n\n def compute_kl(self, f0_na, f1_na):\n p0_na = softmax_prob(f0_na)\n p1_na = softmax_prob(f1_na)\n return discrete_env.cat_kl(p0_na, p1_na)\n \n\ndef rollout(env, policy, max_pathlength):\n \"\"\"\n Simulate the env and policy for max_pathlength steps\n \"\"\"\n ob = env.reset()\n ob = np.array([ob])\n terminated = False\n\n obs = []\n actions = []\n rewards = []\n pdists = []\n for _ in range(max_pathlength):\n obs.append(ob)\n pol_out = policy.step(ob)\n action = pol_out[\"action\"] \n actions.append(action)\n pdists.append(pol_out.get(\"pdist\",[None]))\n\n ob, rew, done, _ = env.step(action[0])\n ob = np.array([ob])\n rewards.append(rew)\n if done:\n terminated = True\n break\n return {\"observations\" : np.concatenate(obs), \"pdists\" : np.concatenate(pdists), \n \"terminated\" : terminated, \"rewards\" : np.array(rewards), \"actions\" : np.concatenate(actions)}\n\ndef animate_rollout(env, policy, horizon=100, delay=0.05):\n \"\"\"\n Do rollouts and plot at each timestep\n delay : time to sleep at each step\n \"\"\"\n import time\n obs = env.reset()\n env.render()\n for i in range(horizon):\n a = policy.step(np.array([obs]))[\"action\"]\n obs, _rew, done, _ = env.step(a[0])\n env.render()\n if done:\n print (\"terminated after %s timesteps\"%(i+1))\n break\n time.sleep(delay)\n \n\n","repo_name":"haoyuanz13/Machine-Learning-in-Robotics","sub_path":"Reinforcement_Learning/src/frozen_lake.py","file_name":"frozen_lake.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"36157919102","text":"import numpy as np\n\nfrom DataProcessing.LoadData import dat_to_train_test, to_tf_dataset, \\\n rm_unlabelled_samples, to_batch_dataset, split_data_labels\nfrom 
Model.RFMalwareDetection import MalwareDetectionRF\n\nFILTERED_DATASET_SIZES = 600000\n\nBATCH_SIZE = 1000\nEPOCHS = 125\n\n# CHECK: Ensure that this is the correct path to the dataset\nDATA_DIR = './Data/dat/'\n\nif __name__ == '__main__':\n\n    x_train, y_train, x_test, y_test = dat_to_train_test(DATA_DIR)\n\n    '''# Normalize the data using robust scaler\n    print(\"Normalizing data...\")\n    x_train_scaled = normalize_data(x_train)\n    x_test_scaled = normalize_data(x_test)\n    print(\"Data normalization complete...\")\n\n    # Apply PCA dimensionality reduction\n    print(\"Computing PCA for dimensionality reduction...\")\n    x_train_pca, x_test_pca = dataset_pca_reduction(x_train_scaled, x_test_scaled)\n    print(\"PCA dimensionality reduction complete...\")\n\n    # Number of components kept by PCA\n    num_components_pca = len(x_train_pca[1])\n    print(num_components_pca)'''\n\n    unfiltered_train_ds = to_tf_dataset(x_train, y_train)  # _pca\n    unfiltered_test_ds = to_tf_dataset(x_test, y_test)  # _pca\n\n    # Filter out the data with label '-1' (unlabeled)\n    filtered_train_ds = rm_unlabelled_samples(unfiltered_train_ds)\n    filtered_test_ds = rm_unlabelled_samples(unfiltered_test_ds)\n\n    train_ds = filtered_train_ds.take(int(0.85 * FILTERED_DATASET_SIZES))\n    val_ds = filtered_train_ds.skip(int(0.85 * FILTERED_DATASET_SIZES))\n\n    # Batch the 85/15 train/validation split\n    train_ds = to_batch_dataset(train_ds, BATCH_SIZE)\n    val_ds = to_batch_dataset(val_ds, BATCH_SIZE)\n    test_ds = to_batch_dataset(filtered_test_ds, BATCH_SIZE)\n\n    train_data, train_labels = split_data_labels(train_ds, 10000)\n    test_data, test_labels = split_data_labels(test_ds, 1000)\n\n    RandomForest = MalwareDetectionRF(num_trees=1000, verbose=1)\n    print(\"Training Random Forest...\")\n    RandomForest.train(train_data, train_labels)\n    print(\"Random Forest successfully trained...\")\n    print(RandomForest.evaluate(train_data, train_labels))\n\n    print(RandomForest.evaluate(test_data, test_labels))\n","repo_name":"willmacd/dl-malware-detection","sub_path":"mainRF.py","file_name":"mainRF.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74367863235","text":"import numpy as np\n\n# LDA: two-class worked example, printed step by step\n\nA = np.array([[4, 1], [2, 4], [2, 3], [3, 6], [4, 4]])\nprint(A)\nmean_A = np.mean(A, axis=0, keepdims=True)\nprint(mean_A)\nsigma_A = np.cov(A.T, bias=True)\nprint(sigma_A)\n\nprint(\"====================\")\n\nB = np.array([[9, 10], [6, 8], [9, 5], [8, 7], [10, 8]])\nprint(B)\nmean_B = np.mean(B, axis=0, keepdims=True)\nprint(mean_B)\nsigma_B = np.cov(B.T, bias=True)\nprint(sigma_B)\n\nprint(\"====================\")\n\nS_W = sigma_A+sigma_B\nprint(S_W)\n\nprint(\"====================\")\nS_B = np.dot((mean_A - mean_B).T, (mean_A-mean_B))\nprint(S_B)\n\nprint(\"====================\")\n\neig_value, eig_vector = np.linalg.eig(np.dot(np.linalg.inv(S_W), S_B))\nprint(eig_vector.T[0])\n\nprint(\"====================\")\n\n\n# PCA\n\n\nclass PCA:\n    def __init__(self, n_components, whiten=False):\n        self.n=n_components\n        self.s=whiten\n\n    ### Householder reflections and QR decomposition, used below to compute\n    ### eigenvalues and eigenvectors via QR iteration\n\n    def householder_reflection(self, a, e):\n\n        assert a.ndim == 1\n        assert np.allclose(1, np.sum(e ** 2))\n\n        u = a - np.sign(a[0]) * np.linalg.norm(a) * e\n        v = u / np.linalg.norm(u)\n        H = np.eye(len(a)) - 2 * np.outer(v, v)\n\n        return H\n\n    def qr_decomposition(self, A):\n\n        n, m = A.shape\n        assert n >= m\n\n        Q = np.eye(n)\n        R = A.copy()\n\n        for i in range(m - int(n == m)):\n            r = R[i:, i]\n\n            if 
np.allclose(r[1:], 0):\n                continue\n\n            e = np.zeros(n - i)\n            e[0] = 1\n\n            H = np.eye(n)\n            H[i:, i:] = self.householder_reflection(r, e)\n\n            Q = np.dot(Q, H.T)\n            R = np.dot(H, R)\n\n        return Q, R\n\n    def eigen_decomposition(self, covariance, max_iter=1000):\n        covariance_k = covariance\n        Q_k = np.eye(covariance.shape[1])\n\n        for k in range(max_iter):\n            Q, R = self.qr_decomposition(covariance_k)\n            Q_k = np.dot(Q_k, Q)\n            covariance_k = np.dot(R, Q)\n\n        eigenvalues = np.diag(covariance_k)\n        eigenvectors = Q_k\n        return eigenvalues, eigenvectors\n\n    def explain_variance(self):\n        return self.values / np.sum(self.values)\n\n    def all_variance(self):\n        return self.eigens / np.sum(self.eigens)\n\n    def fit(self, X):\n        n, m = X.shape\n        print(X)\n        print(\"====================\")\n        self.mu = X.mean(axis=0)\n        print(self.mu)\n        print(\"====================\")\n        X = X - self.mu\n        if self.s:\n            self.std = X.std(axis=0)\n            X = X/self.std\n        covariance = np.matmul(X.T, X)/(n-1)\n        print(covariance)\n        print(\"====================\")\n        self.values, self.vectors = self.eigen_decomposition(covariance)\n        print(self.values)\n        print(self.vectors)\n        print(\"====================\")\n\n        self.eigens = self.values\n\n        # self.values, self.vectors = np.linalg.eig(covariance)\n        descending_order = np.argsort(-1*self.values)\n        self.values = self.values[descending_order]\n        self.vectors = self.vectors[:, descending_order]\n        if self.n is not None:\n            self.values = self.values[0:self.n]\n            self.vectors = self.vectors[:, 0:self.n]\n        # self.vectors = self.vectors.T\n\n    def transform(self, X):\n        X = X - self.mu\n        if self.s:\n            X = X / self.std\n        return np.dot(X, self.vectors)\n\n\nprint(\"====================\")\nprint(\"====================\")\nprint(\"====================\")\n\nprint(\"PCA\")\npca = PCA(2)\npca.fit(A)\nX = pca.transform(A)\nprint(X)\n\nprint(\"====================\")\n","repo_name":"forsakenMystery/data-mining","sub_path":"Clustering/maa.py","file_name":"maa.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42683955946","text":"import argparse\nimport logging\nimport random\nimport time\n\nimport datasets\nimport numpy as np\nimport torch\nfrom datasets import load_dataset\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import tqdm\n\nimport evaluate\nimport transformers\nfrom accelerate import Accelerator\nfrom accelerate.logging import get_logger\nfrom transformers import (\n    AutoModelForSeq2SeqLM,\n    AutoTokenizer,\n    DataCollatorForSeq2Seq,\n    MBartTokenizer,\n    MBartTokenizerFast,\n    default_data_collator,\n    get_scheduler,\n)\n\ntorch.backends.cuda.matmul.allow_tf32 = True\nlogger = get_logger(__name__)\n\n\n# Parsing input arguments\ndef parse_args():\n\n    parser = argparse.ArgumentParser(description=\"Finetune a transformers model on a translation task\")\n    parser.add_argument(\n        \"--model_name_or_path\",\n        type=str,\n        help=\"Path to pretrained model or model identifier from huggingface.co/models.\",\n        default=\"t5-small\",\n    )\n    parser.add_argument(\n        \"--max_length\",\n        type=int,\n        default=128,\n        help=(\n            \"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,\"\n            \" sequences shorter will be padded unless `--dynamic_length` is passed.\"\n        ),\n    )\n    parser.add_argument(\n        \"--dynamic_length\",\n        action=\"store_true\",\n        help=\"If passed, dynamic padding is used. 
Otherwise, all samples are padded to `max_length`.\",\n    )\n    parser.add_argument(\n        \"--batch_size\",\n        type=int,\n        default=16,\n        help=\"Batch size (per device) for the dataloaders.\",\n    )\n    parser.add_argument(\n        \"--num_epochs\",\n        type=int,\n        default=1,\n        help=\"Number of training epochs.\",\n    )\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"A seed for reproducible training.\")\n    parser.add_argument(\"--dynamo_backend\", type=str, default=\"no\", help=\"Dynamo backend\")\n    parser.add_argument(\"--mixed_precision\", type=str, default=\"no\", help=\"`no` or `fp16`\")\n    return parser.parse_args()\n\n\ndef main():\n    args = parse_args()\n    torch.manual_seed(args.seed)\n    accelerator = Accelerator(dynamo_backend=args.dynamo_backend, mixed_precision=args.mixed_precision)\n\n    # Make one log on every process with the configuration for debugging.\n    logging.basicConfig(\n        format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n        datefmt=\"%m/%d/%Y %H:%M:%S\",\n        level=logging.INFO,\n    )\n    logger.info(accelerator.state, main_process_only=False)\n    if accelerator.is_local_main_process:\n        datasets.utils.logging.set_verbosity_warning()\n        transformers.utils.logging.set_verbosity_info()\n    else:\n        datasets.utils.logging.set_verbosity_error()\n        transformers.utils.logging.set_verbosity_error()\n\n    # Load data\n    raw_datasets = load_dataset(\"wmt16\", \"ro-en\")\n\n    # Load pretrained model and tokenizer\n    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n    model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path)\n\n    # MBART requires some language codes\n    if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):\n        tokenizer.src_lang = \"en_XX\"\n        tokenizer.tgt_lang = \"ro_RO\"\n    if model.config.decoder_start_token_id is None:\n        if isinstance(tokenizer, MBartTokenizer):\n            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[\"ro_RO\"]\n        else:\n            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(\"ro_RO\")\n\n    # T5 requires a prefix\n    if args.model_name_or_path in [\"t5-small\", \"t5-base\", \"t5-large\", \"t5-3b\", \"t5-11b\"]:\n        prefix = \"translate English to Romanian: \"\n    else:\n        prefix = \"\"\n\n    # Preprocessing the datasets.\n    padding = False if args.dynamic_length else \"max_length\"\n\n    def preprocess_function(examples):\n        inputs = [ex[\"en\"] for ex in examples[\"translation\"]]\n        targets = [ex[\"ro\"] for ex in examples[\"translation\"]]\n        inputs = [prefix + inp for inp in inputs]\n        model_inputs = tokenizer(\n            inputs, text_target=targets, max_length=args.max_length, padding=padding, truncation=True\n        )\n\n        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore\n        # padding in the loss.\n        if padding == \"max_length\":\n            model_inputs[\"labels\"] = [\n                [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in model_inputs[\"labels\"]\n            ]\n\n        return model_inputs\n\n    with accelerator.main_process_first():\n        processed_datasets = raw_datasets.map(\n            preprocess_function,\n            batched=True,\n            remove_columns=raw_datasets[\"train\"].column_names,\n            desc=\"Running tokenizer on dataset\",\n        )\n\n    train_dataset = processed_datasets[\"train\"]\n    eval_dataset = processed_datasets[\"validation\"]\n\n    # Log a few random samples from the training set:\n    for index in random.sample(range(len(train_dataset)), 3):\n        logger.info(f\"Sample {index} of the training set: {train_dataset[index]}.\")\n\n    # DataLoaders creation:\n    if not args.dynamic_length:\n        data_collator = default_data_collator\n    
else:\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n model=model,\n label_pad_token_id=-100,\n pad_to_multiple_of=8,\n )\n\n train_dataloader = DataLoader(\n train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.batch_size, drop_last=True\n )\n eval_dataloader = DataLoader(\n eval_dataset, collate_fn=data_collator, batch_size=args.batch_size, drop_last=not args.dynamic_length\n )\n\n # Optimizer\n optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)\n\n # Scheduler.\n lr_scheduler = get_scheduler(\n name=\"linear\",\n optimizer=optimizer,\n num_warmup_steps=0,\n num_training_steps=len(train_dataloader) * args.num_epochs,\n )\n\n # Prepare everything with our `accelerator`.\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(\n model, optimizer, train_dataloader, eval_dataloader, lr_scheduler\n )\n\n # Metric\n metric = evaluate.load(\"sacrebleu\")\n\n def postprocess_text(preds, labels):\n preds = [pred.strip() for pred in preds]\n labels = [[label.strip()] for label in labels]\n\n return preds, labels\n\n # Train!\n # Only show the progress bar once on each machine.\n train_steps = min(len(train_dataloader) * args.num_epochs, 1000)\n progress_bar = tqdm(range(train_steps), disable=not accelerator.is_local_main_process)\n start_time = time.time()\n\n for epoch in range(args.num_epochs):\n model.train()\n for step, batch in enumerate(train_dataloader):\n # We need to skip steps until we reach the resumed step\n outputs = model(**batch)\n loss = outputs.loss\n accelerator.backward(loss)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n if step == 0 and epoch == 0:\n first_step_time = time.time() - start_time\n elif step >= 1000:\n break\n\n total_training_time = time.time() - start_time\n avg_iteration_time = (total_training_time - first_step_time) / (train_steps - 1)\n print(\"Training finished.\")\n print(f\"First iteration took: {first_step_time:.2f}s\")\n print(f\"Average time after the first iteration: {avg_iteration_time * 1000:.2f}ms\")\n\n model.eval()\n start_time = time.time()\n for step, batch in enumerate(eval_dataloader):\n with torch.no_grad():\n generated_tokens = accelerator.unwrap_model(model).generate(\n batch[\"input_ids\"], attention_mask=batch[\"attention_mask\"], max_length=args.max_length\n )\n generated_tokens = accelerator.pad_across_processes(\n generated_tokens, dim=1, pad_index=tokenizer.pad_token_id\n )\n labels = batch[\"labels\"]\n if args.dynamic_length:\n labels = accelerator.pad_across_processes(batch[\"labels\"], dim=1, pad_index=tokenizer.pad_token_id)\n\n generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()\n labels = accelerator.gather(labels).cpu().numpy()\n\n # Replace -100 in the labels as we can't decode them.\n labels = np.where(labels != -100, labels, tokenizer.pad_token_id)\n\n decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)\n decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n\n decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)\n\n metric.add_batch(predictions=decoded_preds, references=decoded_labels)\n if step == 0:\n first_step_time = time.time() - start_time\n\n total_eval_time = time.time() - start_time\n avg_iteration_time = (total_eval_time - first_step_time) / (len(eval_dataloader) - 1)\n\n print(\"Evaluation finished.\")\n print(f\"First iteration took: {first_step_time:.2f}s\")\n print(f\"Average time after the first 
iteration: {avg_iteration_time * 1000:.2f}ms\")\n\n    eval_metric = metric.compute()\n    print(f\"Test BLEU score for backend {args.dynamo_backend}: {eval_metric['score']}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"sgugger/torchdynamo-tests","sub_path":"scripts/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":9527,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"33229606564","text":"import json\nimport os\n\nimport languages\n\n\n# Writes specified data to the specified file\ndef write_json(data, filename):\n    with open(filename, 'w') as file:\n        json.dump(data, file, indent=4)\n\n\ndef change_language(language='En'):\n    language = language.lower()\n    language_dict = {}\n    if language == 'en':\n        language_dict = languages.language_english\n    elif language == \"au\":\n        language_dict = languages.language_australian\n    edit_config('language', language)\n    return language_dict\n\n\ndef init_bot():\n    file_path = 'config.json'\n    if not os.path.exists(file_path):\n        print('Error initializing the bot, missing config file')\n    else:\n        with open(file_path) as file:\n            data = json.load(file)\n        token = data['token']\n        language = data['language']\n        return token, language\n\n\ndef edit_config(key, new_value):\n    file_path = 'config.json'\n    if not os.path.exists(file_path):\n        print('Error initializing the bot, missing config file')\n    else:\n        with open(file_path) as file:\n            data = json.load(file)\n        data[key] = new_value\n        write_json(data, file_path)\n","repo_name":"lmikel1337/TGBot_Organizer","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2483439117","text":"from fastapi import FastAPI\nfrom contextlib import asynccontextmanager\n\nfrom tortoise import Tortoise\n\nimport pwncore.docs as docs\nimport pwncore.routes as routes\n\nfrom pwncore.container import docker_client\nfrom pwncore.config import config\nfrom pwncore.models import Container\n\n\n@asynccontextmanager\nasync def app_lifespan(app: FastAPI):\n    # Startup\n    await Tortoise.init(db_url=config.db_url, modules={\"models\": [\"pwncore.models\"]})\n    await Tortoise.generate_schemas()\n\n    yield\n    # Shutdown\n    # Stop and remove all running containers\n    containers = await Container.all().values()\n    await Container.all().delete()\n    for db_container in containers:\n        try:\n            container = await docker_client.containers.get(db_container[\"docker_id\"])\n            await container.stop()\n            await container.delete()\n        except (\n            Exception\n        ):  # Raises DockerError if container does not exist, just pass for now.\n            pass\n\n    # close_connections is deprecated, not sure how to use connections.close_all()\n    await Tortoise.close_connections()\n    await docker_client.close()\n\n\napp = FastAPI(\n    title=\"Pwncore\",\n    openapi_tags=docs.tags_metadata,\n    description=docs.description,\n    lifespan=app_lifespan,\n)\napp.include_router(routes.router)\n","repo_name":"lugvitc/pwncore","sub_path":"src/pwncore/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"733602313","text":"def solution(dirs):\n    answer = 0\n\n    # store the current coordinates\n    cur_x = 0\n    cur_y = 0\n\n    # create a list to store the path segments already visited\n    visited_path = list()\n\n    for char in dirs:\n        if char == \"U\":\n            # current y coordinate less than 5? 
moving up is possible!\n            if cur_y < 5:\n                cur_y += 1\n                # +1 if this segment is visited for the first time\n                if not [[cur_x, cur_y - 1], [cur_x, cur_y]] in visited_path:\n                    visited_path.append([[cur_x, cur_y - 1], [cur_x, cur_y]])\n                    answer += 1\n            else:\n                continue\n\n        elif char == \"D\":\n            # current y coordinate greater than -5? moving down is possible!\n            if -5 < cur_y:\n                cur_y -= 1\n                # +1 if this segment is visited for the first time\n                if not [[cur_x, cur_y], [cur_x, cur_y + 1]] in visited_path:\n                    visited_path.append([[cur_x, cur_y], [cur_x, cur_y + 1]])\n                    answer += 1\n            else:\n                continue\n\n        elif char == \"R\":\n            # current x coordinate less than 5? moving right is possible!\n            if cur_x < 5:\n                cur_x += 1\n                # +1 if this segment is visited for the first time\n                if not [[cur_x - 1, cur_y], [cur_x, cur_y]] in visited_path:\n                    visited_path.append([[cur_x - 1, cur_y], [cur_x, cur_y]])\n                    answer += 1\n            else:\n                continue\n\n        elif char == \"L\":\n            # current x coordinate greater than -5? moving left is possible!\n            if -5 < cur_x:\n                cur_x -= 1\n                # +1 if this segment is visited for the first time\n                if not [[cur_x, cur_y], [cur_x + 1, cur_y]] in visited_path:\n                    visited_path.append([[cur_x, cur_y], [cur_x + 1, cur_y]])\n                    answer += 1\n            else:\n                continue\n\n    return answer\n\n\nprint(solution(\"ULURRDLLU\"))\nprint(solution(\"LULLLLLLU\"))\n","repo_name":"reload1bronze/algorithm-hh99","sub_path":"2주차_그래프_트리_비선형_자료구조/test_programmers_49994_방문길이.py","file_name":"test_programmers_49994_방문길이.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73893295555","text":"\"\"\"\n5. Write a Python program to solve the Fibonacci sequence using recursion.\n\"\"\"\n\n\ndef fibonacci(n: int) -> int:\n    if n == 0:\n        return 0\n    if n == 1:\n        return 1\n    return fibonacci(n - 1) + fibonacci(n - 2)\n\n\nif __name__ == '__main__':\n    print(fibonacci(10))\n","repo_name":"aoki-h-jp/playground","sub_path":"python-w3resource-exercises/python-recursion/005.py","file_name":"005.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70709414596","text":"# Copyright 2017 Integrity Software and Games, LLC\r\n#\r\n# ##### BEGIN GPL LICENSE BLOCK ######\r\n# This file is part of UltiMaze.\r\n#\r\n# UltiMaze is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# UltiMaze is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with UltiMaze. If not, see <https://www.gnu.org/licenses/>.\r\n# ##### END GPL LICENSE BLOCK #####\r\n\r\n\"\"\"\r\nConverts back and forth between image and text mazes.\r\n\r\nAvailable Functions:\r\n    console_prog - Displays progress in the console\r\n    write_to_text - Writes text to Blender text file with current width and\r\n        height settings\r\n    write_to_text_img - Writes text to Blender text file with given width and\r\n        height\r\n    str_list_maze - Converts a python maze into a text block\r\n    convert_list_maze - Convert text maze into a Python list maze\r\n\"\"\"\r\n\r\nimport bpy\r\n\r\nfrom . 
import prep_manager\r\nfrom .maze_tools import Maze\r\nfrom .progress_display import BlenderProgress\r\nfrom .time_display import TimeDisplay\r\nfrom .logging_setup import setup_logger\r\nfrom .addon_name import get_addon_name\r\n\r\nlogger = setup_logger(__name__)\r\n\r\n\r\ndef write_to_text(text):\r\n \"\"\"Writes text to Blender text file with current width and height settings.\r\n\r\n Args:\r\n text - text to write\r\n\r\n Returns:\r\n actual name of text block it wrote to\r\n \"\"\"\r\n width = bpy.context.scene.mg.mg_width\r\n height = bpy.context.scene.mg.mg_height\r\n\r\n attempted_name = (str(width) + \"x\" + str(height) + \"_maze_list\")\r\n\r\n text_block = [\"\"]\r\n text_data_block = bpy.data.texts.new(name=attempted_name)\r\n text_block[0] = text_data_block\r\n\r\n text_block[0].from_string(str(text))\r\n\r\n text_block_name = text_block[0].name\r\n\r\n return text_block_name\r\n\r\n\r\ndef write_to_text_img(text, width, height):\r\n \"\"\"Writes text to Blender text file with given width and height.\r\n\r\n Args:\r\n text - text to write\r\n width - width of 'maze'\r\n height - height of 'maze'\r\n\r\n Returns:\r\n actual name of text block it wrote to\r\n \"\"\"\r\n attempted_name = (str(width) + \"x\" + str(height) + \"_maze_list\")\r\n\r\n text_block = [\"\"]\r\n text_data_block = bpy.data.texts.new(name=attempted_name)\r\n text_block[0] = text_data_block\r\n\r\n text_block[0].from_string(text)\r\n\r\n text_block_name = text_block[0].name\r\n\r\n return text_block_name\r\n\r\n\r\ndef str_list_maze(maze):\r\n \"\"\"Converts a python maze into a text block.\r\n\r\n Args:\r\n maze - python list in the format:\r\n [[(space in maze - x, y), is path, is walkable, active path],\r\n [(space in maze - x, y), is path, is walkable, active path], ...]\r\n\r\n Returns:\r\n actual name of text block it wrote to\r\n \"\"\"\r\n str_maze = \"\"\r\n for row in range(maze.height):\r\n for column in range(maze.width):\r\n if maze.is_path(column, row):\r\n str_maze += \"1\"\r\n else:\r\n str_maze += \"0\"\r\n\r\n text_block_name = write_to_text(str_maze)\r\n\r\n return text_block_name\r\n\r\n\r\ndef convert_list_maze():\r\n \"\"\"Convert text maze into a Python list maze.\r\n\r\n Returns:\r\n maze - python list in the format:\r\n [[(space in maze - x, y), is path, is walkable, active path],\r\n [(space in maze - x, y), is path, is walkable, active path], ...]\r\n \"\"\"\r\n\r\n mg = bpy.context.scene.mg\r\n list_maze = mg.list_maze\r\n str_maze = bpy.data.texts[list_maze].as_string()\r\n\r\n # replace \"\\n\" with \"\"\r\n str_maze = str_maze.replace(\"\\n\", \"\")\r\n\r\n x_dim = mg.mg_width\r\n y_dim = mg.mg_height\r\n\r\n maze = Maze(x_dim, y_dim)\r\n for y in range(maze.height):\r\n for x in range(maze.width):\r\n index = y * maze.width + x\r\n try:\r\n if str_maze[index] == \"1\":\r\n maze.make_path(x, y)\r\n except IndexError:\r\n logger.warning(\"IndexError when trying to access a text file's string for \"\r\n \"converting to a list maze...\"\r\n \"index={}, maze.width={}, maze.height={}\".format(index,\r\n maze.width,\r\n maze.height))\r\n\r\n return maze\r\n\r\n\r\nclass ConvertMazeImageMG(bpy.types.Operator):\r\n bl_label = \"Image to Text\"\r\n bl_idname = \"maze_gen.convert_maze_image\"\r\n bl_description = \"Creates a textblock with maze generated from image\"\r\n bl_options = {'REGISTER', 'UNDO'}\r\n\r\n def execute(self, context):\r\n mg = context.scene.mg\r\n\r\n # check if image is assigned\r\n if not mg.maze_image:\r\n logger.debug(\"Image missing! 
Please assign a valid image data block.\")\r\n            self.report({'ERROR'}, \"Image missing! Please assign a valid image data block.\")\r\n            return {'CANCELLED'}\r\n\r\n        # save files\r\n        save_return, bad_file = prep_manager.always_save()\r\n        if save_return == \"BLEND_ERROR\":\r\n            logger.debug(\"Save file or disable always save in user prefs.\")\r\n            self.report({'ERROR'}, \"Save file or disable always save in user prefs.\")\r\n            return {'CANCELLED'}\r\n\r\n        elif save_return == \"IMAGE_ERROR\":\r\n            logger.debug(\"Image: {} does not have a valid file path (for saving). Assign a valid path, \"\r\n                         \"pack img, or disable save images in user prefs\".format(bad_file.name))\r\n            self.report({'ERROR'}, \"Image '\" + bad_file.name +\r\n                        \"' does not have a valid file path (for saving). Assign a \"\r\n                        \"valid path, pack image, or disable save images in user prefs\")\r\n            return {'CANCELLED'}\r\n\r\n        elif save_return == \"TEXT_ERROR\":\r\n            logger.debug(\"Text: {} does not have a valid file path (for saving). Assign a valid path,\"\r\n                         \"or disable save texts in user prefs\".format(bad_file.name))\r\n            self.report({'ERROR'}, \"Text '\" + bad_file.name +\r\n                        \"' does not have a valid file path (for saving). Assign a \" +\r\n                        \"valid path or disable save texts in user prefs\")\r\n            return {'CANCELLED'}\r\n\r\n        # size of the images in the UV/Image editor\r\n        x_dim = bpy.data.images[mg.maze_image].size[0]\r\n        y_dim = bpy.data.images[mg.maze_image].size[1]\r\n        \r\n        # warn the user if the image dimensions are not the same as the maze\r\n        maze_width = bpy.context.scene.mg.mg_width\r\n        maze_height = bpy.context.scene.mg.mg_height\r\n        if x_dim != maze_width or y_dim != maze_height:\r\n            self.report({'ERROR'}, \"Image dimensions are not the same as your maze dimensions. \\nImage dimensions are: \"\r\n                                   \"(x:{}, y:{}), while maze settings are: (x:{}, y:{}). \\nThis will cause issues if you \"\r\n                                   \"try to convert the resulting text block to a maze without changing the maze layout \"\r\n                                   \"settings.\".format(x_dim, \r\n                                                      y_dim,\r\n                                                      maze_width,\r\n                                                      maze_height\r\n                                                      ))\r\n\r\n        maze = \"\"\r\n        count = 0\r\n        \r\n        # iterate over all pixels while making one long line for the maze str\r\n        while count < len(bpy.data.images[mg.maze_image].pixels):\r\n\r\n            # if the value is white, it's a path, otherwise a wall\r\n            # more specifically, if the red channel's value is > 0.5\r\n            if bpy.data.images[mg.maze_image].pixels[count] > 0.5:\r\n                maze += \"1\"\r\n            else:\r\n                maze += \"0\"\r\n\r\n            # factor in RGBA channels for each real pixel\r\n            count += 4\r\n\r\n        # the maze at this point is a mirror of what it should be\r\n        flipped_maze = \"\"\r\n        row = (y_dim - 1)\r\n        while row >= 0:\r\n            # snippet from exist test (here for reference only)\r\n            # index = (x + (y * (x_dimensions)))\r\n\r\n            maze_row = maze[(row * x_dim):(row * x_dim + x_dim)]\r\n            flipped_maze += maze_row\r\n\r\n            row -= 1\r\n\r\n        text_block_name = write_to_text_img(flipped_maze, x_dim, y_dim)\r\n\r\n        self.report({'INFO'}, \"See '\" + str(text_block_name) +\r\n                    \"' in the text editor\")\r\n\r\n        return {'FINISHED'}\r\n\r\n\r\nclass CreateImageFromListMG(bpy.types.Operator):\r\n    bl_label = \"Text to Image\"\r\n    bl_idname = \"maze_gen.create_image_from_list\"\r\n    bl_description = \"Creates an image with maze generated from textblock\"\r\n    bl_options = {'REGISTER', 'UNDO'}\r\n\r\n    def execute(self, context):\r\n        debug = bpy.context.user_preferences.addons[get_addon_name()].preferences.debug_mode\r\n        mg = context.scene.mg\r\n\r\n        if not mg.list_maze:\r\n            self.report({'ERROR'}, \"List missing! 
Please assign a \" +\r\n \"valid text data block.\")\r\n return {'CANCELLED'}\r\n\r\n # save files\r\n save_return, bad_file = prep_manager.always_save()\r\n if save_return == \"BLEND_ERROR\":\r\n self.report({'ERROR'}, \"Save file or disable always save \" +\r\n \"in user prefs.\")\r\n return {'CANCELLED'}\r\n\r\n elif save_return == \"IMAGE_ERROR\":\r\n self.report({'ERROR'}, \"Image '\" + bad_file.name +\r\n \"' does not have a valid file path (for saving). Assign a \" +\r\n \"valid path, pack image, or disable save images in user prefs\")\r\n return {'CANCELLED'}\r\n\r\n elif save_return == \"TEXT_ERROR\":\r\n self.report({'ERROR'}, \"Text '\" + bad_file.name +\r\n \"' does not have a valid file path (for saving). Assign a \" +\r\n \"valid path or disable save texts in user prefs\")\r\n return {'CANCELLED'}\r\n\r\n bldr_prog = BlenderProgress(\"Text to Image\", debug)\r\n bldr_prog.start()\r\n\r\n # get list maze as string\r\n str_list_maze = bpy.data.texts[mg.list_maze].as_string()\r\n\r\n # settings check before execution\r\n area = mg.mg_width * mg.mg_height\r\n if len(str_list_maze) != area:\r\n self.report({'ERROR'}, \"Width and Height settings don't match \" +\r\n \"selected textblock! Width x Height should equal the number \" +\r\n \"of characters in text.\")\r\n return {'CANCELLED'}\r\n\r\n # create image\r\n image_maze = bpy.data.images.new(\r\n name=\"Maze\",\r\n width=mg.mg_width,\r\n height=mg.mg_height)\r\n\r\n image_row = mg.mg_height - 1\r\n count = 0\r\n while image_row >= 0:\r\n image_col = 0\r\n while image_col < mg.mg_width:\r\n if str_list_maze[count] == \"1\":\r\n # Red Channel\r\n image_maze.pixels[(image_row * mg.mg_width * 4 +\r\n image_col * 4 + 0)] = 1\r\n # Green Channel\r\n image_maze.pixels[(image_row * mg.mg_width * 4 +\r\n image_col * 4 + 1)] = 1\r\n # Blue Channel\r\n image_maze.pixels[(image_row * mg.mg_width * 4 +\r\n image_col * 4 + 2)] = 1\r\n # Alpha Channel\r\n image_maze.pixels[(image_row * mg.mg_width * 4 +\r\n image_col * 4 + 3)] = 1\r\n\r\n # report progress if changed\r\n progress = count / area\r\n bldr_prog.update(progress)\r\n\r\n image_col += 1\r\n count += 1\r\n image_row -= 1\r\n\r\n bldr_prog.finish()\r\n time_disp = TimeDisplay()\r\n time_disp.convert(bldr_prog.elapsed_time())\r\n\r\n self.report({'INFO'}, \"Finished generating 2d maze in \" + str(time_disp))\r\n\r\n self.report({'INFO'}, \"See '\" + image_maze.name +\r\n \"' in the image editor\")\r\n\r\n return {'FINISHED'}\r\n","repo_name":"ekaj2/UltiMaze","sub_path":"txt_img_converter.py","file_name":"txt_img_converter.py","file_ext":"py","file_size_in_byte":12752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"26235423636","text":"import base64\nimport json\nimport logging\nimport subprocess\nimport sys\nfrom pickle import load\n\nimport mxnet as mx\nimport numpy as np\nfrom mxnet import autograd, nd, gluon\nfrom mxnet.gluon import Trainer\nfrom mxnet.gluon.loss import L2Loss\nfrom mxnet.gluon.nn import Conv2D, MaxPool2D, Dropout, Flatten, Dense, Sequential\nfrom mxnet.initializer import Xavier\n\n\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n\ninstall(\"opencv-python\")\nimport cv2\n\nlogging.basicConfig(level=logging.INFO)\n\n\n# https://docs.aws.amazon.com/sagemaker/latest/dg/mxnet-training-inference-code-template.html\n\ndef train(hyperparameters, channel_input_dirs, num_gpus, hosts):\n batch_size = hyperparameters.get(\"batch_size\", 64)\n epochs = 
hyperparameters.get(\"epochs\", 3)\n\n mx.random.seed(42)\n\n training_dir = channel_input_dirs['training']\n\n logging.info(\"Loading data from {}\".format(training_dir))\n\n with open(\"{}/train/data.p\".format(training_dir), \"rb\") as pickle:\n train_nd = load(pickle)\n with open(\"{}/validation/data.p\".format(training_dir), \"rb\") as pickle:\n validation_nd = load(pickle)\n\n train_data = gluon.data.DataLoader(train_nd, batch_size, shuffle=True)\n validation_data = gluon.data.DataLoader(validation_nd, batch_size, shuffle=True)\n\n net = Sequential()\n # http: // gluon.mxnet.io / chapter03_deep - neural - networks / plumbing.html # What's-the-deal-with-name_scope()?\n with net.name_scope():\n net.add(Conv2D(channels=32, kernel_size=(3, 3), padding=0, activation=\"relu\"))\n net.add(Conv2D(channels=32, kernel_size=(3, 3), padding=0, activation=\"relu\"))\n net.add(MaxPool2D(pool_size=(2, 2)))\n net.add(Dropout(.25))\n net.add(Flatten())\n net.add(Dense(1))\n\n ctx = mx.gpu() if num_gpus > 0 else mx.cpu()\n\n # Also known as Glorot\n net.collect_params().initialize(Xavier(magnitude=2.24), ctx=ctx)\n # Calculates the mean squared error between pred and label.\n loss = L2Loss()\n\n # kvstore type for multi - gpu and distributed training.\n if len(hosts) == 1:\n kvstore = \"device\" if num_gpus > 0 else \"local\"\n else:\n kvstore = \"dist_device_sync'\" if num_gpus > 0 else \"dist_sync\"\n\n trainer = Trainer(net.collect_params(), optimizer=\"adam\", kvstore=kvstore)\n\n smoothing_constant = .01\n\n for e in range(epochs):\n moving_loss = 0\n for i, (data, label) in enumerate(train_data):\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n with autograd.record():\n output = net(data)\n loss_result = loss(output, label)\n loss_result.backward()\n trainer.step(batch_size)\n\n curr_loss = nd.mean(loss_result).asscalar()\n moving_loss = (curr_loss if ((i == 0) and (e == 0))\n else (1 - smoothing_constant) * moving_loss + smoothing_constant * curr_loss)\n trn_total, trn_detected = measure_performance(net, ctx, train_data)\n validation_total, validation_detected = measure_performance(net, ctx, validation_data)\n logging.info(\"Epoch {}: loss: {:0.4f} Test accuracy: {:0.2f} Validation accuracy: {:0.2f}\"\n .format(e, moving_loss, trn_detected / trn_total, validation_detected / validation_total))\n\n return net\n\n\ndef measure_performance(model, ctx, data_iter):\n raw_predictions = np.array([])\n rounded_predictions = np.array([])\n actual_labels = np.array([])\n for i, (data, label) in enumerate(data_iter):\n data = data.as_in_context(ctx)\n label = label.as_in_context(ctx)\n output = model(data)\n predictions = nd.round(output)\n raw_predictions = np.append(raw_predictions, output.asnumpy().squeeze())\n rounded_predictions = np.append(rounded_predictions, predictions.asnumpy().squeeze())\n actual_labels = np.append(actual_labels, label.asnumpy().squeeze())\n\n results = np.concatenate((raw_predictions.reshape((-1, 1)),\n rounded_predictions.reshape((-1, 1)),\n actual_labels.reshape((-1, 1))), axis=1)\n detected = 0\n i = -1\n for i in range(int(results.size / 3)):\n if results[i][1] == results[i][2]:\n detected += 1\n return i + 1, detected\n\n\ndef save(net, model_dir):\n y = net(mx.sym.var(\"data\"))\n y.save(\"{}/model.json\".format(model_dir))\n net.collect_params().save(\"{}/model.params\".format(model_dir))\n\n\ndef model_fn(model_dir):\n with open(\"{}/model.json\".format(model_dir), \"r\") as model_file:\n model_json = model_file.read()\n outputs = 
mx.sym.load_json(model_json)\n    inputs = mx.sym.var(\"data\")\n    param_dict = gluon.ParameterDict(\"model_\")\n    net = gluon.SymbolBlock(outputs, inputs, param_dict)\n    # We will serve the model on CPU\n    net.load_params(\"{}/model.params\".format(model_dir), ctx=mx.cpu())\n    return net\n\n\n# noinspection PyUnusedLocal\ndef transform_fn(model, input_data, content_type, accept):\n    if content_type == \"application/png\":\n        img = nd.array(img2arr(input_data))\n        response = model(img).asnumpy().ravel().tolist()\n        return json.dumps(response), accept\n    elif content_type == \"application/json\":\n        json_array = json.loads(input_data, encoding=\"utf-8\")\n        imgs = [img2arr(base64img) for base64img in json_array]\n        imgs = np.concatenate(imgs)\n        imgs = nd.array(imgs)\n        response = model(imgs)\n        response = nd.round(response)\n        response = response.asnumpy()\n        response = response.ravel()\n        response = response.tolist()\n        return json.dumps(response), accept\n    else:\n        raise ValueError(\"Cannot decode input to the prediction.\")\n\n\ndef img2arr(base64img):\n    img = base64.b64decode(base64img)\n    img = np.asarray(bytearray(img), dtype=np.uint8)\n    img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n    img = img.astype(np.float32)\n    img = mx.nd.array(img)\n    img = mx.nd.transpose(img, (2, 0, 1))\n    img = img / 255\n    img = img.reshape((1, 3, 128, 128))\n    img = img.asnumpy()\n    return img\n","repo_name":"cosmincatalin/object-counting-with-mxnet-and-sagemaker","sub_path":"object-counting-sagemaker-script.py","file_name":"object-counting-sagemaker-script.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3811938656","text":"import allure\n\nfrom web.BaseTest.base import *\nfrom selenium.webdriver.chrome.webdriver import WebDriver\nfrom web.locators.web_locator.contact_locator import *\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom time import sleep\n\n\nclass contact_run(Base_home):\n\n    def __init__(self, driver: WebDriver):\n        self.driver = driver\n        self.wait = WebDriverWait(self.driver, 20)\n        self.contact_link= Contact_link.Contact\n        # self.ContactText = Contact_link.ContactEmail\n        self.EmailEnter = Contact_link.Email\n        self.ContactNameText = Contact_link.ContactName\n        self.NameEnter = Contact_link.Name\n        self.MessageText = Contact_link.Message\n        self.MessageEnter = Contact_link.EnterMessage\n        self.MessageSend = Contact_link.SendMessage\n        self.Out = Contact_link.Close\n\n    @allure.step\n    @allure.description('clicking on contact link - should open the contact dialog')\n    def click_contact_page(self):\n        self.driver.find_element(By.XPATH, self.contact_link).click()\n\n    @allure.step\n    @allure.description('insert value to \"email\" input')\n    def enter_Username(self, EmailEnter):\n        field = self.driver.find_element(By.XPATH, self.EmailEnter)\n        field.clear()\n        field.send_keys(EmailEnter)\n        sleep(3)\n\n    @allure.step\n    @allure.description('insert value to \"name\" input')\n    def enter_passwored(self, NameEnter):\n        field = self.driver.find_element(By.XPATH, self.NameEnter)\n        field.clear()\n        field.send_keys(NameEnter)\n        sleep(3)\n\n    @allure.step\n    @allure.description('insert the desired message')\n    def Click_massage(self, MessageEnter):\n        field = self.driver.find_element(By.XPATH, self.MessageEnter)\n        field.clear()\n        
field.send_keys(MessageEnter)\n        sleep(3)\n\n\n    @allure.step\n    @allure.description('clicking on the send button')\n    def send_button_click(self):\n        self.driver.find_element(By.XPATH, self.MessageSend).click()\n\n    def alat_off(self):\n        self.driver.switch_to.alert.accept()","repo_name":"fentahunadisu/FinalProjectDemoblaze","sub_path":"web/Page/web_page/Contact_page.py","file_name":"Contact_page.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"33631032571","text":"# Clear the token\nfrom base.base import Base\nfrom page import *\n\n\nclass ClearTokenPage(Base):\n    def __init__(self):\n        # Override the parent class's initializer\n        super().__init__()\n\n    # Check that two consecutive \"back\" actions return to the WeChat home page\n    def clear_token_page(self):\n        self.assert_png(\"首次进入健康数据空间\", authorization_information)\n        self.click_8(authorization_information)\n\n\nif __name__ == '__main__':\n    ClearTokenPage().clear_token_page()","repo_name":"yebinhuang/test","sub_path":"page/health_data_space/clear_token_page.py","file_name":"clear_token_page.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"26893163543","text":"'''\nDescription: Marco likes strings a lot. His friend has a 1-indexed binary string of length n where all the bits are 0 initially and will flip all the bits of this binary string (i.e., change them from 0 to 1) one by one. He has given him a 1-indexed integer array flips where flips[i] indicates that the bit at index flips[i] will be flipped in the ith step.\n\nA binary string is prefix-aligned if, after the ith step, all the bits in the inclusive range [1, i] are ones and all the other bits are zeros.\n\nMarco has been asked to return the number of times the binary string is prefix-aligned during the flipping process.\n'''\n\ndef checkPrefixAlign(string, i):\n    part1 = string[0 : i + 1]\n    part2 = string[i + 1: ]\n\n    # The prefix must be all 1s and the suffix all 0s\n    return all(i == '1' for i in part1) and all(i == '0' for i in part2)\n\n# Driver code\nn = int(input())\ndata = [int(x) for x in input().split()]\nstring = ['0'] * n\ncount = 0\n\nfor index, i in enumerate(data):\n    string[i - 1] = '1'\n    # If function is true, increment count\n    if checkPrefixAlign(string, index):\n        count += 1\nprint(count)","repo_name":"aditi-govindu/Unstop-100-days","sub_path":"Day54/BinaryStringPrefixAlign.py","file_name":"BinaryStringPrefixAlign.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"21947410245","text":"#!/usr/bin/env python\n\nimport geojson\nimport ast\nimport json\n\n\ndef get_data_from_line(line):\n    chunks = line.strip('\\n').split(' ')\n    osm_id = int(chunks[0].split('_')[1])\n    coord_string = '[{}]'.format(chunks[1].strip('\\n'))\n    coords = ast.literal_eval(coord_string)\n    return osm_id, coords\n\n\ndef make_json(input_file, instruction, output_file):\n    tasks = []\n    with open(input_file, 'r') as infile:\n        for line in infile:\n            task = {}\n            task['instruction'] = instruction\n            task['geometries'] = {\n                'type': 'FeatureCollection',\n                'features': []\n            }\n\n            osm_id, coords = get_data_from_line(line)\n            geometry = geojson.LineString(coords)\n\n            feature = {\n                'type': 'Feature',\n                'geometry': geometry,\n                'properties': {\"osmid\": osm_id}\n            }\n\n            task['geometries']['features'].append(feature)\n            tasks.append(task)\n\n    with open(output_file, 'w+') as outfile:\n        json.dump(tasks, outfile)\n\nif __name__ == '__main__':\n    import argparse\n\n    description = 'Produces a JSON file from the errors txt file.'\n    parser = argparse.ArgumentParser(description=description)\n    parser.add_argument('input_file', help='input txt file')\n    parser.add_argument('instruction', help='task instructions')\n    parser.add_argument('-o', dest='output_file',\n                        help='output json file',\n                        default='output.json'\n                        )\n\n    args = parser.parse_args()\n\n    # OUTPUT EXAMPLE\n    #\n    # This can be obtained with the two following commands:\n    #\n    # python make_json.py test.txt \"This is a hard task\"\n    # cat output.json | python -mjson.tool\n    #\n    #{\n    #    \"instruction\" : \"This is a hard task\",\n    #    \"geometries\" : {\n    #        \"type\": \"FeatureCollection\",\n    #        \"features\": [\n    #            { \"type\": \"Feature\",\n    #              \"geometry\":\n    #                { \"type\": \"LineString\",\n    #                  \"coordinates\": [[-88.72199, 30.39396], [-88.72135, 30.39395],\n    #                                  [-88.72125, 30.3939]]\n    #                },\n    #              \"properties\": {\"osmid\": 23456}\n    #            }\n    #        ]\n    #    }\n    #}\n\n    make_json(args.input_file, args.instruction, args.output_file)\n","repo_name":"osmItalia/stat_roulette","sub_path":"tools/make_json.py","file_name":"make_json.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"35039538086","text":"#!/usr/bin/env python3\n\nimport argparse\nimport subprocess\nimport sys\n\n\nRESET = \"\\033[0m\"\nBOLD = \"\\033[1m\"\nFAIL = \"\\033[31m\"\nPASS = \"\\033[32m\"\n\n\ndef eprint(*args, **kwargs):\n    print(*args, file=sys.stderr, flush=True, **kwargs)\n\n\ndef testrun(cmd, repetitions=10, stdout=None, stderr=None):\n    eprint(\" \".join(cmd) + \": \", end='')\n\n    try:\n        for _ in range(repetitions):\n            eprint(\".\", end='')\n            subprocess.run(cmd, stdout=stdout, stderr=stderr, check=True)\n    except subprocess.CalledProcessError:\n        eprint(f\"\\b{BOLD}{FAIL}X{RESET}\", end='')\n\n    eprint()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"-d\", \"--discard-output\", dest=\"stdout\",\n                        action=\"store_const\", const=subprocess.DEVNULL,\n                        help=\"discard output\",\n                        required=False)\n\n    parser.add_argument(\"-r\", \"--repetitions\",\n                        type=int, default=10,\n                        help=\"number of repetitions (default is 10)\",\n                        required=False)\n\n    parser.add_argument(\"cmd\",\n                        nargs=\"*\",\n                        help=\"program to run\")\n\n    args = parser.parse_args()\n\n    if args.cmd:\n        testrun(args.cmd, args.repetitions, args.stdout)\n    else:\n        # Read commands from file\n        with open(\"testrun.input\") as file:\n            for line in file:\n                cmd = line.split()\n                testrun(cmd, args.repetitions, args.stdout)\n","repo_name":"aprell/tasking-2.0","sub_path":"utils/testrun.py","file_name":"testrun.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"}
{"seq_id":"6979658444","text":"from .facebook import FacebookHelper\nimport bot.utils.messages as messages\nfrom typing import Dict, List, Any\n\nclass Templates:\n    \"\"\"Builders for Facebook Messenger structured message payloads.\"\"\"\n\n    @staticmethod\n    def options_buttons() -> List[Dict[str,str]]:\n        return [\n            FacebookHelper.post_back_button(messages.NEW_POST_TITLE, messages.NEW_POST_PAYLOAD),\n            FacebookHelper.post_back_button(messages.CHECK_GROUPS_TITTLE, messages.CHECK_GROUPS_PAYLOAD),\n        ]\n\n    @staticmethod\n    def generic_element(title: str, buttons: List[Dict[str, str]] = None) -> Dict[str, Any]:\n        element = {\n            'title': title,\n        }\n\n        if buttons:\n            element['buttons'] = buttons\n\n        return element\n\n    
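    # Example of the element shape (a sketch, values invented):
    #   Templates.generic_element('My group', Templates.options_buttons())
    #   -> {'title': 'My group', 'buttons': [...two postback buttons...]}
    # i.e. one entry of message.attachment.payload.elements in Messenger's
    # generic template.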
@staticmethod\n    def add_group_buttons(group: Dict[str, str]) -> List[Dict[str, str]]:\n        return [\n            FacebookHelper.post_back_button(messages.ADD_GROUP_TITTLE, messages.ADD_GROUP_PAYLOAD + group.get('id')),\n        ]\n\n\n    @staticmethod\n    def groups_carousel(groups: List[Dict[str, str]]) -> List[Dict[str,str]]:\n        carousel = []\n        for group in groups:\n            buttons = Templates.add_group_buttons(group)\n            carousel.append(Templates.generic_element(group.get('name'), buttons))\n        return carousel\n","repo_name":"tume01/Group-Message-Bot","sub_path":"bot/helpers/templates.py","file_name":"templates.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"71965839873","text":"import quad2d_viewer\nimport quad3d_viewer\nimport acrobot_viewer\nimport unicycle1_viewer\nimport unicycle2_viewer\nimport car_with_trailer_viewer\nfrom robot_viewer import check_viewer\n\nimport unittest\nimport os\nimport subprocess\n\nbase_path = \"./\" + os.path.dirname(__file__) + \"/\" + \"../../\"\nthis_path = \"./\" + os.path.dirname(__file__) + \"/\"\n\n\nvisualize: bool = False\n\nprint(base_path)\nprint(this_path)\n\n\nclass TestCli(unittest.TestCase):\n    def test_cli_env(self):\n        cmd = [\n            \"python3\",\n            this_path + \"viewer_cli.py\",\n            \"--robot\",\n            \"unicycle1\",\n            \"--env\",\n            base_path + \"envs/unicycle1_v0/bugtrap_0.yaml\",\n        ]\n        print(\"running cmd: \", \" \".join(cmd))\n\n        out = subprocess.run(cmd)\n        self.assertEqual(out.returncode, 0)\n\n    def test_cli_traj(self):\n        cmd = [\n            \"python3\",\n            this_path + \"viewer_cli.py\",\n            \"--robot\",\n            \"unicycle1\",\n            \"--env\",\n            base_path + \"envs/unicycle1_v0/bugtrap_0.yaml\",\n            \"--result\",\n            base_path + \"envs/unicycle1_v0/motions/guess_bugtrap_0_sol0.yaml\",\n        ]\n\n        print(\"running cmd: \", \" \".join(cmd))\n\n        out = subprocess.run(cmd)\n        self.assertEqual(out.returncode, 0)\n\n\nclass TestViewers(unittest.TestCase):\n    def test_quad3d_viewer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/quadrotor_v0/quad_one_obs.yaml\",\n            \"--result\",\n            base_path + \"envs/quadrotor_v0/trajectories/quadrotor_0_obs_0.yaml\",\n        ]\n        if visualize:\n            argv.append(\"-i\")\n        viewer = quad3d_viewer.Quad3dViewer()\n        check_viewer(viewer, argv=argv)\n\n    def test_unicycle1_viewer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/unicycle1_v0/bugtrap_0.yaml\",\n            \"--result\",\n            base_path + \"envs/unicycle1_v0/motions/guess_bugtrap_0_sol0.yaml\",\n        ]\n        viewer = unicycle1_viewer.Unicycle1Viewer()\n        if visualize:\n            argv.append(\"-i\")\n        check_viewer(viewer, argv=argv)\n\n    def test_unicycle2_viewer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/unicycle2_v0/parallelpark_0.yaml\",\n            \"--result\",\n            base_path + \"envs/unicycle2_v0/trajectories/guess_parallelpark_0_sol0.yaml\",\n        ]\n        if visualize:\n            argv.append(\"-i\")\n        viewer = unicycle2_viewer.Unicycle2Viewer()\n        check_viewer(viewer, argv=argv)\n\n    def test_quad2d_viewer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/multirotor2d_v0/quad2d_recovery_obs.yaml\",\n            \"--result\",\n            base_path\n            + \"envs/multirotor2d_v0/trajectories/quad2d_recovery_good_init_guess.yaml\",\n        ]\n        if visualize:\n            argv.append(\"-i\")\n\n        viewer = quad2d_viewer.Quad2dViewer()\n        check_viewer(viewer, argv=argv)\n\n    def test_acrobot_viewer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/acrobot_v0/swing_up_empty.yaml\",\n            \"--result\",\n            base_path + \"envs/acrobot_v0/trajectories/swing_up_empty_init_guess.yaml\",\n        ]\n        if visualize:\n            argv.append(\"-i\")\n        viewer = acrobot_viewer.AcrobotViewer()\n        check_viewer(viewer, argv=argv)\n\n    def test_car_with_trailer(self):\n        argv = [\n            \"--env\",\n            base_path + \"envs/car1_v0/bugtrap_0.yaml\",\n            \"--result\",\n            base_path + \"envs/car1_v0/trajectories/guess_bugtrap_0_sol0.yaml\",\n        ]\n        if visualize:\n            argv.append(\"-i\")\n        viewer = car_with_trailer_viewer.CarWithTrailerViewer()\n        check_viewer(viewer, argv=argv)\n\n\n# Run without visualization\n# python3 ../utils/viewer/viewer_test.py\n\n# Run with visualization\n# VISUALIZE=1 python3 ../utils/viewer/viewer_test.py\n\nif __name__ == \"__main__\":\n    visualize = bool(os.environ.get(\"VISUALIZE\", False))\n    unittest.main()\n","repo_name":"quimortiz/dynobench","sub_path":"utils/viewer/viewer_test.py","file_name":"viewer_test.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"34762850009","text":"# In[74]:\nimport numpy as np\nimport tensorflow as tf\nprint(tf.__version__)\n\n# In[74]:\nfrom tflite_model_maker import configs\nfrom tflite_model_maker import ExportFormat\nfrom tflite_model_maker import model_spec\nfrom tflite_model_maker import image_classifier\nfrom tflite_model_maker.image_classifier import DataLoader\nassert tf.__version__.startswith('2')\n\n# In[74]:\ntf.get_logger().setLevel('ERROR')\n#data_path = tf.keras.utils.get_file('flower_photos','https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',untar=True)\ndata_path='F:\\\\Pneumonia\\\\chest_xray\\\\chest_xray\\\\train'\n#data_path='/mnt/f/Pneumonia/chest_xray/chest_xray/train'\ndata = DataLoader.from_folder(data_path)\ntrain_data, test_data = data.split(0.5)\nmodel = image_classifier.create(train_data)\nprint(model)\nloss, accuracy = model.evaluate(test_data)\nprint(loss,accuracy)\n\ndata_path='F:\\\\Pneumonia\\\\chest_xray\\\\chest_xray\\\\test'\n#data_path='/mnt/f/Pneumonia/chest_xray/chest_xray/test'\ndata = DataLoader.from_folder(data_path)\ntrain_data, test_data = data.split(0.5)\nprint(train_data, test_data)\nloss, accuracy = model.evaluate(test_data)\nprint(loss,accuracy)\n\nloss, accuracy = model.evaluate(train_data)\n\nprint(loss,accuracy)\n\n\n# In[74]:\n\n\nmodel.export(export_dir='.')\n\n\n# In[75]:\n\n\nmodel.evaluate_tflite('model.tflite', test_data)\n\n\n# In[72]:\n\n\nimport pickle\nfilename = \"tfliteModel.pkl\"\nwith open(filename, 'wb') as file:\n    pickle.dump(model, file)\n","repo_name":"animesh/scripts","sub_path":"tfPneumo.py","file_name":"tfPneumo.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"24571642436","text":"import safety_gym\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import MultivariateNormal\nimport torch.nn.functional as F\nimport gym\nimport torch.optim as optim\nimport random\nimport numpy as np\nimport json\nfrom IPython.display import clear_output\nimport matplotlib.pyplot as plt\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass ReplayBuffer:\n    def __init__(self, capacity):\n        self.capacity = capacity\n        self.buffer = []\n        self.position = 0\n    \n    def push(self, state, action, reward, next_state, done):\n        if len(self.buffer) < self.capacity:\n            self.buffer.append(None)\n        self.buffer[self.position] = (state, action, reward, next_state, done)\n        self.position = int((self.position + 1) % self.capacity)  # as a ring buffer\n    \n    def sample(self, batch_size):\n        batch = random.sample(self.buffer, batch_size)\n        state, action, reward, next_state, done = map(np.stack, zip(*batch)) # stack for each element\n        ''' \n        the * serves as unpack: sum(a,b) <=> batch=(a,b), sum(*batch) ;\n        zip: a=[1,2], b=[2,3], zip(a,b) => [(1, 2), (2, 3)] ;\n        the map serves as mapping the function on each list element: map(square, [2,3]) => [4,9] ;\n        np.stack((1,2)) => array([1, 2])\n        '''\n        return state, action, reward, next_state, done\n    \n    def __len__(self):\n        return len(self.buffer)\n\nclass CEM():\n    ''' \n    cross-entropy method, as optimization of the action policy \n    '''\n    def __init__(self, theta_dim, ini_mean_scale=0.0, ini_std_scale=1.0):\n        self.theta_dim = theta_dim\n        self.initialize(ini_mean_scale=ini_mean_scale, ini_std_scale=ini_std_scale)\n\n    def initialize(self, ini_mean_scale=0.0, ini_std_scale=1.0):\n        self.mean = ini_mean_scale*np.ones(self.theta_dim)\n        self.std = ini_std_scale*np.ones(self.theta_dim)\n    \n    def sample(self):\n        # theta = self.mean + np.random.randn(self.theta_dim) * self.std\n        theta = self.mean + np.random.normal(size=self.theta_dim) * self.std\n        return theta\n\n    def sample_multi(self, n):\n        theta_list=[]\n        for i in range(n):\n            theta_list.append(self.sample())\n        return np.array(theta_list)\n\n\n    def update(self, selected_samples):\n        self.mean = np.mean(selected_samples, axis = 0)\n        # print('mean: ', self.mean)\n        self.std = np.std(selected_samples, axis = 0) # plus the entropy offset, or else easily get 0 std\n        # print('std: ', self.std)\n\n        return self.mean, self.std\n\n\nclass QNetwork(nn.Module):\n    def __init__(self, input_dim, hidden_dim, init_w=3e-3):\n        super(QNetwork, self).__init__()\n        \n        self.linear1 = nn.Linear(input_dim, hidden_dim)\n        self.linear2 = nn.Linear(hidden_dim, hidden_dim)\n        self.linear3 = nn.Linear(hidden_dim, 1)\n        \n        self.linear3.weight.data.uniform_(-init_w, init_w)\n        self.linear3.bias.data.uniform_(-init_w, init_w)\n        \n    def forward(self, state, action):\n        x = torch.cat([state, action], 1) # the dim 0 is number of samples\n        x = F.relu(self.linear1(x))\n        x = F.relu(self.linear2(x))\n        x = self.linear3(x)\n        return x\n\nclass QT_Opt():\n    def __init__(self, state_dim, action_dim, hidden_dim, replay_buffer, q_lr=3e-4, cem_update_itr=4, select_num=6, num_samples=64):\n        self.num_samples = num_samples\n        self.select_num = select_num\n        self.cem_update_itr = cem_update_itr\n        self.replay_buffer = replay_buffer\n        self.qnet = QNetwork(state_dim+action_dim, hidden_dim).to(device) # gpu\n        self.target_qnet1 = QNetwork(state_dim+action_dim, hidden_dim).to(device)\n        self.target_qnet2 = QNetwork(state_dim+action_dim, hidden_dim).to(device)\n        self.cem = CEM(theta_dim = action_dim)  # cross-entropy method for updating\n\n        self.q_optimizer = optim.Adam(self.qnet.parameters(), lr=q_lr)\n        self.step_cnt = 0\n\n    def update(self, batch_size, gamma=0.9, soft_tau=1e-2, update_delay=100):\n        state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)\n        self.step_cnt+=1\n\n        \n        state_ = torch.FloatTensor(state).to(device)\n        next_state_ = torch.FloatTensor(next_state).to(device)\n        action = torch.FloatTensor(action).to(device)\n        reward = torch.FloatTensor(reward).unsqueeze(1).to(device)  # reward is single value, unsqueeze() to add one dim to be [reward] at the sample dim;\n        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(device)\n\n        predict_q = self.qnet(state_, action) # predicted Q(s,a) value\n\n        # get argmax_a' from the CEM for the target Q(s', a')\n        new_next_action = []\n        for i in range(batch_size):  # batch of states, processed one by one to avoid running out of memory\n            new_next_action.append(self.cem_optimal_action(next_state[i]))\n        new_next_action = torch.FloatTensor(new_next_action).to(device)\n\n        target_q_min = torch.min(self.target_qnet1(next_state_, new_next_action), self.target_qnet2(next_state_, new_next_action))\n        target_q = reward + (1-done)*gamma*target_q_min\n\n        q_loss = ((predict_q - target_q.detach())**2).mean()  # MSE loss, note that original paper uses cross-entropy loss\n        self.q_optimizer.zero_grad()\n        q_loss.backward()\n        self.q_optimizer.step()\n\n        # update the target nets, according to original paper:\n        # one with Polyak averaging, another with lagged/delayed update\n        self.target_qnet1=self.target_soft_update(self.qnet, self.target_qnet1, soft_tau)\n        self.target_qnet2=self.target_delayed_update(self.qnet, self.target_qnet2, update_delay)\n        \n\n\n    def cem_optimal_action(self, state):\n        ''' evaluate action wrt Q(s,a) to select the optimal using CEM '''\n        cuda_states = torch.FloatTensor(np.vstack([state]*self.num_samples)).to(device)\n        self.cem.initialize() # every time use a new cem, cem is only for deriving the argmax_a'\n        for itr in range(self.cem_update_itr):\n            actions = self.cem.sample_multi(self.num_samples)\n            q_values = self.target_qnet1(cuda_states, torch.FloatTensor(actions).to(device)).detach().cpu().numpy().reshape(-1) # 2 dim to 1 dim\n            max_idx=q_values.argsort()[-1] # select one maximal q\n            idx = q_values.argsort()[-int(self.select_num):] # select top maximum q\n            selected_actions = actions[idx]\n            _,_=self.cem.update(selected_actions)\n        optimal_action = actions[max_idx]\n        return optimal_action\n    \n\n    def target_soft_update(self, net, target_net, soft_tau):\n        ''' Soft update the target net '''\n        for target_param, param in zip(target_net.parameters(), net.parameters()):\n            target_param.data.copy_(  # copy data value into target parameters\n                target_param.data * (1.0 - soft_tau) + param.data * soft_tau\n            )\n\n        return target_net\n\n    def target_delayed_update(self, net, target_net, update_delay):\n        ''' delayed update the target net '''\n        if self.step_cnt%update_delay == 0:\n            for target_param, param in zip(target_net.parameters(), net.parameters()):\n                target_param.data.copy_(  # copy data value into target parameters\n                    param.data \n                )\n\n        return target_net\n\n    def save_model(self, path):\n        # all three nets are loaded from the same checkpoint, so saving the online net once is enough\n        torch.save(self.qnet.state_dict(), path)\n\n    def load_model(self, path):\n        self.qnet.load_state_dict(torch.load(path))\n        self.target_qnet1.load_state_dict(torch.load(path))\n        self.target_qnet2.load_state_dict(torch.load(path))\n        self.qnet.eval()\n        self.target_qnet1.eval()\n        self.target_qnet2.eval()\n\ndef plot(rewards):\n    clear_output(True)\n    plt.figure(figsize=(20,5))\n    # plt.subplot(131)\n    plt.plot(rewards)\n    plt.savefig('qt_opt_v2.png')\n    # plt.show()\n    \ndef main():\n    ############## Hyperparameters ##############\n    env_name = \"Safexp-PointGoal1-v0\"\n    render = False\n    log_interval = 20 # print avg reward in the interval\n    max_episodes = 1000000 # max training episodes\n    max_timesteps = 1000 # max timesteps in one episode\n    batch_size = 128\n    update_timestep = 500 # update policy every n timesteps\n    hidden_dim = 512\n    lr = 0.0003 # parameters for Adam optimizer\n    random_seed = None\n    data_path = f'./data/qtopt_{env_name}'\n    log_path = f'./data/{env_name}_qtopt.json'\n    #############################################\n    \n    # creating environment\n    env = gym.make(env_name)\n    state_dim = env.observation_space.shape[0]\n    
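    # Safexp-PointGoal1-v0 exposes a flat Box observation (lidar plus robot sensor
    # readings) and a small continuous Box action space (2-D for the Point robot),
    # so both dimensions can be read straight off the Gym space shapes; the exact
    # observation size depends on the safety-gym configuration.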
action_dim = env.action_space.shape[0]\n    \n    if random_seed:\n        print(\"Random Seed: {}\".format(random_seed))\n        torch.manual_seed(random_seed)\n        env.seed(random_seed)\n        np.random.seed(random_seed)\n    \n    replay_buffer_size = int(5e5)\n    replay_buffer = ReplayBuffer(replay_buffer_size)\n    model = QT_Opt(state_dim, action_dim, hidden_dim, replay_buffer, lr)\n    \n    # logging variables\n    avg_length = 0\n    time_step = 0\n    running_reward = 0\n    running_cost = 0\n    log = {'time_step':[], 'avg_length': [], 'running_reward': [], 'running_cost': []}\n    # training loop\n    for i_episode in range(1, max_episodes+1):\n        state = env.reset()\n        for t in range(max_timesteps):\n            time_step += 1\n            # Running policy_old:\n            action = model.cem_optimal_action(state)\n            next_state, reward, done, info = env.step(action)\n\n            replay_buffer.push(state, action, reward, next_state, done)\n            state = next_state\n\n            # update if its time\n            if time_step % update_timestep == 0 and len(replay_buffer) > batch_size:\n                model.update(batch_size)\n                model.save_model(data_path)\n            running_reward += reward\n            running_cost += info['cost']\n            if render:\n                env.render()\n            if done:\n                break\n        \n        avg_length += t\n        \n        # logging\n        if i_episode % log_interval == 0:\n            avg_length = int(avg_length/log_interval)\n            running_reward = running_reward/log_interval\n            running_cost = running_cost/log_interval\n            log['time_step'].append(time_step)\n            log['avg_length'].append(avg_length)\n            log['running_reward'].append(running_reward)\n            log['running_cost'].append(running_cost)\n            with open(log_path, 'w') as log_file:\n                json.dump(log, log_file)\n\n            print('Episode {} \\t Avg length: {} \\t Avg reward: {:.3f} \\t Avg cost: {}'.format(i_episode, avg_length, running_reward, running_cost))\n            running_reward = 0\n            running_cost = 0\n            avg_length = 0\n        \n        if i_episode % 10 == 0:\n            plot(log['running_reward'])\n\nif __name__ == '__main__':\n    main()\n    ","repo_name":"quantumiracle/safety_rl","sub_path":"qtopt_v3_safety_gym.py","file_name":"qtopt_v3_safety_gym.py","file_ext":"py","file_size_in_byte":11089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"30240962699","text":"from random import randint, shuffle\n\nfrom django.core.files.images import ImageFile\nfrom logbooks.models.pages import StoryPage\nfrom smartforests.models import CmsImage\nfrom django.core.management.base import BaseCommand\nfrom django.core.files.temp import NamedTemporaryFile\nfrom django.db import transaction\nfrom wagtail.core.rich_text import RichText\nfrom commonknowledge.wagtail.helpers import get_children_of_type\nfrom datetime import datetime\n\nfrom faker import Faker, providers\nimport requests\n\nfrom logbooks.models import LogbookIndexPage, LogbookPage, StoryIndexPage, LogbookEntryPage\n\n\nclass Command(BaseCommand):\n    help = 'Seed the forest'\n\n    def add_arguments(self, parser):\n        parser.add_argument('-ss', '--storysize', dest='storysize', type=int,\n                            help='Control the size distribution of stories', default=3)\n\n        parser.add_argument('-l', '--logbooks', dest='logbooks', type=int,\n                            help='How many logbooks', default=10)\n\n        parser.add_argument('-s', '--stories', dest='stories', type=int,\n                            help='How many stories', default=100)\n\n        parser.add_argument('-t', '--tags', dest='tags', type=int,\n                            help='How many tags', default=30)\n\n        parser.add_argument('--tags_per_logbook', dest='tags_per_logbook', type=int,\n                            help='How many tags per logbook', default=4)\n\n        parser.add_argument('--tags_per_story', dest='tags_per_story', type=int,\n                            help='How many tags per story', default=3)\n\n    @transaction.atomic\n    
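    # transaction.atomic (applied above) wraps the whole seeding run in a single DB
    # transaction, so an exception mid-seed rolls the created page and image records
    # back instead of leaving a half-built tree.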
def handle(self, *args, **options):\n        fake = Faker()\n\n        def random_distribution():\n            # Get a nice distribution for a story length\n            return max(1, int(randint(0, options.get('storysize')) ** 2 / 5), max(3, options.get('storysize')))\n\n        # https://faker.readthedocs.io/en/master/providers.html\n        fake.add_provider(providers.internet)\n        fake.add_provider(providers.lorem)\n        fake.add_provider(providers.misc)\n\n        tags = [\n            fake.word()\n            for _ in range(options.get('tags'))\n        ]\n\n        def get_image(seed):\n            title = f'example_{seed}'\n\n            try:\n                return CmsImage.objects.get(title=title)\n            except CmsImage.DoesNotExist:\n                image_temp_file = NamedTemporaryFile(delete=True)\n\n                width = 600\n                height = randint(5, 10) * 100\n\n                res = requests.get(\n                    f'https://picsum.photos/{width}/{height}.jpg', stream=True)\n\n                # Write the in-memory file to the temporary file\n                # Read the streamed image in sections\n                for block in res.iter_content(1024 * 8):\n                    # If no more file then stop\n                    if not block:\n                        break\n                    # Write image block to temporary file\n                    image_temp_file.write(block)\n\n                image = CmsImage(\n                    title=title, alt_text=fake.sentence(), width=width, height=height, file=ImageFile(image_temp_file))\n                image.save()\n                return image\n\n        def apply_tags(x, count=5):\n            shuffle(tags)\n            selected_tags = tags[:count]\n            x.tags.set(*selected_tags)\n\n        block_generators = {\n            'text': lambda: RichText(f'<p>{\" \".join(fake.paragraphs(4))}</p>'),\n            'quote': lambda: {\n                'text': RichText(f'<p>{fake.sentence()}</p>'),\n                'author': fake.name(),\n                'title': fake.sentence(),\n                'date': datetime.now(),\n                'link': '/'\n            },\n            'image': lambda: {\n                'image': get_image(randint(100, 200)),\n                'caption': fake.sentence()\n            }\n        }\n\n        def generate_story_block():\n            keys = list(block_generators.keys())\n            shuffle(keys)\n            block_type = keys[0]\n\n            return (block_type, block_generators[block_type]())\n\n        def populate_logbook(logbook: LogbookPage):\n            apply_tags(logbook, options.get('tags_per_logbook'))\n\n            if logbook.is_leaf():\n                for _ in range(random_distribution()):\n                    entry = LogbookEntryPage(title=fake.sentence())\n                    entry.body = [\n                        generate_story_block(), generate_story_block()\n                    ]\n                    apply_tags(entry, options.get('tags_per_story'))\n                    logbook.add_child(instance=entry)\n\n            logbook.save()\n\n        def populate_story(story: LogbookEntryPage):\n            story.body = [generate_story_block()\n                          for _ in range(random_distribution())]\n            apply_tags(story, options.get('tags_per_story'))\n\n            story.save()\n\n        for index in LogbookIndexPage.objects.all():\n            if index.is_leaf():\n                for _ in range(options.get('logbooks')):\n                    logbook = LogbookPage(\n                        title=fake.sentence(),\n                        description=fake.paragraph(),\n                        first_published_at=fake.past_datetime(\n                            start_date='-60d')\n                    )\n                    index.add_child(instance=logbook)\n                    populate_logbook(logbook)\n            else:\n                for logbook in get_children_of_type(index, LogbookPage):\n                    populate_logbook(logbook)\n\n        for index in StoryIndexPage.objects.all():\n            if index.is_leaf():\n                for _ in range(options.get('stories')):\n                    story = StoryPage(\n                        title=fake.sentence(),\n                        first_published_at=fake.past_datetime(\n                            start_date='-60d')\n                    )\n                    index.add_child(instance=story)\n                    populate_story(story)\n            else:\n                for story in get_children_of_type(index, LogbookEntryPage):\n                    populate_story(story)\n","repo_name":"planetarypraxis/smartforests","sub_path":"smartforests/management/commands/seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"8715849134","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\n    @author: Pan M. 
CHU\n  @Email: pan_chu@outlook.com\n\"\"\"\n# %%\n# Built-in/Generic Imports\nimport os\nimport sys\n# […]\n\n# Libs\nimport time\n\nimport pandas as pd\nimport numpy as np # Or any other\n# […]\n\n# Own modules\n\n\nfrom scipy.stats import binned_statistic, linregress\nimport matplotlib.pyplot as plt\nimport sciplot as splt\nfrom RNA_seq_analyzer import RNASeqAnalyzer\nfrom seq_utility import BAMile, base_position2relative_pos\nfrom tqdm import tqdm\nimport _thread as thread\nimport subprocess as sbp\n\nsplt.whitegrid()\n\nglobal_lock = thread.allocate_lock()\n\n\ndef deep_seq_pip(sample_name, ref_ps, reads, ori_site, ter_site, bin_length, export_dir, index) -> int:\n    \"\"\"\n\n    Parameters\n    ----------\n    sample_name : string\n        sample name\n    ref_ps : string\n        reference genome path\n    reads : str or list\n        path(s) to the sequencing read file(s)\n    ori_site : int\n        location of oriC\n    ter_site : int\n        location of the replication terminus\n    bin_length : int\n        bin size (in bases) used to average the coverage\n    export_dir : str\n        save data dir\n    index : int\n        slot in the global thread_state list used to track this worker\n\n    Returns\n    -----------\n    int\n        identifier of the thread spawned to process the coverage\n\n    \"\"\"\n    if isinstance(reads, str):\n        read1, read2 = reads, None\n    else:\n        read1, read2 = reads\n    sample = RNASeqAnalyzer(sample_name=sample_name, ref_ps=ref_ps, gff_ps=None,\n                            seq_ps1=read1, seq_ps2=read2, bowtie_pars={\"-p\": 64}, output_dir=export_dir)\n    sample.seq_data_align()\n\n    def coverage_process(sample: RNASeqAnalyzer, ori_site, ter_site, bin_length, index):\n        \"\"\"\n\n        Parameters\n        ----------------\n        sample: object\n            RNASeqAnalyzer object\n        ori_site: int\n            the location of oriC\n        ter_site: int\n            the location of the replication terminus\n        bin_length: int\n            bin size (in bases) used to average the coverage\n        index: int\n            slot in the global thread_state list flagged while this worker runs\n        \"\"\"\n        thread_state[index] = True\n        bam_file = BAMile(sample.bam_sorted_ps, sample.gff_ps, sample.reference_file_path,\n                          paired_flag=sample.paired_flag)\n        bam_file.separate_bam_by_strand(clean_rtRNA=False)\n        bam_file.count_coverage()\n        # coverage = bam_file.fetch_coverage(bam_file.genome_set[0], ori_site, ori_site - 1, move_average=150)\n        coverage = bam_file.fetch_coverage(bam_file.genome_set[0], move_average=150)\n\n        genome_length = len(bam_file.genomes[bam_file.genome_set[0]])\n        coverage_binned = binned_statistic(np.arange(len(coverage)), coverage, 'mean',\n                                           bins=int(genome_length / bin_length))\n\n        coverage_binned_mean = coverage_binned.statistic\n        covg_binned_base_pos = coverage_binned.bin_edges\n        genome_index = np.array([int(np.mean([covg_binned_base_pos[i], covg_binned_base_pos[i+1]]))\n                                 for i in range(len(covg_binned_base_pos)-1)])\n        relative_pos = np.array([base_position2relative_pos(pos, genome_length, ori_site, ter_site)[0]\n                                 for pos in genome_index])\n\n        # zero_index = round(len(coverage_binned_mean) / 2)\n        # coverage_binned_mean = np.roll(coverage_binned_mean, round(zero_index))\n        #\n        # left_pos = np.linspace(-1, 0, num=zero_index, endpoint=False)\n        # right_pos = np.linspace(0, 1, num=(len(coverage_binned_mean) - zero_index), endpoint=True)\n        # relative_pos = np.concatenate([left_pos, right_pos])\n\n        # genome_index = np.arange(1, genome_length)\n        # genome_index = np.roll(genome_index, genome_length - ori_site)[::bin_length][:-1]\n        inf_filter = coverage_binned_mean > 0\n        log2_coverage = np.zeros(len(coverage_binned_mean))\n        log2_coverage[inf_filter] = np.log2(coverage_binned_mean[inf_filter])\n\n        # Export statistic data\n        data_exp = pd.DataFrame(data=dict(Relative_position=relative_pos,\n                                          genome_position=genome_index,\n                                          Count=coverage_binned_mean,\n                                          Log2_count=log2_coverage))\n        data_exp.to_csv(os.path.join(sample.output_dir, f'{sample_name}_depth_statistic.csv'))\n\n        # Plot the coverage\n        # filter for two arms of chromosome\n        x_fliter = relative_pos >= 0\n        filter = np.logical_and(x_fliter, inf_filter)\n        x_fliter2 = relative_pos <= 0\n        
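        # the two masks split the genome into its replichores (m' >= 0 and m' <= 0);
        # the loop below fits one log2-coverage regression slope per arm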
filter2 = np.logical_and(x_fliter2, inf_filter)\n filters = [filter, filter2]\n fig1, ax2 = plt.subplots(1, 1, figsize=(12, 12))\n results = []\n\n for flt in filters:\n ret = linregress(relative_pos[flt], np.log2(coverage_binned_mean[flt]))\n results.append(ret)\n ax2.scatter(relative_pos[flt], np.log2(coverage_binned_mean[flt]), c='#85C1E9')\n ax2.plot(relative_pos[flt], ret.intercept + ret.slope * relative_pos[flt],\n '--', label='Slope: %.3f' % ret.slope, c='#F1948A')\n\n ax2.set_title('%s Average Slope: %.3f' %\n (sample.sample_name, np.mean([np.abs(ret.slope) for ret in results])),\n pad=12)\n ax2.set_ylabel('$\\mathrm{log}_{2}X_c$', labelpad=7)\n ax2.set_xlabel('$m^{\\prime}$', labelpad=7)\n ax2.legend()\n global_lock.acquire()\n fig1.savefig(os.path.join(sample.output_dir, f'{sample_name}_depth_statistic.svg'), transparent=True)\n global_lock.release()\n print(f'[{sample.sample_name}] -> Successful!')\n thread_state[index] = False\n return None\n\n t_id = thread.start_new_thread(coverage_process, (sample, ori_site, ter_site, bin_length, index))\n return t_id\n\n\n# %%\nif __name__ == '__main__':\n # %%\n parent_dir = r'/media/fulab/fulab_zc_1/seq_data/LLW_data/20220322_17_sample_data/17_sample/soapnuke/clean'\n ref_ps = '/media/fulab/fulab_zc_1/seq_data/Genome_ref/1655_genome_Liu_lab_20220322.fa'\n exp_dir = r'/media/fulab/fulab_zc_1/seq_data/LLW_data/20220322_17_sample_data/17_sample_deep_seq_results'\n ori_site = 1 # 3925859\n ter_site = 2305111\n bin_length = 5000\n\n sample_dir = [fold.name for fold in os.scandir(parent_dir) if fold.is_dir()]\n sample_msg = {}\n\n for dir in tqdm(sample_dir, desc=f'[Dir Scanning]'):\n reads = [os.path.join(parent_dir, dir, fa_file.name)\n for fa_file in os.scandir(os.path.join(parent_dir, dir))\n if fa_file.name.split('.')[-1] == 'gz']\n sample_msg[dir] = reads\n\n thread_state = [False] * len(list(sample_msg.keys()))\n\n for index, (sample, reads) in enumerate(tqdm(sample_msg.items())):\n th_id = deep_seq_pip(sample, ref_ps, reads, ori_site, ter_site, bin_length, exp_dir, index)\n\n while True in thread_state:\n time.sleep(5)\n\n output_dirs = [os.path.join(exp_dir, dir.name)\n for dir in os.scandir(exp_dir)\n if dir.name.split('_')[-1] == 'output' and dir.is_dir()]\n rets_file = []\n for dir in output_dirs:\n rets_flies_list = [os.path.join(dir, file.name)\n for file in os.scandir(dir)\n if file.name.split('.')[-1] in ['csv', 'svg']]\n rets_file += rets_flies_list\n\n all_rets_ps = os.path.join(exp_dir, 'all_rests')\n try:\n os.mkdir(all_rets_ps)\n except FileExistsError:\n pass\n\n cmd = f\"cp {' '.join(rets_file)} {all_rets_ps}\"\n\n sbp.run(cmd, shell=True)\n\n\n\n#%%\n # # deep_seq_pip('WT', ref_ps, sample_msg['WT'], ori_site, bin_length, exp_dir, 0)\n # sample_name = 'WT'\n # ref_ps = '/media/fulab/Fu_lab_data1/seq_data/20211101_dnaA_datA/1655_genome_Liu_lab.fa'\n # exp_dir = '/media/fulab/Fu_lab_data1/seq_data/20211101_dnaA_datA/dnaAdatA/'\n # sample = RNASeqAnalyzer(sample_name=sample_name, ref_ps=ref_ps, gff_ps=None,\n # seq_ps1=sample_msg['WT'][0], seq_ps2=sample_msg['WT'][1], bowtie_pars={\"-p\": 32},\n # output_dir=exp_dir)\n # sample.seq_data_align()\n #\n # bam_file = BAMile(sample.bam_sorted_ps, sample.gff_ps, sample.reference_file_path,\n # paired_flag=sample.paired_flag)\n # bam_file.separate_bam_by_strand(clean_rtRNA=False)\n # bam_file.count_coverage()\n # coverage = bam_file.fetch_coverage(bam_file.genome_set[0], ori_site, ori_site - 1, move_average=150)\n #\n # genome_length = 
len(bam_file.genomes[bam_file.genome_set[0]])\n # coverage_binned = binned_statistic(np.arange(len(coverage)), coverage, 'mean',\n # bins=int(genome_length / bin_length))\n #\n # coverage_binned_mean = coverage_binned.statistic\n # zerio_index = round(len(coverage_binned_mean) / 2)\n # coverage_binned_mean = np.roll(coverage_binned_mean, round(zerio_index))\n #\n # left_pos = np.linspace(-1, 0, num=zerio_index, endpoint=False)\n # right_pos = np.linspace(0, 1, num=(len(coverage_binned_mean) - zerio_index), endpoint=True)\n # relative_pos = np.concatenate([left_pos, right_pos])\n #\n # genome_index = np.arange(1, genome_length)\n # genome_index = np.roll(genome_index, genome_length - ori_site)[::bin_length][:-1]\n # inf_filter = coverage_binned_mean > 0\n # log2_coverage = np.zeros(len(coverage_binned_mean))\n # log2_coverage[inf_filter] = np.log2(coverage_binned_mean[inf_filter])","repo_name":"MinTTT/RNA_seq_pip","sub_path":"Deep_seq_for_C.py","file_name":"Deep_seq_for_C.py","file_ext":"py","file_size_in_byte":9078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3218699637","text":"from collections import defaultdict, Counter\nfrom math import log10\nfrom asap.core import Base, SparseVector, DenseVector\nimport numpy as np\n\n\nclass Tokenizer(Base):\n\n def __init__(self, sep=' ', key='tokens'):\n super().__init__(key)\n self._sep = sep\n\n def process(self, instance):\n tokens = instance.text.split(self._sep)\n instance.add_feature(self.key, tokens)\n return instance\n\n\nclass TFIDF(Base):\n\n def __init__(self, topk=None, idf=None, token_map=None, key='tfidf'):\n super().__init__(key, True)\n\n self._topk = topk\n self._idf = idf\n self._token_index_map = token_map\n\n def process(self, instance):\n term_counts = Counter(instance.get_feature('tokens'))\n vector = SparseVector(len(self._token_index_map))\n for tok in term_counts.elements():\n if tok in self._token_index_map:\n vector[self._token_index_map[tok]] = term_counts[tok] * self._idf[tok]\n instance.add_feature(self.key, vector)\n\n return instance\n\n def train(self, instances):\n \"\"\"\n\n :Instance instances: list of instances to train with\n \"\"\"\n assert isinstance(instances, list)\n\n total_docs = float(len(instances))\n # Gather all token occurrences in documents\n term_docs = defaultdict(set)\n for inst in instances:\n for tok in inst.get_feature('tokens'):\n term_docs[tok].add(inst.id)\n\n # Calculate IDF and TFIDF value for each token and sort by TFIDF value (high to low)\n idf_vals = {}\n tfidf_vals = []\n for tok in term_docs.keys():\n idf_vals[tok] = log10(total_docs / (len(term_docs[tok]) + 1))\n tfidf_vals.append((tok, (len(term_docs[tok]) + 1) * idf_vals[tok]))\n tfidf_vals.sort(key=lambda x: x[1], reverse=True)\n\n # Assign cutoff point\n if self._topk is not None:\n limit = self._topk\n else:\n limit = len(idf_vals)\n\n # Save IDF values and vector indices for values that make the cut\n idf = {}\n index_map = {}\n for i, tup in enumerate(tfidf_vals[:limit]):\n idf[tup[0]] = idf_vals[tup[0]]\n index_map[tup[0]] = i\n\n self._idf = idf\n self._token_index_map = index_map\n\n\nclass BagOfWords(Base):\n\n def __init__(self, min_occur=1, key='bow'):\n super().__init__(key, True)\n\n assert isinstance(min_occur, int)\n self._min_occur = min_occur\n self._bow_lookup = None\n\n def process(self, instance):\n vector = SparseVector(len(self._bow_lookup))\n for token in instance.get_feature('tokens'):\n if token in self._bow_lookup:\n 
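                # binary indicator: record presence of the token, not its count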
vector[self._bow_lookup[token]] = 1\n\n        instance.add_feature(self.key, vector)\n\n        return instance\n\n    def train(self, instances):\n        assert isinstance(instances, list)\n\n        # Count token occurrences\n        counter = Counter()\n        for inst in instances:\n            counter.update(inst.get_feature('tokens'))\n\n        self._bow_lookup = {}\n        n = 0\n        for token, count in counter.most_common():\n            if count >= self._min_occur:\n                self._bow_lookup[token] = n\n                n += 1\n            else:\n                break\n\n\nclass NN_BagOfWords(Base):\n\n    def __init__(self, min_occur=1, chunk_len=10, step_size=5, key='nn-bow'):\n        super().__init__(key, True)\n\n        assert isinstance(min_occur, int)\n        self._min_occur = min_occur\n        self._chunk_len = chunk_len\n        self._step_size = step_size\n        self._bow_lookup = None\n        self._bow_size = 0\n\n    def process(self, instance):\n        # Get list of words that are in the bag\n        in_bag = [self._bow_lookup[token] for token in instance.get_feature('tokens') if token in self._bow_lookup]\n\n        # Chunk the sentence in order to provide constant-length inputs\n        chunks = []\n        for i in range(0, max(len(in_bag) - self._step_size, 1), self._step_size):\n            indices = in_bag[i:i + self._chunk_len]\n            vecs = []\n            # Create one hot vectors for each word\n            for index in indices:\n                v = np.zeros(self._bow_size)\n                v[index] = 1\n                vecs.append(v)\n            if len(vecs) < self._chunk_len:\n                for _ in range(self._chunk_len - len(vecs)):\n                    vecs.append(np.zeros(self._bow_size))\n            chunks.append(vecs)\n\n        # Place lists in the instance\n        instance.add_feature(self.key, DenseVector(contents=chunks))\n        return instance\n\n    def train(self, instances):\n        # Count token occurrences\n        counter = Counter()\n        for inst in instances:\n            counter.update(inst.get_feature('tokens'))\n\n        self._bow_lookup = {}\n        n = 0\n        for token, count in counter.most_common():\n            if count >= self._min_occur:\n                self._bow_lookup[token] = n\n                n += 1\n            else:\n                break\n        self._bow_size = len(self._bow_lookup)\n\n\nclass UniqueWordCount(Base):\n    \"\"\"Count the unique words in an instance's text.\"\"\"\n\n    def __init__(self, key=\"unique-word-count\"):\n        super().__init__(key)\n\n    def process(self, instance):\n        instance.add_feature(self.key, len(set(instance.get_feature(\"tokens\").to_list())))\n        return instance\n\n\nclass WordCount(Base):\n    \"\"\"Count all of the words (tokens) in an instance's text.\"\"\"\n\n    def __init__(self, key=\"word-count\"):\n        super().__init__(key)\n\n    def process(self, instance):\n        instance.add_feature(self.key, len(instance.get_feature(\"tokens\")))\n        return instance\n\n\nclass CharacterCount(Base):\n    \"\"\"Count all of the characters in an instance's text.\"\"\"\n\n    def __init__(self, key=\"char-count\"):\n        super().__init__(key)\n\n    def process(self, instance):\n        instance.add_feature(self.key, len(instance.text))\n        return instance\n\n\nclass NonWhitespaceCharacterCount(Base):\n    \"\"\"Count all of the non-whitespace characters in an instance's text.\"\"\"\n\n    def __init__(self, key=\"!white-char-count\"):\n        super().__init__(key)\n\n    def process(self, instance):\n        instance.add_feature(self.key, len(instance.text.replace(' ', '')))\n        return instance\n\n\nclass ContainsWords(Base):\n    \"\"\"Create word presence vector based on a list of words.\"\"\"\n\n    def __init__(self, word_list_path, key='word-presence'):\n        super().__init__(key)\n        self._words = {}\n        with open(word_list_path) as f:\n            for i, line in enumerate(f.readlines()):\n                self._words[line.strip().lower()] = i\n\n    def process(self, instance):\n        vec = SparseVector(len(self._words))\n        for tok in instance.get_feature('tokens'):\n            if tok in self._words:\n                vec[self._words[tok]] = 1\n        instance.add_feature(self.key, vec)\n        return instance\n","repo_name":"CollinM/asap-sas","sub_path":"asap/core/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"70573873795","text":"import boto3\nfrom flask import json\nfrom flask import render_template, redirect, url_for, abort, flash, request, \\\n    current_app, make_response\nfrom flask_login import login_required, current_user\nfrom flask_sqlalchemy import get_debug_queries\nfrom sqlalchemy import text\n\nfrom . import main\nfrom .forms import EditProfileForm, EditProfileAdminForm, SchoolForm, \\\n    UserForm, EditSchoolForm, AssetForm, DeleteUserForm, DeleteSchoolForm, \\\n    ChangePasswordAdminForm, GameDataForm, DeleteAssetForm, DeleteUserStudentsForm, \\\n    BatchUsersForm\nfrom .. import db, redis_store\nfrom ..decorators import admin_required\nfrom ..models import Role, User, School, Permission, Score, Asset, GameData, UserSchool\n\n\n@main.after_app_request\ndef after_request(response):\n    for query in get_debug_queries():\n        if query.duration >= current_app.config['BACKEND_SLOW_DB_QUERY_TIME']:\n            current_app.logger.warning(\n                'Slow query: %s\\nParameters: %s\\nDuration: %fs\\nContext: %s\\n'\n                % (query.statement, query.parameters, query.duration,\n                   query.context))\n    return response\n\n\n@main.route('/', methods=['GET', 'POST'])\ndef index():\n    return render_template('index.html')\n\n\n@main.route('/user/<username>')\n@login_required\ndef user(username):\n    user = User.query.filter_by(username=username).first_or_404()\n    return render_template('user.html', user=user)\n\n\n@main.route('/school/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef school(id):\n    school = School.query.filter_by(id=id).first_or_404()\n    return render_template('school.html', school=school)\n\n\n@main.route('/schools', methods=['GET', 'POST'])\n@login_required\ndef schools():\n    form = SchoolForm()\n    if current_user.can(Permission.CREATE_SCHOOLS) and form.validate_on_submit():\n        school = School(name=form.name.data, description=form.description.data)\n        db.session.add(school)\n        return redirect(url_for('.index'))\n    page = request.args.get('page', 1, type=int)\n    if not current_user.is_administrator():\n        query = School.query.join(UserSchool)\\\n            .filter(UserSchool.user_id == current_user.id)\n    else:\n        query = School.query\n    pagination = query.order_by(School.created.desc()).paginate(\n        page, per_page=current_app.config['BACKEND_POSTS_PER_PAGE'],\n        error_out=False)\n    schools = pagination.items\n    return render_template('schools.html', form=form, schools=schools, pagination=pagination)\n\n@main.route('/scores', methods=['GET'])\n@login_required\ndef scores():\n    page = request.args.get('page', 1, type=int)\n    query = Score.query\n    pagination = query.order_by(Score.created.desc()).paginate(\n        page, per_page=current_app.config['BACKEND_POSTS_PER_PAGE'],\n        error_out=False)\n    scores = pagination.items\n    return render_template('scores.html', scores=scores, pagination=pagination)\n\n@main.route('/users', methods=['GET'])\n@login_required\ndef users():\n    form = UserForm()\n    if current_user.can(Permission.CREATE_USERS) and form.validate_on_submit():\n        user = User()\n        user.username = form.username.data\n        user.password = form.password.data\n        user.role_id = form.role.data\n        db.session.add(user)\n        return redirect(url_for('.index'))\n    page = request.args.get('page', 1, type=int)\n    if current_user.is_teacher():\n        query = User.query.filter(User.teacher_id == current_user.id)\n    else:\n        query = User.query\n    pagination = query.order_by(User.created.desc()).paginate(\n        page, per_page=current_app.config['BACKEND_POSTS_PER_PAGE'],\n        error_out=False)\n    users = pagination.items\n    return render_template('users.html', form=form, users=users, pagination=pagination)\n\n\n@main.route('/add-user', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef add_user():\n    form = UserForm()\n    if current_user.can(Permission.CREATE_USERS) and form.validate_on_submit():\n        user = User()\n        user.username = form.username.data\n        user.password = form.password.data\n        user.role = Role.query.get(form.role.data)\n        user.confirmed = True\n        user.enabled = True\n        db.session.add(user)\n        flash('The user {} has been created'.format(user.username))\n        return redirect(url_for('.users'))\n    return render_template('edit_profile.html', form=form)\n\n@main.route('/batch-add-users', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef batch_add_users():\n    form = BatchUsersForm()\n    if current_user.can(Permission.CREATE_USERS) and form.validate_on_submit():\n        User.import_students_from_data(form.csv_data.data, delimiter='\\t')\n        return redirect(url_for('.users'))\n    return render_template('batch_add_users.html', form=form)\n\n@main.route('/change-password-admin/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef change_password_admin(id):\n    user = User.query.get_or_404(id)\n    form = ChangePasswordAdminForm(user=user)\n    if form.validate_on_submit():\n        new_password = form.new_password.data\n        token = user.generate_reset_token()\n        user.reset_password(token, new_password)\n        return redirect(url_for('.user', username=user.username))\n    return render_template('change_password_admin.html', form=form)\n\n@main.route('/delete-school/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef delete_school(id):\n    school = School.query.get_or_404(id)\n    form = DeleteSchoolForm(school=school)\n    if form.validate_on_submit():\n        db.session.delete(school)\n        return redirect(url_for('.schools'))\n    return render_template('delete_school.html', school=school, form=form)\n\n@main.route('/delete-user/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef delete_user(id):\n    user = User.query.get_or_404(id)\n    delete_students = request.args.get('delete_students')\n    if delete_students == \"false\":\n        delete_students = False\n    else:\n        delete_students = True\n    if delete_students and user.is_teacher() and len(user.my_students().all()) > 0:\n        return redirect(url_for('.delete_user_students', id=id))\n    form = DeleteUserForm(user=user)\n    if form.validate_on_submit():\n        if not delete_students:\n            for student in user.my_students():\n                student.teacher_id = None\n                db.session.add(student)\n            db.session.commit()\n        db.session.delete(user)\n        return redirect(url_for('.users'))\n    return render_template('delete_user.html', user=user, form=form)\n\n@main.route('/delete-user-students/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef delete_user_students(id):\n    user = User.query.get_or_404(id)\n    if not user.is_teacher() or not len(user.my_students().all()) > 0:\n        return redirect(url_for('.users'))\n    form = DeleteUserStudentsForm(user=user)\n    if form.validate_on_submit():\n        if form.delete_students.data:\n            for student in user.my_students():\n                db.session.delete(student)\n            db.session.commit()\n        return redirect(url_for('.delete_user', id=id) + '?delete_students=false')\n    return render_template('delete_user_students.html', user=user, form=form)\n\n@main.route('/edit-profile', methods=['GET', 'POST'])\n@login_required\ndef edit_profile():\n    form = EditProfileForm()\n    if form.validate_on_submit():\n        current_user.name = form.name.data\n        db.session.add(current_user)\n        flash('Your profile has been updated.')\n        return redirect(url_for('.user', username=current_user.username))\n    form.name.data = current_user.name\n    return render_template('edit_profile.html', form=form)\n\n\n@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_profile_admin(id):\n    user = User.query.get_or_404(id)\n    form = EditProfileAdminForm(user=user)\n    if form.validate_on_submit():\n        if form.email.data == \"\":\n            user.email = None\n        else:\n            user.email = form.email.data\n        user.username = form.username.data\n        user.confirmed = form.confirmed.data\n        user.enabled = form.enabled.data\n        user.role = Role.query.get(form.role.data)\n        user.name = form.name.data\n        user.add_to_schools(form.schools.data)\n        if user.is_student():\n            user.teacher = User.query.get(form.teacher.data)\n        db.session.add(user)\n        flash('The profile has been updated.')\n        return redirect(url_for('.user', username=user.username))\n    form.email.data = user.email\n    form.username.data = user.username\n    form.confirmed.data = user.confirmed\n    form.enabled.data = user.enabled\n    form.role.data = user.role_id\n    form.name.data = user.name\n    form.schools.data = [s.id for s in user.schools]\n    if user.teacher and form.teacher:\n        form.teacher.data = user.teacher.id\n    return render_template('edit_profile.html', form=form, user=user)\n\n\n@main.route('/edit-school/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_school(id):\n    school = School.query.get_or_404(id)\n    form = EditSchoolForm(school=school)\n    if form.validate_on_submit():\n        school.name = form.name.data\n        school.enabled = form.enabled.data\n        school.address = form.address.data\n        school.email = form.email.data\n        school.description = form.description.data\n\n        db.session.add(school)\n        flash('The school has been updated.')\n        return redirect(url_for('.school', id=school.id))\n    form.name.data = school.name\n    form.enabled.data = school.enabled\n    form.address.data = school.address\n    form.email.data = school.email\n    form.description.data = school.description\n    return render_template('edit_school.html', form=form, school=school)\n\n@main.route('/user-stats')\n@login_required\ndef user_stats():\n    from rq import Queue\n    from ..jobs import game_stats\n\n    s3_bucket = current_app.config['S3_BUCKET']\n    aws_region = current_app.config['AWS_REGION']\n\n    q = Queue(connection=redis_store)\n\n    user_role = ''\n    if current_user.is_student():\n        user_role = 'student'\n    if current_user.is_teacher():\n        user_role = 'teacher'\n    if current_user.is_administrator():\n        user_role = 'administrator'\n\n    result = q.enqueue(game_stats.game_stats, aws_region, s3_bucket, current_user.id, user_role, timeout=59*30)\n\n    job_url = 'https://s3.amazonaws.com/{}/jobs/{}.csv'.format(s3_bucket, result.id)\n\n    return render_template('game_stats.html', job_url=job_url)\n\n@main.route('/game-data')\n@login_required\n@admin_required\ndef game_data():\n    page = request.args.get('page', 1, type=int)\n    query = GameData.query\n    pagination = query.order_by(GameData.file_name.desc()).paginate(\n        page, per_page=current_app.config['BACKEND_POSTS_PER_PAGE'],\n        error_out=False)\n    game_data = pagination.items\n    return render_template('game_data.html', game_data=game_data, pagination=pagination)\n\n\n@main.route('/edit-game-data/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef edit_game_data(id):\n    game_data = GameData.query.get(id)\n    form = GameDataForm(game_data=game_data)\n    if form.validate_on_submit():\n        game_data.content = form.file_content.data\n        return redirect(url_for('.game_data'))\n    return render_template('edit_game_data.html', form=form, game_data=game_data)\n\n\n@main.route('/assets')\n@login_required\n@admin_required\ndef assets():\n    page = request.args.get('page', 1, type=int)\n    query = Asset.query\n    pagination = query.order_by(Asset.file_name.asc()).paginate(\n        page, per_page=current_app.config['BACKEND_POSTS_PER_PAGE'],\n        error_out=False)\n    assets = pagination.items\n    return render_template('assets.html', assets=assets, pagination=pagination)\n\n\n@main.route('/upload-asset', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef upload_asset():\n    form = AssetForm()\n    if form.validate_on_submit():\n        asset = Asset()\n        asset.file_name = form.file_name.data\n        asset.file_type = form.file_type.data\n        db.session.add(asset)\n        return redirect(url_for('.assets'))\n    return render_template('upload_asset.html', form=form)\n\n\n@main.route('/delete-asset/<int:id>', methods=['GET', 'POST'])\n@login_required\n@admin_required\ndef delete_asset(id):\n    asset = Asset.query.filter_by(id=id).first_or_404()\n    form = DeleteAssetForm()\n    if form.validate_on_submit():\n        s3 = boto3.client('s3', current_app.config['AWS_REGION'])\n        s3.delete_object(Bucket=current_app.config['S3_BUCKET'], Key='assets/{}'.format(asset.file_name))\n        db.session.delete(asset)\n        return redirect(url_for('.assets'))\n    return render_template('delete_asset.html', form=form, asset=asset)\n\n@main.route('/sign-s3/')\n@login_required\n@admin_required\ndef sign_s3():\n    S3_BUCKET = current_app.config['S3_BUCKET']\n    AWS_REGION = current_app.config['AWS_REGION']\n\n    file_name = request.args.get('file-name')\n    file_type = request.args.get('file-type')\n\n    s3 = boto3.client('s3', AWS_REGION)\n\n    presigned_post = s3.generate_presigned_post(\n        Bucket = S3_BUCKET,\n        Key = 'assets/{}'.format(file_name),\n        Fields = {\"acl\": \"public-read\", \"Content-Type\": file_type},\n        Conditions = [\n            {\"acl\": \"public-read\"},\n            {\"Content-Type\": file_type}\n        ],\n        ExpiresIn = 3600\n    )\n\n    return json.dumps({\n        'data': presigned_post,\n        'file_name': file_name,\n        'file_type': file_type,\n        'url': 'https://s3.amazonaws.com/{}/assets/{}'.format(S3_BUCKET, file_name)\n    })\n","repo_name":"amanya/backdne","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"15088940519","text":"# Created: 13.07.2022\n# Last Revision: 13.07.2022\n# Authors: ?, Mathieu Udriot\n# Emails: ?, mathieu.udriot@epfl.ch\n# Description: Adapted from TCAT code Common_functions (version 2022, eSpace), used to compute delta_v in orbit\n\nfrom numpy.linalg import norm\nfrom Manoeuvre import *\nfrom poliastro.twobody import Orbit\nfrom poliastro.core import perturbations\n\n# global parameters for space debris index computation\nALTITUDE_ATMOSPHERE_LIMIT = 200 * u.km\n\ndef instant_orbital_velocity(orbit, radius):\n    \"\"\" Returns instantaneous orbital velocity at a particular distance from attractor.\n    Used during delta v calculations.\n\n    Args:\n        orbit (poliastro.twobody.Orbit): orbit\n        radius (u.): distance to the center of the attractor\n\n    Return:\n        (u.m / u.s): orbital speed at distance given in argument\n    \"\"\"\n    # Check if radius is smaller than apogee plus 
1m to account for rounding errors.\n if radius > orbit.r_a + 1 * u.m:\n raise Exception('Unattainable radius specified.', radius, orbit)\n # compute speed\n body = orbit.attractor\n speed = np.sqrt(body.k * (2./radius - 1/orbit.a)).to(u.m / u.s)\n return speed.to(u.m/u.s)\n \ndef inclination_change_delta_v(initial_orbit, final_orbit):\n \"\"\" Returns delta_v necessary to change inclination from initial to final orbital plane.\n This only includes inclination change.\n\n Args:\n initial_orbit (poliastro.twobody.Orbit): initial orbit\n final_orbit (poliastro.twobody.Orbit): final orbit\n\n Return:\n (u.m / u.s): delta v necessary for inclination change\n \"\"\"\n body = initial_orbit.attractor\n delta_i = final_orbit.inc - initial_orbit.inc\n ecc = initial_orbit.ecc\n w = initial_orbit.argp\n a = max(initial_orbit.a, final_orbit.a)\n n = np.sqrt(body.k / a**3)\n f = -initial_orbit.argp\n delta_v = abs(2 * np.sin(delta_i / 2.) * (np.sqrt(1-ecc**2)*np.cos(w + f)*n*a) / (1 + ecc * np.cos(f)))\n return delta_v.to(u.m / u.s)\n\ndef high_thrust_delta_v(initial_orbit, final_orbit, initial_mass, mean_thrust, isp):\n \"\"\"Returns the delta v necessary to perform an orbit change, assuming impulsive maneuvers.\n This takes into account the transfer from one elliptical orbit to another.\n This takes into account possible inclination changes, performed during the adequate impulse.\n This neglects argument of periapsis changes.\n \n Args:\n initial_orbit (poliastro.twobody.Orbit): initial orbit\n final_orbit (poliastro.twobody.Orbit): final orbit\n initial_mass (u.kg) assumed servicer mass at start of maneuver\n mean_thrust (u.N): assumed thrust at start of maneuver\n isp (u.s): assumed isp, used to estimate manoeuvre duration\n\n Return:\n (u.m / u.s): total delta v to reach final orbit\n (poliastro.twobody.Orbit): transfer orbit if applicable\n (u.m / u.s): first impulse\n (u.m / u.s): second impulse\n (u.day): first impulse duration\n (u.day): second impulse duration\n (u.day): total orbit change duration\n \"\"\"\n manoeuvres = []\n # compute delta v for inclination change and find if inclination needs to be done during first or second impulse\n inc_delta_v = inclination_change_delta_v(initial_orbit, final_orbit)\n if initial_orbit.a > final_orbit.a:\n first_inc_delta_v = inc_delta_v\n second_inc_delta_v = 0. * u.m/u.s\n else:\n first_inc_delta_v = 0. * u.m/u.s\n second_inc_delta_v = inc_delta_v\n\n # let's simplify the problem by neglecting argument of periapsis changes\n # we suppose arguments of periapsis are either aligned or opposed\n # TODO: introduce argument of periapsis changes\n first_burn_radius = initial_orbit.r_a\n if abs(final_orbit.argp - initial_orbit.argp) < 180. 
* u.deg:\n second_burn_radius = final_orbit.r_p\n else:\n second_burn_radius = final_orbit.r_a\n\n # find transfer orbit, neglecting argument of periapsis change\n a = (first_burn_radius + second_burn_radius) / 2.\n ecc = abs(first_burn_radius - second_burn_radius) / (first_burn_radius + second_burn_radius)\n transfer_orbit = Orbit.from_classical(final_orbit.attractor, a, ecc, final_orbit.inc, final_orbit.raan,\n final_orbit.argp, final_orbit.nu, final_orbit.epoch)\n\n if final_orbit.attractor != initial_orbit.attractor:\n raise ValueError(\"Initial and final orbits have different attractors.\")\n\n # first burn\n v_i_1 = instant_orbital_velocity(initial_orbit, first_burn_radius)\n v_f_1 = instant_orbital_velocity(transfer_orbit, first_burn_radius)\n delta_v_1 = np.sqrt((v_f_1 - v_i_1)**2 + first_inc_delta_v**2)\n manoeuvre = Manoeuvre(delta_v_1,\"first high-trust dV\")\n burned_mass = manoeuvre.compute_burn_duration(initial_mass, mean_thrust, isp)\n manoeuvres.append(manoeuvre)\n\n # second burn\n # TODO add condition on second burn if altitude is lower than ALTITUDE_ATMOSPHERIC_LIMIT\n v_i_2 = instant_orbital_velocity(transfer_orbit, second_burn_radius)\n v_f_2 = instant_orbital_velocity(final_orbit, second_burn_radius)\n delta_v_2 = np.sqrt((v_f_2 - v_i_2)**2 + second_inc_delta_v**2)\n manoeuvre = Manoeuvre(delta_v_2,\"second high-trust dV\")\n burned_mass = burned_mass + manoeuvre.compute_burn_duration(initial_mass, mean_thrust, isp)\n manoeuvres.append(manoeuvre)\n\n transfer_duration = (transfer_orbit.period / 2).to(u.day)\n\n\n return manoeuvres, transfer_duration, transfer_orbit, burned_mass\n\n\n# from poliastro example https://docs.poliastro.space/en/stable/examples/Natural%20and%20artificial%20perturbations.html\n# def natural_decay(t0, state, k, R, C_D, A_over_m, H0, rho0, orbit):\n# # in progress\n# \"\"\"\n# Args: \n \n# orbit: orbit in LEO on which the spacecraft is left at end of mission (either the operational orbit if no EOL startegy, or the disposal orbit if there is a disposal manoeuvre)\n# \"\"\"\n\n# atmosphere_orbit = Orbit.from_classical(orbit.attractor, ALTITUDE_ATMOSPHERE_LIMIT+orbit.attractor.R, orbit.ecc,\n# orbit.inc, orbit.raan, orbit.argp, orbit.nu)\n\n# if orbit.ecc > 0.1:\n# raise Exception('Use of Edelbaum not valid for elliptic orbits')\n \n# # compute necessary inputs for Edelbaum formulations\n# initial_radius = (orbit.r_a + orbit.r_p) / 2\n# final_radius = (atmosphere_orbit.r_a + atmosphere_orbit.r_p) / 2\n# v_0 = instant_orbital_velocity(orbit, initial_radius)\n# v_f = instant_orbital_velocity(atmosphere_orbit, final_radius)\n\n# Delta_v = v_f - v_0\n\n# a_drag = perturbations.atmospheric_drag_exponential(t0, state, k, R, C_D, A_over_m, H0, rho0)\n# atm_perturbation = np.array([0, 0, 0, a_drag[0], a_drag[1], a_drag[2]])\n\n# decay_duration = (Delta_v / norm(a_drag)).to(u.year)\n\n# du_kep = func_twobody(t0, state, k)\n\n# f = du_kep + atm_perturbation\n# decay = orbit.to_ephem(strategy=EpochBounds(orbit.epoch, orbit.epoch + decay_duration.to(u.s)), method=CowellPropagator(f=f))\n","repo_name":"MatUspace/test_act","sub_path":"Common_functions.py","file_name":"Common_functions.py","file_ext":"py","file_size_in_byte":7094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8472533255","text":"from itertools import accumulate\nfrom typing import Any, Callable, Generator, Optional, Tuple\n\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass Dataset(Dataset):\n worker_init_fn: 
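The impulsive-transfer bookkeeping in the Common_functions.py record above (vis-viva speed at each burn radius, transfer ellipse with a = (r1 + r2)/2) can be sanity-checked with a dependency-free sketch. This is an illustrative Hohmann case only: the circular/coplanar assumption, the Earth gravitational parameter, and the LEO/GEO radii below are assumptions for the example, not values taken from the module.

import math

MU_EARTH = 3.986004418e14  # m^3/s^2, standard gravitational parameter (assumed constant)

def vis_viva(radius_m, semi_major_axis_m):
    # Same relation the module's instant_orbital_velocity encodes:
    # v = sqrt(mu * (2/r - 1/a))
    return math.sqrt(MU_EARTH * (2.0 / radius_m - 1.0 / semi_major_axis_m))

def hohmann_delta_v(r1_m, r2_m):
    # Two impulsive burns between circular, coplanar orbits; the transfer
    # ellipse touches both radii, so each burn is a speed difference at
    # the shared radius.
    a_transfer = (r1_m + r2_m) / 2.0
    dv1 = abs(vis_viva(r1_m, a_transfer) - vis_viva(r1_m, r1_m))
    dv2 = abs(vis_viva(r2_m, r2_m) - vis_viva(r2_m, a_transfer))
    return dv1, dv2

if __name__ == "__main__":
    r_leo = 6371e3 + 400e3   # 400 km LEO (illustrative)
    r_geo = 42164e3          # GEO
    dv1, dv2 = hohmann_delta_v(r_leo, r_geo)
    print(f"burn 1: {dv1:.1f} m/s, burn 2: {dv2:.1f} m/s, total: {dv1 + dv2:.1f} m/s")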
Optional[Callable] = None\n collate_fn: Optional[Callable] = None\n\n def __init__(self):\n super().__init__()\n\n def __iter__(self) -> Generator:\n for i in range(len(self)):\n yield self[i]\n\n \nclass CombinedDataset(Dataset):\n\n @property\n def collate_fn(self):\n return self.datasets[0].collate_fn\n\n def __init__(self, *datasets: Dataset):\n super().__init__()\n\n self.datasets = datasets\n self.lengths = [len(x) for x in self.datasets]\n self.accumulation = [0, *accumulate(self.lengths, lambda a, b: a + b)]\n\n def __len__(self) -> int:\n return sum(self.lengths)\n \n def to_local_index(self, index: int) -> Tuple[int, int]:\n for i, a in enumerate(self.accumulation):\n if index < a:\n break\n return i - 1, index - self.accumulation[i - 1]\n\n def __getitem__(self, index: int) -> Any:\n di, li = self.to_local_index(index)\n x = self.datasets[di][li]\n return x\n\n \nclass DataLoader(DataLoader):\n def __init__(\n self,\n dataset: Dataset,\n batch_size: Optional[int] = 1,\n shuffle: Optional[bool] = None,\n num_workers: int = 0,\n worker_init_fn: Optional[Callable] = None,\n collate_fn: Optional[Callable] = None,\n prefetch_factor: int = 2,\n pin_memory: bool = False,\n *args,\n **kwargs,\n ):\n worker_init_fn = worker_init_fn or dataset.worker_init_fn\n collate_fn = collate_fn or dataset.collate_fn\n\n super().__init__(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n prefetch_factor=prefetch_factor,\n worker_init_fn=worker_init_fn,\n collate_fn=collate_fn,\n pin_memory=pin_memory,\n *args,\n **kwargs\n )\n","repo_name":"ShinoharaHare/AI-CUP-2022-Fall-NLP","sub_path":"src/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39397736278","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n if list1 == None:\n return list2\n if list2 == None:\n return list1\n\n node1 = list1\n node2 = list2\n\n if node1.val <= node2.val:\n head = node1\n node1 = node1.next\n else:\n head = node2\n node2 = node2.next\n\n tmp = head\n while node1 and node2:\n if node1.val <= node2.val:\n tmp.next = node1\n node1 = node1.next\n else:\n tmp.next = node2\n node2 = node2.next\n tmp = tmp.next\n\n if node1 != None:\n tmp.next = node1\n else:\n tmp.next = node2\n\n return head\n","repo_name":"Otabek8866/my-leetcode-solutions","sub_path":"Algrithms-Level-1/day-10/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9603729238","text":"# Define job functions or Utils.\nimport datetime\n\nimport requests\n\n\ndef print_hello():\n print(\"Hello, world! 
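The CombinedDataset record above maps a global sample index to a (dataset, local index) pair by scanning a cumulative-length table. A minimal standalone sketch of that same mapping, using bisect instead of the linear scan (the lengths below are made up for the example):

from bisect import bisect_right
from itertools import accumulate

def to_local_index(lengths, index):
    # Given per-dataset lengths and a global index, return
    # (dataset_number, local_index) via cumulative offsets
    # [0, len0, len0+len1, ...] -- the same table CombinedDataset builds.
    offsets = [0, *accumulate(lengths)]
    di = bisect_right(offsets, index) - 1
    return di, index - offsets[di]

print(to_local_index([3, 5, 2], 0))  # (0, 0): first item of dataset 0
print(to_local_index([3, 5, 2], 4))  # (1, 1): second item of dataset 1
print(to_local_index([3, 5, 2], 9))  # (2, 1): last item of dataset 2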
The time is now {}\".format(datetime.datetime.now()))\n\ndef fetch(url):\n # Add error handling here.\n response = requests.get(url)\n if response.status_code == 200:\n print(f'API job for {url} executed successfully')\n else:\n print(f'API job for {url} failed with status code {response.status_code}')","repo_name":"himanshua790/cron_jobs_py","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14883906095","text":"from core.experiment import Experiment, ExperimentParameter\nimport logging\nimport os\n\n\nclass PQUICParameter(ExperimentParameter):\n PLUGINS = \"pquicPlugins\"\n CLIENT_PLUGINS = \"pquicClientPlugins\"\n SERVER_PLUGINS = \"pquicServerPlugins\"\n SIZE = \"pquicSize\"\n\n def __init__(self, experiment_parameter_filename):\n super(PQUICParameter, self).__init__(experiment_parameter_filename)\n self.default_parameters.update({\n PQUICParameter.PLUGINS: \"\",\n PQUICParameter.CLIENT_PLUGINS: \"\",\n PQUICParameter.SERVER_PLUGINS: \"\",\n PQUICParameter.SIZE: 10240000,\n })\n\n\nclass PQUIC(Experiment):\n NAME = \"pquic\"\n PARAMETER_CLASS = PQUICParameter\n\n BIN = \"~/pquic/picoquicdemo\"\n CERT_FILE = \"~/pquic/certs/cert.pem\"\n KEY_FILE = \"~/pquic/certs/key.pem\"\n SERVER_LOG = \"pquic_server.log\"\n CLIENT_LOG = \"pquic_client.log\"\n\n def __init__(self, experiment_parameter_filename, topo, topo_config):\n super(PQUIC, self).__init__(experiment_parameter_filename, topo, topo_config)\n self.load_parameters()\n self.ping()\n\n def load_parameters(self):\n super(PQUIC, self).load_parameters()\n self.plugins = self.experiment_parameter.get(PQUICParameter.PLUGINS)\n self.client_plugins = self.experiment_parameter.get(PQUICParameter.CLIENT_PLUGINS)\n self.server_plugins = self.experiment_parameter.get(PQUICParameter.SERVER_PLUGINS)\n self.size = int(self.experiment_parameter.get(PQUICParameter.SIZE))\n\n def prepare(self):\n super(PQUIC, self).prepare()\n self.topo.command_to(self.topo_config.client, \"rm {}\".format(PQUIC.CLIENT_LOG))\n self.topo.command_to(self.topo_config.server, \"rm {}\".format(PQUIC.SERVER_LOG))\n\n def get_plugin_cmd(self, client=False):\n device_plugins = self.client_plugins if client else self.server_plugins\n device_plugins = self.plugins if len(device_plugins) == 0 else device_plugins\n if len(device_plugins) == 0:\n return \"\"\n\n plugins = device_plugins.split(\",\")\n return \" \".join([\" -P {} \".format(p) for p in plugins])\n\n def get_pquic_server_cmd(self):\n s = \"{} {} -c {} -k {} &> {} &\".format(PQUIC.BIN, self.get_plugin_cmd(),\n PQUIC.CERT_FILE, PQUIC.KEY_FILE, PQUIC.SERVER_LOG)\n logging.info(s)\n return s\n\n def get_pquic_client_cmd(self):\n s = \"{} {} -4 -G {} {} 4443 &> {}\".format(PQUIC.BIN, self.get_plugin_cmd(client=True), self.size,\n self.topo_config.get_server_ip(), PQUIC.CLIENT_LOG)\n logging.info(s)\n return s\n\n def clean(self):\n super(PQUIC, self).clean()\n\n def run(self):\n cmd = self.get_pquic_server_cmd()\n self.topo.command_to(self.topo_config.server, cmd)\n\n self.topo.command_to(self.topo_config.client, \"sleep 2\")\n\n cmd = self.get_pquic_client_cmd()\n self.topo.command_to(self.topo_config.client, cmd)\n\n self.topo.command_to(self.topo_config.client, \"sleep 
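The fetch helper in the cron_jobs_py record above carries a "# Add error handling here." comment; one way to honor it is to lean on requests' own exception machinery. This is a sketch, not the author's implementation, and the 10-second timeout is an assumed value:

import requests

def fetch(url):
    # Raise on transport errors (DNS, connection, timeout) and on non-2xx
    # statuses, then report either outcome in the same style as the original.
    try:
        response = requests.get(url, timeout=10)  # timeout value is an assumption
        response.raise_for_status()
    except requests.RequestException as exc:
        print(f"API job for {url} failed: {exc}")
        return None
    print(f"API job for {url} executed successfully")
    return response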
2\")\n","repo_name":"qdeconinck/minitopo","sub_path":"experiments/pquic.py","file_name":"pquic.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"2477421655","text":"import collections\nimport heapq\n\nclass Node:\n def __init__(self):\n self.isWord = False\n self.nodes = collections.defaultdict(Node)\n self.suggest = []\n \n \nclass Trie:\n def __init__(self):\n self.root = Node()\n \n def insert(self, word: str):\n cur = self.root\n for char in word:\n cur = cur.nodes[char]\n heapq.heappush(cur.suggest, word)\n cur.isWord = True\n \n def query(self, word: str):\n result = []\n cur = self.root\n for i in range(len(word)):\n if word[i] not in cur.nodes:\n return result + [[] for _ in range(len(word) - i)]\n cur = cur.nodes[word[i]]\n val = []\n while cur.suggest and len(val) < 3:\n val.append(heapq.heappop(cur.suggest))\n result.append(val)\n return result\n \n\nclass Solution:\n def suggestedProducts(self, products: List[str], searchWord: str) -> List[List[str]]:\n trie = Trie()\n for product in products:\n trie.insert(product)\n return trie.query(searchWord)\n","repo_name":"OhYoooo/Leetcode","sub_path":"python/8.Tree/trie/1268.search-suggestions-system.py","file_name":"1268.search-suggestions-system.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34739834429","text":"from common.utilities.sql import sql_execute\nfrom geoprocessing.business_logic.enums import TradeAreaThreshold\nfrom geoprocessing.custom_analytics.data_checks.base_data_check import BaseCustomAnalyticsDataCheck\n\n__author__ = 'erezrubinstein'\n\nclass CustomAnalyticsTradeAreaExistsDataCheck(BaseCustomAnalyticsDataCheck):\n \"\"\"\n This data check will read the geoprocessing config and make sure that every store has the correct trade area\n \"\"\"\n\n def _data_check_name(self):\n return \"All Trade Areas Exist\"\n\n\n def _run_data_check(self):\n\n # get the trade area ids that should exist\n trade_area_ids = [getattr(TradeAreaThreshold, threshold) for threshold in self._gp_config.trade_area_thresholds]\n\n # get the sql\n sql = self._create_sql(trade_area_ids)\n\n # run the sql and return the results\n return self._run_sql(sql)\n\n\n def _format_results(self, results):\n\n # get a count of how many stores are broken\n stores_missing_trade_areas = len(results)\n\n # default to no results\n formatted_results = {}\n\n if stores_missing_trade_areas > 0:\n\n # regenerate the sql to include in the report\n trade_area_ids = [getattr(TradeAreaThreshold, threshold) for threshold in self._gp_config.trade_area_thresholds]\n sql = self._create_sql(trade_area_ids)\n\n # set results\n formatted_results = {\n \"headers\": [\"# Incorrect Stores\", \"SQL\"],\n \"rows\": [\n {\n \"# Incorrect Stores\": stores_missing_trade_areas,\n \"SQL\": sql\n }\n ]\n }\n\n return formatted_results\n\n\n # --------------------------- Private Methods -------------------------- #\n\n def _create_sql(self, trade_area_ids):\n \"\"\"\n Create the below statement dynamically for all trade areas:\n\n select s.store_id, t_1.threshold_id as t_1, t_4.threshold_id as t_4, t_5.threshold_id as t_5, t_13.threshold_id as t_13\n from stores s\n left join trade_areas t_1 on t_1.store_id = s.store_id and t_1.threshold_id = 1\n left join trade_areas t_4 on t_4.store_id = s.store_id and t_4.threshold_id = 4\n left join trade_areas t_5 on t_5.store_id = 
s.store_id and t_5.threshold_id = 5\n left join trade_areas t_13 on t_13.store_id = s.store_id and t_13.threshold_id = 13\n where t_1.threshold_id is null or t_4.threshold_id is null or t_5.threshold_id is null or t_13.threshold_id is null\n \"\"\"\n\n\n # create the fields to select sql\n fields_to_select = [\"t_%i.threshold_id\" % ta for ta in trade_area_ids]\n fields_to_select = \", \".join(fields_to_select)\n\n # create a left join statement for every trade area\n left_joins_sql = [\n \"left join trade_areas t_%i on t_%i.store_id = s.store_id and t_%i.threshold_id = %i\" % (ta, ta, ta, ta)\n for ta in trade_area_ids\n ]\n left_joins_sql = \"\\n\".join(left_joins_sql)\n\n # create the where clause dynamically\n where_statements = [\"t_%s.threshold_id is null\" % ta for ta in trade_area_ids]\n where_statements = \" or \".join(where_statements)\n\n # create the main sql statement\n return \"\"\"\n select s.store_id, %s\n from stores s\n %s\n where %s\n \"\"\" % (fields_to_select, left_joins_sql, where_statements)\n\n def _run_sql(self, sql):\n\n # execute this *!%$#\n return sql_execute(sql)","repo_name":"erezrubinstein/aa","sub_path":"gp/custom_analytics/data_checks/trade_areas_exist_data_check.py","file_name":"trade_areas_exist_data_check.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70888031873","text":"while True:\r\n try:\r\n n, r = [int(x) for x in input().split()]\r\n foram = [x for x in range(1, n+1)]\r\n voltaram = [int(x) for x in input().split()]\r\n naoVoltaram = [x for x in range(1, n+1) if x not in voltaram]\r\n \r\n if not naoVoltaram:\r\n print('*')\r\n \r\n else:\r\n for j in range(len(naoVoltaram)):\r\n print(f'{naoVoltaram[j]} ', end='')\r\n print('') \r\n\r\n except EOFError:\r\n break\r\n","repo_name":"FKettl/beecrowd","sub_path":"Uri_1471.py","file_name":"Uri_1471.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8735160584","text":"import dataclasses\nimport re\nfrom math import prod\n\n\n@dataclasses.dataclass\nclass Cube:\n x_min: int\n x_max: int\n y_min: int\n y_max: int\n z_min: int\n z_max: int\n value: bool\n\n def overlaps(self, other: \"Cube\") -> bool:\n return not any(\n a[1] < b[0] or b[1] < a[0] for a, b in zip(self.bounds, other.bounds)\n )\n\n def intersection(self, other: \"Cube\", value: bool) -> \"Cube\":\n assert self.overlaps(other)\n return Cube(\n max(self.x_min, other.x_min),\n min(self.x_max, other.x_max),\n max(self.y_min, other.y_min),\n min(self.y_max, other.y_max),\n max(self.z_min, other.z_min),\n min(self.z_max, other.z_max),\n value,\n )\n\n @property\n def bounds(self) -> tuple[tuple[int, int], ...]:\n return (\n (self.x_min, self.x_max),\n (self.y_min, self.y_max),\n (self.z_min, self.z_max),\n )\n\n @property\n def volume(self) -> int:\n return prod(max_ - min_ + 1 for min_, max_ in self.bounds)\n\n @property\n def eff_volume(self) -> int:\n return (1 if self.value else -1) * self.volume\n\n @classmethod\n def from_parts(cls, *parts: str) -> \"Cube\":\n assert len(parts) == 7\n return cls(*map(int, parts[1:]), parts[0] == \"on\")\n\n\nINSTRUCTION_REGEX = re.compile(\n r\"(on|off) x=(-?\\d+)..(-?\\d+),y=(-?\\d+)..(-?\\d+),z=(-?\\d+)..(-?\\d+)\"\n)\n\n\nwith open(\"day_22/sample.txt\") as file:\n INSTRUCTIONS = [\n Cube.from_parts(*INSTRUCTION_REGEX.search(line).groups())\n for line in file.readlines()\n ]\n\ncubes: 
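The trade-area data check above assembles its SQL dynamically from the configured threshold ids. A standalone rendering of that same string assembly makes the output easy to inspect; the two-threshold list is illustrative only:

def create_sql(trade_area_ids):
    # Mirrors _create_sql: one selected column, one LEFT JOIN, and one
    # null-check per threshold id.
    fields = ", ".join("t_%i.threshold_id" % ta for ta in trade_area_ids)
    joins = "\n".join(
        "left join trade_areas t_%i on t_%i.store_id = s.store_id "
        "and t_%i.threshold_id = %i" % (ta, ta, ta, ta)
        for ta in trade_area_ids
    )
    where = " or ".join("t_%i.threshold_id is null" % ta for ta in trade_area_ids)
    return "select s.store_id, %s\nfrom stores s\n%s\nwhere %s" % (fields, joins, where)

print(create_sql([1, 4]))  # prints the two-threshold variant of the docstring's query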
list[Cube] = []\n\nfor instruction in INSTRUCTIONS:\n new_cubes: list[Cube] = []\n\n for other_cube in cubes:\n if instruction.overlaps(other_cube):\n new_cubes.append(instruction.intersection(other_cube, not other_cube.value))\n\n cubes.extend(new_cubes)\n\n if instruction.value:\n cubes.append(instruction)\n\nprint(sum(cube.eff_volume for cube in cubes))\n","repo_name":"Akarys42/aoc-2021","sub_path":"day_22/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22937671877","text":"def find_number(p):\n missing=[]\n if not isinstance(p,list):\n raise TypeError('invalid input') \n else: \n for t in range (1,10):\n if t not in p:\n missing.append(t)\n return missing\n\nprint(find_number([1,2,3,5,6,7,9]))","repo_name":"JEMIMAHJULIAN/Challenge2DayThree","sub_path":"missingnumb.py","file_name":"missingnumb.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1786382109","text":"from exe.engine.idevicestore import IdeviceStore\nimport unittest\nimport utils\nimport os\n\n\n# ===========================================================================\n\nclass TestIdeviceStore(utils.SuperTestCase):\n\n def testLoad(self):\n \"\"\"\n Tests that idevices can be loaded\n \"\"\"\n self.assert_(isinstance(self.app.ideviceStore, IdeviceStore))\n self.assert_(os.path.exists(\"tmp/idevices/allgeneric.data\"))\n self.assert_(os.path.exists(\"tmp/idevices/extended.data\"))\n self.assert_(os.path.exists(\"tmp/idevices/showgeneric.data\"))\n\n def testLangsWithoutDuplicateIdeviceTitles(self):\n langsWithDuplicateIdeviceTitles = {}\n for lang, locale in self.app.config.locales.items():\n lang = str(lang)\n locale.install(unicode=True)\n titles = set()\n for idevice in self.app.ideviceStore.getIdevices():\n if idevice.title in titles:\n if lang in langsWithDuplicateIdeviceTitles:\n langsWithDuplicateIdeviceTitles[lang].append(idevice._title)\n else:\n langsWithDuplicateIdeviceTitles[lang] = [idevice._title]\n titles.add(idevice.title)\n if langsWithDuplicateIdeviceTitles:\n raise Exception(langsWithDuplicateIdeviceTitles)\n\nif __name__ == \"__main__\":\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestIdeviceStore))\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"exelearning/iteexe","sub_path":"testing/testidevicestore.py","file_name":"testidevicestore.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"61"} +{"seq_id":"2534038083","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0040_auto_20151211_1918'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='presentation',\n name='presentation_type',\n field=models.CharField(blank=True, max_length=100, null=True, choices=[(b'poster', b'Poster'), (b'presentation', b'Presentation')]),\n ),\n ]\n","repo_name":"jamstooks/hub","sub_path":"hub/apps/content/migrations/0041_auto_20151211_2003.py","file_name":"0041_auto_20151211_2003.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72958157953","text":"\"\"\"\n给你一个链表,删除链表的倒数第 n 
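The Advent of Code day 22 record above counts "on" cells by appending a sign-flipped intersection cube for every overlap, so double-counted volume cancels out (inclusion-exclusion). The mechanism is easier to see in one dimension; this 1-D analogue is a sketch of the same bookkeeping, not the puzzle solution itself:

import dataclasses

@dataclasses.dataclass
class Segment:
    lo: int
    hi: int
    value: bool  # True contributes +length, False contributes -length

    def overlaps(self, other):
        return not (self.hi < other.lo or other.hi < self.lo)

    def intersection(self, other, value):
        return Segment(max(self.lo, other.lo), min(self.hi, other.hi), value)

    @property
    def eff_length(self):
        return (1 if self.value else -1) * (self.hi - self.lo + 1)

def run(instructions):
    segments = []
    for inst in instructions:
        # Every overlap gets a correction segment of opposite sign, so cells
        # counted twice (or cells being switched off) cancel in the sum.
        corrections = [inst.intersection(s, not s.value)
                       for s in segments if inst.overlaps(s)]
        segments.extend(corrections)
        if inst.value:
            segments.append(inst)
    return sum(s.eff_length for s in segments)

# on 1..5, on 3..8, off 4..6 leaves cells {1,2,3,7,8} lit:
print(run([Segment(1, 5, True), Segment(3, 8, True), Segment(4, 6, False)]))  # 5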
个结点,并且返回链表的头结点。\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n \"\"\"\n 使用两个间隔为n的双指针,快指针先向前移动n+1步,接着快慢指针同时移动\n 使用虚拟头节点,移动n+1步是因为头节点可能会被删除\n :param head:\n :param n:\n :return:\n \"\"\"\n cur = ListNode(next=head)\n slow = cur\n fast = cur\n while n + 1:\n fast = fast.next\n n -= 1\n while fast:\n fast = fast.next\n slow = slow.next\n\n slow.next = slow.next.next\n\n return cur.next\n\n\ndef print_node(node):\n while node:\n print(node.val)\n node = node.next\n\n\nsolution = Solution()\nprint_node(\n solution.removeNthFromEnd(\n head=ListNode(1, next=ListNode(2, ListNode(3, ListNode(4, ListNode(5))))), n=2\n )\n)\nprint_node(solution.removeNthFromEnd(ListNode(1), 1))\n","repo_name":"SsuperL/leetcode-practice","sub_path":"medium/exercise_19.py","file_name":"exercise_19.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24833296172","text":"\nimport sys; sys.path.append('..') #Needed to do this hack, as there is a problem with the unittest module to import from other directories\nfrom RoboCraig import app\nimport unittest\n\n\nclass FlaskTestCase(unittest.TestCase):\n\n # Test to see if it can get to the landing page correctly\n def test_index(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n\n\n # Test to see if the login page properly loads with the text \"login\" on the page\n def test_login_load(self):\n tester = app.test_client(self)\n response = tester.get('/login', content_type='html/text')\n self.assertTrue(b'Login' in response.data)\n\n # Testing the home page redirect to see if the text \"Enter some search details\" displays\n def test_page_redirect(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type='html/text')\n self.assertTrue(b'Enter some search details' in response.data)\n\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"tango1542/RoboCraig","sub_path":"RoboCraig/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38304888737","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def FindKthToTail(self, head, k):\n # write code here\n res=[]\n while head:\n res.append(head)\n head=head.next\n if k>len(res):\n return None\n if k==0:\n return None\n return res[-k]","repo_name":"a1379478560/offer-python","sub_path":"链表中倒数第k个结点.py","file_name":"链表中倒数第k个结点.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5225740788","text":"#!/usr/bin/env python3\n#\n# Data manager for reference data for the 'mothur_toolsuite' Galaxy tools\nimport io\nimport json\nimport optparse\nimport os\nimport shutil\nimport sys\nimport tarfile\nimport tempfile\nimport urllib.error\nimport urllib.parse\nimport urllib.request\nimport zipfile\nfrom functools import reduce\n\n# When extracting files from archives, skip names that\n# start with the following strings\nIGNORE_PATHS = ('.', '__MACOSX/', '__')\n\n# Map file extensions to data table names\nMOTHUR_FILE_TYPES = {\".map\": \"map\",\n \".fasta\": \"aligndb\",\n 
\".align\": \"aligndb\",\n \".pat\": \"lookup\",\n \".tax\": \"taxonomy\"}\n\n# Reference data URLs\nMOTHUR_REFERENCE_DATA = {\n # Look up data\n # http://www.mothur.org/wiki/Lookup_files\n \"lookup_titanium\": {\n \"GS FLX Titanium\": [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_titanium.zip\", ]\n },\n \"lookup_gsflx\": {\n \"GSFLX\": [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_gsflx.zip\", ]\n },\n \"lookup_gs20\": {\n \"GS20\": [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/lookup_gs20.zip\", ]\n },\n # RDP reference files\n # http://www.mothur.org/wiki/RDP_reference_files\n \"RDP_v18\": {\n \"16S rRNA RDP training set 18\":\n [\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset18_062020.rdp.tgz\", ],\n \"16S rRNA PDS training set 18\":\n [\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset18_062020.pds.tgz\", ],\n },\n \"RDP_v16\": {\n \"16S rRNA RDP training set 16\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset16_022016.rdp.tgz\", ],\n \"16S rRNA PDS training set 16\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset16_022016.pds.tgz\", ],\n },\n \"RDP_v14\": {\n \"16S rRNA RDP training set 14\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset14_032015.rdp.tgz\", ],\n \"16S rRNA PDS training set 14\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset14_032015.pds.tgz\", ],\n },\n \"RDP_v10\": {\n \"16S rRNA RDP training set 10\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset10_082014.rdp.tgz\", ],\n \"16S rRNA PDS training set 10\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset10_082014.pds.tgz\", ],\n },\n \"RDP_v9\": {\n \"16S rRNA RDP training set 9\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset9_032012.rdp.zip\", ],\n \"16S rRNA PDS training set 9\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset9_032012.pds.zip\", ],\n },\n \"RDP_v7\": {\n \"16S rRNA RDP training set 7\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset7_112011.rdp.zip\", ],\n \"16S rRNA PDS training set 7\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/trainset7_112011.pds.zip\", ],\n \"8S rRNA Fungi training set 7\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/fungilsu_train_v7.zip\", ],\n },\n \"RDP_v6\": {\n \"RDP training set 6\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/rdptrainingset.zip\", ],\n },\n # Silva reference files\n # http://www.mothur.org/wiki/Silva_reference_files\n \"silva_release_138.1\": {\n \"SILVA release 138.1\":\n [\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v138_1.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v138_1.tgz\", ],\n },\n \"silva_release_128\": {\n \"SILVA release 128\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v128.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v128.tgz\", ],\n },\n \"silva_release_123\": {\n \"SILVA release 123\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v123.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v123.tgz\", ],\n },\n \"silva_release_119\": {\n \"SILVA release 119\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.nr_v119.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.seed_v119.tgz\", ],\n },\n \"silva_release_102\": {\n \"SILVA release 102\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.bacteria.zip\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.archaea.zip\",\n 
\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.eukarya.zip\", ],\n },\n \"silva_gold_bacteria\": {\n \"SILVA gold\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva.gold.bacteria.zip\", ],\n },\n # Greengenes\n # http://www.mothur.org/wiki/Greengenes-formatted_databases\n \"greengenes_August2013\": {\n \"Greengenes August 2013\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_8_99.refalign.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_8_99.taxonomy.tgz\", ],\n },\n \"greengenes_May2013\": {\n \"Greengenes May 2013\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_5_99.refalign.tgz\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_13_5_99.taxonomy.tgz\", ],\n },\n \"greengenes_old\": {\n \"Greengenes pre-May 2013\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.alignment.zip\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.tax.tgz\", ],\n },\n \"greengenes_gold_alignment\": {\n \"Greengenes gold alignment\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/greengenes.gold.alignment.zip\", ],\n },\n # Secondary structure maps\n # http://www.mothur.org/wiki/Secondary_structure_map\n \"secondary_structure_maps_silva\": {\n \"SILVA\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/silva_ss_map.zip\", ],\n },\n \"secondary_structure_maps_greengenes\": {\n \"Greengenes\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/gg_ss_map.zip\", ],\n },\n # Lane masks: not used here?\n \"lane_masks\": {\n \"Greengenes-compatible\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/Lane1241.gg.filter\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1287.gg.filter\",\n \"https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1349.gg.filter\", ],\n \"SILVA-compatible\":\n [\"https://mothur.s3.us-east-2.amazonaws.com/wiki/lane1349.silva.filter\", ]\n },\n}\n\n\n# Utility functions for interacting with Galaxy JSON\ndef read_input_json(jsonfile):\n \"\"\"Read the JSON supplied from the data manager tool\n\n Returns a tuple (param_dict,extra_files_path)\n\n 'param_dict' is an arbitrary dictionary of parameters\n input into the tool; 'extra_files_path' is the path\n to a directory where output files must be put for the\n receiving data manager to pick them up.\n\n NB the directory pointed to by 'extra_files_path'\n doesn't exist initially, it is the job of the script\n to create it if necessary.\n\n \"\"\"\n with open(jsonfile) as fh:\n params = json.load(fh)\n return (params['param_dict'],\n params['output_data'][0]['extra_files_path'])\n\n\n# Utility functions for creating data table dictionaries\n#\n# Example usage:\n# >>> d = create_data_tables_dict()\n# >>> add_data_table(d,'my_data')\n# >>> add_data_table_entry(dict(dbkey='hg19',value='human'))\n# >>> add_data_table_entry(dict(dbkey='mm9',value='mouse'))\n# >>> print(json.dumps(d))\ndef create_data_tables_dict():\n \"\"\"Return a dictionary for storing data table information\n\n Returns a dictionary that can be used with 'add_data_table'\n and 'add_data_table_entry' to store information about a\n data table. 
It can be converted to JSON to be sent back to\n the data manager.\n\n \"\"\"\n d = {}\n d['data_tables'] = {}\n return d\n\n\ndef add_data_table(d, table):\n \"\"\"Add a data table to the data tables dictionary\n\n Creates a placeholder for a data table called 'table'.\n\n \"\"\"\n d['data_tables'][table] = []\n\n\ndef add_data_table_entry(d, table, entry):\n \"\"\"Add an entry to a data table\n\n Appends an entry to the data table 'table'. 'entry'\n should be a dictionary where the keys are the names of\n columns in the data table.\n\n Raises an exception if the named data table doesn't\n exist.\n\n \"\"\"\n try:\n d['data_tables'][table].append(entry)\n except KeyError:\n raise Exception(\"add_data_table_entry: no table '%s'\" % table)\n\n\n# Utility functions for downloading and unpacking archive files\ndef download_file(url, target=None, wd=None):\n \"\"\"Download a file from a URL\n\n Fetches a file from the specified URL.\n\n If 'target' is specified then the file is saved to this\n name; otherwise it's saved as the basename of the URL.\n\n If 'wd' is specified then it is used as the 'working\n directory' where the file will be save on the local\n system.\n\n Returns the name that the file is saved with.\n\n \"\"\"\n print(f\"Downloading {url}\")\n if not target:\n target = os.path.basename(url)\n if wd:\n target = os.path.join(wd, target)\n print(f\"Saving to {target}\")\n with open(target, 'wb') as fh:\n url_h = urllib.request.urlopen(url)\n while True:\n buffer = url_h.read(io.DEFAULT_BUFFER_SIZE)\n if buffer == b\"\":\n break\n fh.write(buffer)\n return target\n\n\ndef unpack_zip_archive(filen, wd=None):\n \"\"\"Extract files from a ZIP archive\n\n Given a ZIP archive, extract the files it contains\n and return a list of the resulting file names and\n paths.\n\n 'wd' specifies the working directory to extract\n the files to, otherwise they are extracted to the\n current working directory.\n\n Once all the files are extracted the ZIP archive\n file is deleted from the file system.\n\n \"\"\"\n if not zipfile.is_zipfile(filen):\n print(f\"{filen}: not ZIP formatted file\")\n return [filen]\n file_list = []\n with zipfile.ZipFile(filen) as z:\n for name in z.namelist():\n if reduce(lambda x, y: x or name.startswith(y), IGNORE_PATHS, False):\n print(f\"Ignoring {name}\")\n continue\n if wd:\n target = os.path.join(wd, name)\n else:\n target = name\n if name.endswith('/'):\n # Make directory\n print(f\"Creating dir {target}\")\n try:\n os.makedirs(target)\n except OSError:\n pass\n else:\n # Extract file\n print(\"Extracting {target}\")\n try:\n os.makedirs(os.path.dirname(target))\n except OSError:\n pass\n with open(target, 'wb') as fh:\n fh.write(z.read(name))\n file_list.append(target)\n print(f\"Removing {filen}\")\n os.remove(filen)\n return file_list\n\n\ndef unpack_tar_archive(filen, wd=None):\n \"\"\"Extract files from a TAR archive\n\n Given a TAR archive (which optionally can be\n compressed with either gzip or bz2), extract the\n files it contains and return a list of the\n resulting file names and paths.\n\n 'wd' specifies the working directory to extract\n the files to, otherwise they are extracted to the\n current working directory.\n\n Once all the files are extracted the TAR archive\n file is deleted from the file system.\n\n \"\"\"\n file_list = []\n if not tarfile.is_tarfile(filen):\n print(f\"{filen}: not TAR file\")\n return [filen]\n with tarfile.open(filen) as t:\n for name in t.getnames():\n # Check for unwanted files\n if reduce(lambda x, y: x or 
name.startswith(y), IGNORE_PATHS, False):\n print(f\"Ignoring {name}\")\n continue\n # Extract file\n print(f\"Extracting {name}\")\n t.extract(name, wd)\n if wd:\n target = os.path.join(wd, name)\n else:\n target = name\n file_list.append(target)\n print(f\"Removing {filen}\")\n os.remove(filen)\n return file_list\n\n\ndef unpack_archive(filen, wd=None):\n \"\"\"Extract files from an archive\n\n Wrapper function that calls the appropriate\n unpacking function depending on the archive\n type, and returns a list of files that have\n been extracted.\n\n 'wd' specifies the working directory to extract\n the files to, otherwise they are extracted to the\n current working directory.\n\n \"\"\"\n print(f\"Unpack {filen}\")\n ext = os.path.splitext(filen)[1]\n print(f\"Extension: {ext}\")\n if ext == \".zip\":\n return unpack_zip_archive(filen, wd=wd)\n elif ext == \".tgz\":\n return unpack_tar_archive(filen, wd=wd)\n else:\n return [filen]\n\n\ndef fetch_files(urls, wd=None, files=None):\n \"\"\"Download and unpack files from a list of URLs\n\n Given a list of URLs, download and unpack each\n one, and return a list of the extracted files.\n\n 'wd' specifies the working directory to extract\n the files to, otherwise they are extracted to the\n current working directory.\n\n If 'files' is given then the list of extracted\n files will be appended to this list before being\n returned.\n\n \"\"\"\n if files is None:\n files = []\n for url in urls:\n filen = download_file(url, wd=wd)\n files.extend(unpack_archive(filen, wd=wd))\n return files\n\n\n# Utility functions specific to the Mothur reference data\ndef identify_type(filen):\n \"\"\"Return the data table name based on the file name\n\n \"\"\"\n ext = os.path.splitext(filen)[1]\n try:\n return MOTHUR_FILE_TYPES[ext]\n except KeyError:\n print(f\"WARNING: unknown file type for {filen}, skipping\")\n return None\n\n\ndef get_name(filen):\n \"\"\"Generate a descriptive name based on the file name\n \"\"\"\n # type_ = identify_type(filen)\n name = os.path.splitext(os.path.basename(filen))[0]\n for delim in ('.', '_'):\n name = name.replace(delim, ' ')\n return name\n\n\ndef fetch_from_mothur_website(data_tables, target_dir, datasets):\n \"\"\"Fetch reference data from the Mothur website\n\n For each dataset in the list 'datasets', download (and if\n necessary unpack) the related files from the Mothur website,\n copy them to the data manager's target directory, and add\n references to the files to the appropriate data table.\n\n The 'data_tables' dictionary should have been created using\n the 'create_data_tables_dict' and 'add_data_table' functions.\n\n Arguments:\n data_tables: a dictionary containing the data table info\n target_dir: directory to put the downloaded files\n datasets: a list of dataset names corresponding to keys in\n the MOTHUR_REFERENCE_DATA dictionary\n \"\"\"\n # Make working dir\n wd = tempfile.mkdtemp(suffix=\".mothur\", dir=os.getcwd())\n print(f\"Working dir {wd}\")\n # Iterate over all requested reference data URLs\n for dataset in datasets:\n print(f\"Handling dataset '{dataset}'\")\n for name in MOTHUR_REFERENCE_DATA[dataset]:\n for f in fetch_files(MOTHUR_REFERENCE_DATA[dataset][name], wd=wd):\n type_ = identify_type(f)\n name_from_file = os.path.splitext(os.path.basename(f))[0]\n entry_name = f\"{name_from_file} ({name})\"\n print(f\"{type_}\\t\\'{entry_name}'\\t.../{os.path.basename(f)}\")\n if type_ is not None:\n # Move to target dir\n ref_data_file = os.path.basename(f)\n f1 = os.path.join(target_dir, 
ref_data_file)\n print(f\"Moving {f} to {f1}\")\n shutil.move(f, f1)\n # Add entry to data table\n table_name = f\"mothur_{type_}\"\n add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))\n # Remove working dir\n print(f\"Removing {wd}\")\n shutil.rmtree(wd)\n\n\ndef files_from_filesystem_paths(paths):\n \"\"\"Return list of file paths from arbitrary input paths\n\n Given a list of filesystem paths, return a list of\n full paths corresponding to all files found recursively\n from under those paths.\n\n \"\"\"\n # Collect files to add\n files = []\n for path in paths:\n path = os.path.abspath(path)\n print(f\"Examining '{path}'...\")\n if os.path.isfile(path):\n # Store full path for file\n files.append(path)\n elif os.path.isdir(path):\n # Descend into directory and collect the files\n for f in os.listdir(path):\n files.extend(files_from_filesystem_paths((os.path.join(path, f), )))\n else:\n print(\"Not a file or directory, ignored\")\n return files\n\n\ndef import_from_server(data_tables, target_dir, paths, description, link_to_data=False):\n \"\"\"Import reference data from filesystem paths\n\n Creates references to the specified file(s) on the Galaxy\n server in the appropriate data table (determined from the\n file extension).\n\n The 'data_tables' dictionary should have been created using\n the 'create_data_tables_dict' and 'add_data_table' functions.\n\n Arguments:\n data_tables: a dictionary containing the data table info\n target_dir: directory to put copy or link to the data file\n paths: list of file and/or directory paths to import\n description: text to associate with the files\n link_to_data: boolean, if False then copy the data file\n into Galaxy (default); if True then make a symlink to\n the data file\n\n \"\"\"\n # Collect list of files based on input paths\n files = files_from_filesystem_paths(paths)\n # Handle each file individually\n for f in files:\n type_ = identify_type(f)\n if type_ is None:\n print(f\"{f}: unrecognised type, skipped\")\n continue\n ref_data_file = os.path.basename(f)\n target_file = os.path.join(target_dir, ref_data_file)\n entry_name = \"%s\" % os.path.splitext(ref_data_file)[0]\n if description:\n entry_name += \" (%s)\" % description\n print(f\"{type_}\\t\\'{entry_name}'\\t.../{ref_data_file}\")\n # Link to or copy the data\n if link_to_data:\n os.symlink(f, target_file)\n else:\n shutil.copyfile(f, target_file)\n # Add entry to data table\n table_name = f\"mothur_{type_}\"\n add_data_table_entry(data_tables, table_name, dict(name=entry_name, value=ref_data_file))\n\n\nif __name__ == \"__main__\":\n print(\"Starting...\")\n\n # Read command line\n parser = optparse.OptionParser()\n parser.add_option('--source', action='store', dest='data_source')\n parser.add_option('--datasets', action='store', dest='datasets', default='')\n parser.add_option('--paths', action='store', dest='paths', default=[])\n parser.add_option('--description', action='store', dest='description', default='')\n parser.add_option('--link', action='store_true', dest='link_to_data')\n options, args = parser.parse_args()\n print(f\"options: {options}\")\n print(f\"args : {args}\")\n\n # Check for JSON file\n if len(args) != 1:\n sys.stderr.write(\"Need to supply JSON file name\")\n sys.exit(1)\n\n jsonfile = args[0]\n\n # Read the input JSON\n params, target_dir = read_input_json(jsonfile)\n\n # Make the target directory\n print(f\"Making {target_dir}\")\n os.mkdir(target_dir)\n\n # Set up data tables dictionary\n data_tables = 
create_data_tables_dict()\n add_data_table(data_tables, 'mothur_lookup')\n add_data_table(data_tables, 'mothur_aligndb')\n add_data_table(data_tables, 'mothur_map')\n add_data_table(data_tables, 'mothur_taxonomy')\n\n # Fetch data from specified data sources\n if options.data_source == 'mothur_website':\n datasets = options.datasets.split(',')\n fetch_from_mothur_website(data_tables, target_dir, datasets)\n elif options.data_source == 'filesystem_paths':\n # Check description text\n description = options.description.strip()\n # Get list of paths (need to remove any escapes for '\\n' and '\\r'\n # that might have been inserted by Galaxy)\n paths = options.paths.replace('__cn__', '\\n').replace('__cr__', '\\r').split()\n import_from_server(data_tables, target_dir, paths, description, link_to_data=options.link_to_data)\n # Write output JSON\n print(\"Outputting JSON\")\n with open(jsonfile, 'w') as fh:\n json.dump(data_tables, fh, sort_keys=True)\n print(\"Done.\")\n","repo_name":"galaxyproject/tools-iuc","sub_path":"data_managers/data_manager_mothur_toolsuite/data_manager/fetch_mothur_reference_data.py","file_name":"fetch_mothur_reference_data.py","file_ext":"py","file_size_in_byte":20605,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"61"} +{"seq_id":"18532842607","text":"'''\nA format for expressing an ordered list of integers is to use a comma separated list of either\n\nindividual integers\nor a range of integers denoted by the starting integer separated from the end integer in the range by a dash, '-'. The range includes all integers in the interval including both endpoints. It is not considered a range unless it spans at least 3 numbers. For example \"12,13,15-17\"\nComplete the solution so that it takes a list of integers in increasing order and returns a correctly formatted string in the range format.\n\nExample:\n\nsolution([-6, -3, -2, -1, 0, 1, 3, 4, 5, 7, 8, 9, 10, 11, 14, 15, 17, 18, 19, 20])\n# returns \"-6,-3-1,3-5,7-11,14,15,17-20\"\n'''\ndef solution(args):\n string = str(args[0])\n # iterate through each element\n j = 1\n while j < len(args):\n \n # add a new number if not consecutive\n if args[j] - args[j-1] > 1:\n string += ',' + str(args[j])\n \n # find consecutive numbers\n if args[j] - args[j-1] <= 1:\n string += '-'\n while args[j] - args[j-1] <=1:\n # check if end of numbers\n if j < len(args)-1:\n j += 1\n else:\n string += str(args[j])\n return string\n else:\n # add the last number\n string += str(args[j-1])\n \n \n j += 1\n \n return string\n\n'''\nVerbose solution\n- group first number and last number of sequence (even if identical)\n- check if number are identical\n- check if sequence > 2\n'''\ndef solution(args):\n start = []\n end = []\n string = ''\n for i in range(len(args)):\n\n # start a new set\n if start == []:\n start.append(args[i])\n end.append(args[i])\n continue\n \n # check if consecutive numbers \n if args[i] - end[-1] == 1:\n end[-1] = args[i]\n \n else:\n start.append(args[i])\n end.append(args[i])\n \n for i in range(len(start)):\n if end[i] - start[i] == 0:\n string += str(start[i]) + ','\n\n if end[i] - start[i] == 1:\n string += str(start[i]) + ',' + str(end[i]) + ',' \n\n if end[i] - start[i] > 1:\n string += str(start[i]) + '-' + str(end[i]) + ',' \n return string[:-1]\n \n\nif __name__ == \"__main__\":\n import codewars_test as Test\n Test.describe(\"Sample Test Cases\")\n\n Test.it(\"Simple Tests\")\n Test.assert_equals(solution([-6,-3,-2,-1,0,1,3,4,5,7,8,9,10,11,14,15,17,18,19,20]), 
'-6,-3-1,3-5,7-11,14,15,17-20')\n Test.assert_equals(solution([-3,-2,-1,2,10,15,16,18,19,20]), '-3--1,2,10,15,16,18-20')","repo_name":"cbraissant/codewars","sub_path":"python/20_range_extraction.py","file_name":"20_range_extraction.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28367870091","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n self.wide = 0\n self.height = 0\nclass Solution:\n def verticalTraversal(self, root: TreeNode) -> List[List[int]]:\n cur_wide = 0\n cur_height = 0\n self.results = {}\n output = []\n self.updateWidthInorder(root, cur_wide, cur_height)\n # self.updateResultsDepthWise(root)\n for key in sorted(self.results.keys()):\n column = [i[1] for i in sorted(self.results[key])]\n output.append(column)\n return output\n \n def updateWidthInorder(self, node, cur_wide, cur_height):\n if node:\n node.wide = cur_wide\n node.height = cur_height\n if node.wide in self.results:\n self.results[node.wide].append((node.height, node.val))\n else:\n self.results[node.wide] = [(node.height, node.val)]\n self.updateWidthInorder(node.left, cur_wide-1, cur_height+1)\n self.updateWidthInorder(node.right, cur_wide+1, cur_height+1)\n \n ","repo_name":"medasuryatej/InterviewPrep","sub_path":"987-vertical-order-traversal-of-a-binary-tree/987-vertical-order-traversal-of-a-binary-tree.py","file_name":"987-vertical-order-traversal-of-a-binary-tree.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24969318718","text":"from math import radians, cos, sin, asin, sqrt\ndef haversine(lon1, lat1, lon2, lat2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n \"\"\"\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine公式\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r\n\n\ndef main():\n # dis = haversine(118.611072,31.114733,118.620051,31.102072)\n filename = input(\"请输入统计里程文件名称:\")\n readfile = open(filename, 'r')\n # lon1,lat1 = str(readfile.readline()).split(\",\")\n one_line = readfile.readline()\n lon1, lat1 = one_line.split(',')\n # print(lon1)\n # print(lat1)\n dis = 0.00000\n while True:\n a = readfile.readline()\n if len(a) == 0:\n readfile.close()\n break\n # print(a)\n lon2, lat2 = a.split(',')\n # 计算值\n lon1, lat1, lon2, lat2 = map(float, [lon1, lat1, lon2, lat2])\n dis = dis + haversine(lon1, lat1, lon2, lat2)\n lon1, lat1 = lon2, lat2\n print(\"总里程为:\")\n print(dis)\n\n\nif __name__ == \"__main__\":\n main()\n input(\"输入任意值结束\")","repo_name":"Dylan-bai/Dylan_tools","sub_path":"Distance calculation.py","file_name":"Distance calculation.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23415130861","text":"t = int(input())\r\nfor i in range(t):\r\n l1 = int(input()) - 1\r\n case1 = [[int(j) for j in input().split()] for k in range(4)]\r\n l2 = int(input()) - 1\r\n case2 = [[int(j) for j in input().split()] for k in range(4)]\r\n v1 = case1[l1]\r\n v2 = case2[l2]\r\n cnt = 0\r\n res = 
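The haversine record above converts a pair of coordinates to a great-circle distance on a 6371 km sphere. A quick worked check against a well-known city pair (coordinates rounded, so the result is approximate):

from math import radians, sin, cos, asin, sqrt

def haversine(lon1, lat1, lon2, lat2):
    # Same formula as the record: great-circle distance in km.
    lon1, lat1, lon2, lat2 = map(radians, (lon1, lat1, lon2, lat2))
    dlon, dlat = lon2 - lon1, lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 2 * asin(sqrt(a)) * 6371

# Paris (2.35 E, 48.86 N) to London (0.13 W, 51.51 N): roughly 344 km.
print(round(haversine(2.35, 48.86, -0.13, 51.51)))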
float(\"inf\")\r\n for j in v1:\r\n if j in v2:\r\n cnt += 1\r\n res = j\r\n if cnt == 0:\r\n print(\"Case #\", i + 1, \": Volunteer cheated!\", sep=\"\")\r\n elif cnt == 1:\r\n print(\"Case #\", i + 1, \": \", res, sep=\"\")\r\n else:\r\n print(\"Case #\", i + 1, \": Bad magician!\", sep=\"\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2895.py","file_name":"2895.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2369377799","text":"\"\"\"Test for propagator module.\"\"\"\n# %% Imports\nfrom __future__ import annotations\n\n# Third Party Imports\nfrom numpy import array\n\n# Punch Clock Imports\nfrom punchclock.dynamics.propagator import simplePropagate\n\n# %% Test simplePropagate\nprint(\"\\nTest simple functionality...\")\n\n\ndef dummyFunc(t, x0): # noqa\n xdot = array([1, 1, -0.1])\n return xdot\n\n\nx1 = simplePropagate(dummyFunc, array([0, 0, 0]), 0, 5)\nprint(f\"propagated state=\\n{x1}\")\n\n# %% Test with t0=tf\nprint(\"\\nTest t0=tf...\")\ntry:\n x1 = simplePropagate(dummyFunc, array([0, 0, 0]), 0, 0)\nexcept Exception:\n print(\"An exception occurred\")\n\n# %% Test with IVP failure\nprint(\"\\nTest if IVP fails...\")\n\n\ndef dummyFunc2(t, x0): # noqa\n xdot = [0, 0]\n if x0[0] < 0:\n xdot[0] = 1e15\n else:\n xdot[0] = -1e15\n\n xdot[1] = 1\n return xdot\n\n\ntry:\n x1 = simplePropagate(dummyFunc2, array([1e15, 1e15]), 0, 5)\n print(f\"propagated state=\\n{x1}\")\nexcept Exception:\n print(\"An exception occurred\")\n\n# %% Test with 2D initial conditions\nprint(\"\\nTest with 2D initial conditions...\")\n\nx0 = array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])\nx1 = simplePropagate(dummyFunc, x0, 0, 5)\nprint(f\"propagated state=\\n{x1}\")\n\n\n# %% Test with 2D but singleton dimension ICs\nprint(\"\\nTest with 2D initial conditions where one is a singleton...\")\n\nx0 = array([[1], [1], [1]])\nx1 = simplePropagate(dummyFunc, x0, 0, 5)\nprint(f\"propagated state=\\n{x1}\")\n\n\n# %%\nprint(\"done\")\n","repo_name":"dylan906/clockpunch","sub_path":"tests/dynamics/test_propagator.py","file_name":"test_propagator.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71206333953","text":"users = [\n {'name': 'hadiza',\n 'age': 21,\n 'gender': 'female',\n 'user_name': 'deezah',\n 'is_verified': True,\n 'tweets': [{'content': 'Po for president', 'likes': 450, 'retweets': 233},\n {'content': 'Atiku is our man', 'likes': 4, 'retweets': 2}]\n },\n\n {'name': 'Ibrahim',\n 'age': 21,\n 'gender': 'Male',\n 'user_name': 'ibro',\n 'is_verified': False,\n 'tweets': [{'content': 'Programming is fun', 'likes': 34, 'retweets': 12}],\n\n },\n {'name': 'James',\n 'age': 21,\n 'gender': 'Male',\n 'user_name': 'amez',\n 'is_verified': True,\n 'tweets': [{'content': 'love is life', 'likes': 450, 'retweets': 233},\n {'content': 'only Racheal i know', 'likes': 97, 'retweets': 21}]\n },\n {'name': 'Racheal',\n 'age': 21,\n 'gender': 'female',\n 'user_name': 'betty',\n 'is_verified': False,\n 'tweets': [{'content': '.', 'likes': 1450, 'retweets': 1330},\n {'content': 'Thinking about Amez', 'likes': 4, 'retweets': 2},\n {'content': 'Amezing grace', 'likes': 2000, 'retweets': 1580}, ]\n },\n {'name': 'Elijah',\n 'age': 17,\n 'gender': 'Male',\n 'user_name': 'el_d_si',\n 'is_verified': False,\n 'tweets': [{'content': '#Osun decides', 'likes': 12, 'retweets': 
8},\n {'content': 'imole de', 'likes': 97, 'retweets': 21}]\n },\n {'name': 'Dorris',\n 'age': 16,\n 'gender': 'female',\n 'user_name': 'anything',\n 'is_verified': False,\n 'tweets': [{'content': 'i love Chimamanda ', 'likes': 450, 'retweets': 233},\n {'content': 'Feminism is the goal', 'likes': 97, 'retweets': 21}]\n },\n {'name': 'Jacob',\n 'age': 37,\n 'gender': 'Male',\n 'user_name': '',\n 'is_verified': True,\n 'tweets': [{'content': 'reflection is my goal', 'likes': 450, 'retweets': 233},\n {'content': 'how to get more likes on on twitter', 'likes': 97, 'retweets': 21}]\n },\n {'name': 'Derek',\n 'age': 29,\n 'gender': 'Male',\n 'user_name': 'standby_gen',\n 'is_verified': True,\n 'tweets': [{'content': 'love is life', 'likes': 450, 'retweets': 233},\n {'content': 'only Racheal i know', 'likes': 97, 'retweets': 21}]\n },\n {'name': 'Mubarak',\n 'age': 47,\n 'gender': 'Male',\n 'user_name': 'Whistle',\n 'is_verified': True,\n 'tweets': []\n }\n]\n\nno_of_users = len(users)\nusernames = {user['user_name'] for user in users}\nfemale_users = [user['name'] for user in users if user['gender'] == 'female']\nin_active_users = [user for user in users if len(user['tweets']) == 0]\nname_and_age = [{'name': user['name'], 'age': user['age']} for user in users]\ncheck_if_verified = [user for user in users if user['is_verified'] == True]\navg_age_of_users = round(sum(user['age'] for user in users) / len(users))\nprint(avg_age_of_users)","repo_name":"deezah12/python","sub_path":"pythonProject/work/chapter5/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25042860562","text":"# https://www.acmicpc.net/problem/1018\n# 체스판 다시 칠하기\n\n# N, M = map(int, input().split())\n\n# board = []\n# for i in range(N):\n# board.append(input())\n\n# minimum = N * M\n# for i in range(N-7):\n# for j in range(M-7):\n# first_black = 0\n# first_white = 0\n# for k in range(8):\n# for w in range(8):\n# if (k%2 == 0 and w%2 == 0) or (k%2 == 1 and w%2 == 1):\n# if board[k+i][w+j] == \"B\":\n# first_white += 1\n# else:\n# first_black += 1\n# else:\n# if board[k+i][w+j] == \"B\":\n# first_black += 1\n# else:\n# first_white += 1\n\n# # print(minimum, first_black, first_white) \n# minimum = min(minimum, first_black, first_white)\n\n# print(minimum)\n\n\nN, M = map(int, input().split())\n\nboard = []\n\nfor _ in range(N):\n board.append(input())\n\n\nmin_count = 1e9\nfor k in range(N-7):\n for l in range(M-7):\n repaint_w = 0\n repaint_b = 0\n for i in range(8):\n for j in range(8):\n # 제일 왼쪽 위칸이 흰색 -> (짝수, 짝수), (홀수, 홀수)가 흰색\n # 제일 왼쪽 위칸이 검정색 -> (짝수, 짝수), (홀수, 홀수)가 검정색\n if (i-j) % 2 == 0:\n if board[k+i][l+j] == \"B\":\n repaint_w += 1\n else:\n repaint_b += 1\n else:\n if board[k+i][l+j] == \"W\":\n repaint_w += 1\n else:\n repaint_b += 1\n min_count = min(min_count, repaint_w, repaint_b)\n\nprint(min_count)","repo_name":"hmkim199/PrepareCodingTest","sub_path":"Baekjoon/Practice1018.py","file_name":"Practice1018.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35470241995","text":"#!/usr/bin/env python\n\"\"\"\nDescription:\n Cross-correlation functionality\n\nReferences:\n\nCreationDate: 29/06/17\n\nDeveloper: laurence.davies@ga.gov.au\n\nRevision History:\n LastUpdate: 29/06/17 LD First commit of xcor code.\n LastUpdate: 13/07/17 LD Fixed xcor filtering issue when traces have 
different sample rates.\n LastUpdate: 11/08/17 RH Implement ASDF-based cross-correlation workflow\n LastUpdate: 11/07/18 RH Implemented parallel cross-correlator\n LastUpdate: 19/07/18 RH Implemented cross-correlation approaches described in Habel et al. 2018\n\n LastUpdate: dd/mm/yyyy Who Optional description\n\"\"\"\n\nimport os\nimport logging\nimport math\nfrom collections import defaultdict\n\nimport numpy as np\nimport scipy\n\nfrom obspy.core import UTCDateTime, Stats\nfrom obspy import Trace\nfrom obspy.signal.filter import bandpass, highpass, lowpass\nfrom obspy.geodetics.base import gps2dist_azimuth\nfrom scipy import signal\n\nfrom seismic.xcorqc.fft import *\nfrom seismic.ASDFdatabase.FederatedASDFDataSet import FederatedASDFDataSet\nfrom seismic.xcorqc.utils import get_stream\nfrom netCDF4 import Dataset\nfrom functools import reduce\n\nlogging.basicConfig()\n\n\ndef setup_logger(name, log_file, level=logging.INFO):\n \"\"\"\n Function to setup a logger; adapted from stackoverflow\n \"\"\"\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n handler = logging.FileHandler(log_file, mode='w')\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name+log_file)\n logger.setLevel(level)\n logger.addHandler(handler)\n logger.propagate = False\n return logger\n# end func\n\n\ndef zeropad(tr, padlen):\n assert (tr.shape[0] < padlen)\n padded = np.zeros(padlen)\n padded[0:tr.shape[0]] = tr\n return padded\n# end func\n\n\ndef zeropad_ba(tr, padlen):\n assert (tr.shape[0] < padlen)\n padded = np.zeros(padlen, dtype=np.complex_)\n s = int((padlen - tr.shape[0]) / 2)\n padded[s:(s + tr.shape[0])] = scipy.fftpack.fftshift(tr)\n return scipy.fftpack.ifftshift(padded)\n# end func\n\n\ndef taper(tr, taperlen):\n tr[0:taperlen] *= 0.5 * (1 + np.cos(np.linspace(-math.pi, 0, taperlen)))\n tr[-taperlen:] *= 0.5 * (1 + np.cos(np.linspace(0, math.pi, taperlen)))\n return tr\n# end func\n\n\ndef whiten(a, sampling_rate, window_freq=0):\n \"\"\"\n Applies spectral whitening to trace samples. When window_freq=0, all frequency bins are normalized\n by their amplitudes, i.e. all frequency bins end up with an amplitude of 1. 
When window_freq is\n nonzero, a smoothed amplitude spectrum (smoothing window length is as computed below) is used\n to normalize the frequency bins.\n\n :param a: trace samples\n :param sampling_rate: sampling rate\n :param window_freq: smoothing window length (Hz)\n :return: spectrally whitened samples\n \"\"\"\n # frequency step\n npts = a.shape[0]\n deltaf = sampling_rate / npts\n\n ffta = np.fft.rfft(a)\n\n # smooth amplitude spectrum\n halfwindow = int(round(window_freq / deltaf / 2.0))\n\n if halfwindow > 0:\n # moving average\n weight = np.convolve(np.abs(ffta), np.ones(halfwindow * 2 + 1) / (halfwindow * 2 + 1), mode='same')\n else:\n weight = np.abs(ffta)\n\n ss = ffta / weight\n\n a = np.fft.irfft(ss)\n\n return a\n# end func\n\n\ndef xcorr2(tr1, tr2, sta1_inv=None, sta2_inv=None,\n instrument_response_output='vel', water_level=50.,\n window_seconds=3600, window_overlap=0.1, window_buffer_length=0,\n interval_seconds=86400, taper_length=0.05, resample_rate=None,\n flo=None, fhi=None, clip_to_2std=False, whitening=False,\n whitening_window_frequency=0, one_bit_normalize=False, envelope_normalize=False,\n verbose=1, logger=None):\n\n # Length of window_buffer in seconds\n window_buffer_seconds = window_buffer_length * window_seconds\n adjusted_taper_length = taper_length\n if window_buffer_seconds:\n # adjust taper length\n adjusted_taper_length = taper_length / (1. + window_buffer_length * 2.)\n # end if\n\n sr1 = tr1.stats.sampling_rate\n sr2 = tr2.stats.sampling_rate\n sr1_orig = sr1\n sr2_orig = sr2\n tr1_d_all = tr1.data # refstn\n tr2_d_all = tr2.data\n lentr1_all = tr1_d_all.shape[0]\n lentr2_all = tr2_d_all.shape[0]\n window_samples_1 = (window_seconds + 2*window_buffer_seconds) * sr1\n window_samples_2 = (window_seconds + 2*window_buffer_seconds) * sr2\n interval_samples_1 = interval_seconds * sr1\n interval_samples_2 = interval_seconds * sr2\n # sr = 0\n resll = []\n\n # set day-aligned start-indices\n maxStartTime = max(tr1.stats.starttime, tr2.stats.starttime)\n dayAlignedStartTime = UTCDateTime(year=maxStartTime.year, month=maxStartTime.month,\n day=maxStartTime.day)\n itr1s = (dayAlignedStartTime - tr1.stats.starttime) * sr1\n itr2s = (dayAlignedStartTime - tr2.stats.starttime) * sr2\n\n if resample_rate:\n sr1 = resample_rate\n sr2 = resample_rate\n # end if\n sr = max(sr1, sr2)\n xcorlen = int(2 * window_seconds * sr - 1)\n fftlen = 2 ** (int(np.log2(xcorlen)) + 1)\n\n intervalCount = 0\n windowsPerInterval = [] # Stores the number of windows processed per interval\n intervalStartSeconds = []\n intervalEndSeconds = []\n while itr1s < lentr1_all and itr2s < lentr2_all:\n itr1e = min(lentr1_all, itr1s + interval_samples_1)\n itr2e = min(lentr2_all, itr2s + interval_samples_2)\n\n while (itr1s < 0) or (itr2s < 0):\n itr1s += (window_samples_1 - 2*window_buffer_seconds*sr1_orig) - \\\n (window_samples_1 - 2*window_buffer_seconds*sr1_orig) * window_overlap\n itr2s += (window_samples_2 - 2*window_buffer_seconds*sr2_orig) - \\\n (window_samples_2 - 2*window_buffer_seconds*sr2_orig) * window_overlap\n # end while\n\n if (np.fabs(itr1e - itr1s) < sr1_orig) or (np.fabs(itr2e - itr2s) < sr2_orig):\n itr1s = itr1e\n itr2s = itr2e\n continue\n # end if\n\n if (tr1.stats.starttime + itr1s / sr1_orig != tr2.stats.starttime + itr2s / sr2_orig):\n if logger:\n logger.warning('Detected misaligned traces..')\n\n windowCount = 0\n wtr1s = int(itr1s)\n wtr2s = int(itr2s)\n resl = []\n\n while wtr1s < itr1e and wtr2s < itr2e:\n wtr1e = int(min(itr1e, wtr1s + window_samples_1))\n wtr2e 
= int(min(itr2e, wtr2s + window_samples_2))\n\n # Discard small windows\n if ((wtr1e - wtr1s < window_samples_1) or (wtr2e - wtr2s < window_samples_2) or\n (wtr1e - wtr1s < sr1_orig) or (wtr2e - wtr2s < sr2_orig)):\n wtr1s = int(np.ceil(itr1e))\n wtr2s = int(np.ceil(itr2e))\n continue\n # end if\n\n # Discard windows with masked regions, i.e. with gaps or windows that are all zeros\n if (not (np.ma.is_masked(tr1_d_all[wtr1s:wtr1e]) or\n np.ma.is_masked(tr2_d_all[wtr2s:wtr2e]) or\n np.sum(tr1_d_all[wtr1s:wtr1e]) == 0 or\n np.sum(tr2_d_all[wtr2s:wtr2e]) == 0)):\n\n # logger.info('%s, %s' % (tr1.stats.starttime + wtr1s / 200., tr1.stats.starttime + wtr1e / sr1_orig))\n # logger.info('%s, %s' % (tr2.stats.starttime + wtr2s / 200., tr2.stats.starttime + wtr2e / sr2_orig))\n\n tr1_d = np.array(tr1_d_all[wtr1s:wtr1e], dtype=np.float32)\n tr2_d = np.array(tr2_d_all[wtr2s:wtr2e], dtype=np.float32)\n\n # STEP 1: detrend\n tr1_d = signal.detrend(tr1_d)\n tr2_d = signal.detrend(tr2_d)\n\n # STEP 2: demean\n tr1_d -= np.mean(tr1_d)\n tr2_d -= np.mean(tr2_d)\n\n # STEP 3: remove response\n if sta1_inv:\n resp_tr1 = Trace(data=tr1_d,\n header=Stats(header={'sampling_rate': sr1_orig,\n 'npts': len(tr1_d),\n 'network': tr1.stats.network,\n 'station': tr1.stats.station,\n 'location': tr1.stats.location,\n 'channel': tr1.stats.channel,\n 'starttime': tr1.stats.starttime + float(wtr1s)/sr1_orig,\n 'endtime': tr1.stats.starttime + float(wtr1e)/sr1_orig}))\n try:\n resp_tr1.remove_response(inventory=sta1_inv, output=instrument_response_output.upper(),\n water_level=water_level)\n except Exception as e:\n logger.error(str(e))\n # end try\n\n tr1_d = resp_tr1.data\n # end if\n\n # remove response\n if sta2_inv:\n resp_tr2 = Trace(data=tr2_d,\n header=Stats(header={'sampling_rate': sr2_orig,\n 'npts': len(tr2_d),\n 'network': tr2.stats.network,\n 'station': tr2.stats.station,\n 'location': tr2.stats.location,\n 'channel': tr2.stats.channel,\n 'starttime': tr2.stats.starttime + float(wtr2s)/sr2_orig,\n 'endtime': tr2.stats.starttime + float(wtr2e)/sr2_orig}))\n try:\n resp_tr2.remove_response(inventory=sta2_inv, output=instrument_response_output.upper(),\n water_level=water_level)\n except Exception as e:\n logger.error(str(e))\n # end try\n\n tr2_d = resp_tr2.data\n # end if\n\n # STEPS 4, 5: resample after lowpass @ resample_rate/2 Hz\n if resample_rate:\n tr1_d = lowpass(tr1_d, resample_rate/2., sr1_orig, corners=2, zerophase=True)\n tr2_d = lowpass(tr2_d, resample_rate/2., sr2_orig, corners=2, zerophase=True)\n\n tr1_d = Trace(data=tr1_d,\n header=Stats(header={'sampling_rate': sr1_orig,\n 'npts': window_samples_1})).resample(resample_rate,\n no_filter=True).data\n tr2_d = Trace(data=tr2_d,\n header=Stats(header={'sampling_rate': sr2_orig,\n 'npts': window_samples_2})).resample(resample_rate,\n no_filter=True).data\n # end if\n\n # STEP 6: Bandpass\n if flo and fhi:\n tr1_d = bandpass(tr1_d, flo, fhi, sr1, corners=2, zerophase=True)\n tr2_d = bandpass(tr2_d, flo, fhi, sr2, corners=2, zerophase=True)\n # end if\n\n # STEP 7: time-domain normalization\n # clip to +/- 2*std\n if clip_to_2std:\n std_tr1 = np.std(tr1_d)\n std_tr2 = np.std(tr2_d)\n clip_indices_tr1 = np.fabs(tr1_d) > 2 * std_tr1\n clip_indices_tr2 = np.fabs(tr2_d) > 2 * std_tr2\n\n tr1_d[clip_indices_tr1] = 2 * std_tr1 * np.sign(tr1_d[clip_indices_tr1])\n tr2_d[clip_indices_tr2] = 2 * std_tr2 * np.sign(tr2_d[clip_indices_tr2])\n # end if\n\n # 1-bit normalization\n if one_bit_normalize:\n tr1_d = np.sign(tr1_d)\n tr2_d = np.sign(tr2_d)\n # end if\n\n 
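                # Exactly one time-domain normalization path is applied per
                # window: 2-std clipping (above), 1-bit normalization (np.sign
                # keeps only sample polarity, suppressing high-amplitude
                # transients such as earthquakes), or the zero-mean/unit-std
                # default below.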
# Apply Rhys Hawkins-style default time domain normalization\n if (clip_to_2std == 0) and (one_bit_normalize == 0):\n # 0-mean\n tr1_d -= np.mean(tr1_d)\n tr2_d -= np.mean(tr2_d)\n\n # unit-std\n tr1_d /= np.std(tr1_d)\n tr2_d /= np.std(tr2_d)\n # end if\n\n # STEP 8: taper\n if adjusted_taper_length > 0:\n tr1_d = taper(tr1_d, int(np.round(adjusted_taper_length*tr1_d.shape[0])))\n tr2_d = taper(tr2_d, int(np.round(adjusted_taper_length*tr2_d.shape[0])))\n # end if\n\n # STEP 9: spectral whitening\n if whitening:\n tr1_d = whiten(tr1_d, sr1, window_freq=whitening_window_frequency)\n tr2_d = whiten(tr2_d, sr2, window_freq=whitening_window_frequency)\n\n # STEP 10: taper\n if adjusted_taper_length > 0:\n tr1_d = taper(tr1_d, int(np.round(adjusted_taper_length*tr1_d.shape[0])))\n tr2_d = taper(tr2_d, int(np.round(adjusted_taper_length*tr2_d.shape[0])))\n # end if\n # end if\n\n # STEP 11: Final bandpass\n # apply zero-phase bandpass\n if flo and fhi:\n tr1_d = bandpass(tr1_d, flo, fhi, sr1, corners=2, zerophase=True)\n tr2_d = bandpass(tr2_d, flo, fhi, sr2, corners=2, zerophase=True)\n # end if\n\n if window_buffer_seconds:\n # extract window of interest from buffered window\n tr1_d = tr1_d[int(window_buffer_seconds*sr1):-int(window_buffer_seconds*sr1)]\n tr2_d = tr2_d[int(window_buffer_seconds*sr2):-int(window_buffer_seconds*sr2)]\n # end if\n\n # cross-correlate waveforms\n if sr1 < sr2:\n fftlen2 = fftlen\n fftlen1 = int((fftlen2 * 1.0 * sr1) / sr)\n rf = zeropad_ba(fftn(zeropad(tr1_d, fftlen1), shape=[fftlen1]), fftlen2) * fftn(\n zeropad(ndflip(tr2_d), fftlen2), shape=[fftlen2])\n elif sr1 > sr2:\n fftlen1 = fftlen\n fftlen2 = int((fftlen1 * 1.0 * sr2) / sr)\n rf = fftn(zeropad(tr1_d, fftlen1), shape=[fftlen1]) * zeropad_ba(\n fftn(zeropad(ndflip(tr2_d), fftlen2), shape=[fftlen2]), fftlen1)\n else:\n rf = fftn(zeropad(tr1_d, fftlen), shape=[fftlen]) * fftn(zeropad(ndflip(tr2_d), fftlen),\n shape=[fftlen])\n # end if\n\n if not np.isnan(rf).any():\n resl.append(rf)\n windowCount += 1\n # end if\n # end if\n\n wtr1s += int((window_samples_1 - 2*window_buffer_seconds*sr1_orig) -\n (window_samples_1 - 2*window_buffer_seconds*sr1_orig) * window_overlap)\n wtr2s += int((window_samples_2 - 2*window_buffer_seconds*sr2_orig) -\n (window_samples_2 - 2*window_buffer_seconds*sr2_orig) * window_overlap)\n # end while (windows within interval)\n\n if verbose > 1:\n if logger:\n logger.info('\\tProcessed %d windows in interval %d' % (windowCount, intervalCount))\n # end if\n\n intervalStartSeconds.append(itr1s/sr1_orig + tr1.stats.starttime.timestamp)\n intervalEndSeconds.append(itr1e/sr1_orig + tr1.stats.starttime.timestamp)\n itr1s = itr1e\n itr2s = itr2e\n intervalCount += 1\n\n # Append an array of zeros if no windows were processed for the current interval\n if windowCount == 0:\n resl.append(np.zeros(fftlen))\n if verbose > 1:\n if logger:\n logger.info('\\tWarning: No windows processed due to gaps in data in current interval')\n # end if\n # end if\n\n windowsPerInterval.append(windowCount)\n\n if windowCount > 0:\n mean = reduce((lambda tx, ty: tx + ty), resl) / float(windowCount)\n else:\n mean = reduce((lambda tx, ty: tx + ty), resl)\n # end if\n\n if envelope_normalize:\n step = np.sign(np.fft.fftfreq(fftlen, 1.0 / sr))\n mean = mean + step * mean # compute analytic\n # end if\n\n mean = ifftn(mean)\n\n if envelope_normalize:\n # Compute magnitude of mean\n mean = np.abs(mean)\n normFactor = np.max(mean)\n\n # mean can be 0 for a null result\n if normFactor > 0:\n mean /= normFactor\n # end 
if\n # end if\n\n resll.append(mean[:xcorlen])\n # end while (iteration over intervals)\n\n if len(resll):\n return np.array(resll), np.array(windowsPerInterval), \\\n np.array(intervalStartSeconds, dtype='i8'), \\\n np.array(intervalEndSeconds, dtype='i8'), \\\n sr\n else:\n return None, None, None, None, sr\n # end if\n# end func\n\n\ndef IntervalStackXCorr(refds, tempds,\n start_time, end_time,\n ref_net_sta, temp_net_sta,\n ref_sta_inv, temp_sta_inv,\n instrument_response_output,\n water_level,\n ref_cha,\n temp_cha,\n baz_ref_net_sta,\n baz_temp_net_sta,\n resample_rate=None,\n taper_length=0.05,\n buffer_seconds=864000, interval_seconds=86400,\n window_seconds=3600, window_overlap=0.1, window_buffer_length=0,\n flo=None, fhi=None,\n clip_to_2std=False, whitening=False, whitening_window_frequency=0,\n one_bit_normalize=False, envelope_normalize=False,\n ensemble_stack=False,\n outputPath='/tmp', verbose=1, tracking_tag=''):\n \"\"\"\n This function rolls through two ASDF data sets, over a given time-range and cross-correlates\n waveforms from all possible station-pairs from the two data sets. To allow efficient, random\n data access asdf data sources, an instance of a SeisDB object, instantiated from\n the corresponding Json database is passed in (tempds_db) -- although this parameter is not\n mandatory, data-access from large ASDF files will be slow without it.\n\n Station-ids to be processed from the two data-sources can be specified as lists of strings,\n while wildcards can be used to process all stations. Data is fetched from the sources in chunks\n to limit memory usage and data-windows with gaps are discarded.\n\n Cross-correlation results are written out for each station-pair, in the specified folder, as\n NETCDF4 files. Panoply (https://www.giss.nasa.gov/tools/panoply/), already installed on the\n NCI VDIs can be used to interrogate these results.\n\n :type refds: FederatedASDFDataSet\n :param refds: FederatedASDFDataSet containing reference-station data\n :type tempds: FederatedASDFDataSet\n :param tempds: FederatedASDFDataSet containing temporary-stations data\n :type start_time: UTCDateTime\n :param: start_time: Start-time (UTCDateTime format) for data to be used in cross-correlation\n :type end_time: UTCDateTime\n :param: end_time: End-time (UTCDateTime format) for data to be used in cross-correlation\n :type ref_net_sta: str\n :param ref_net_sta: Network.Station for the reference Dataset.\n :type temp_net_sta: str\n :param temp_net_sta: Network.Station for the temporary Dataset.\n :type ref_sta_inv: Inventory\n :param ref_sta_inv: Inventory containing instrument response for station\n :type temp_sta_inv: Inventory\n :param temp_sta_inv: Inventory containing instrument response for station\n :type instrument_response_output: str\n :param instrument_response_output: Output of instrument response correction; can be either 'vel' or 'disp'\n :type water_level: float\n :param water_level: Water-level used during instrument response correction\n :type ref_cha: str\n :param ref_cha: Channel name for the reference Dataset\n :type temp_cha: str\n :param temp_cha: Channel name for the temporary Dataset\n :type baz_ref_net_sta: float\n :param baz_ref_net_sta: Back-azimuth of ref station from temp station in degrees\n :type baz_temp_net_sta: float\n :param baz_temp_net_sta: Back-azimuth of temp station from ref station in degrees\n :type resample_rate: float\n :param resample_rate: Resampling rate (Hz). 
Applies to both data-sets\n :type taper_length: float\n :param taper_length: Taper length as a fraction of window length\n :type buffer_seconds: int\n :param buffer_seconds: The amount of data to be fetched per call from the ASDFDataSets, because \\\n we may not be able to fetch all the data (from start_time to end_time) at \\\n once. The default is set to 10 days and should be a multiple of \\\n interval_seconds.\n :type interval_seconds: int\n :param interval_seconds: The interval in seconds, over which cross-correlation windows are \\\n stacked. Default is 1 day.\n :type window_seconds: int\n :param window_seconds: Length of cross-correlation window in seconds. Default is 1 hr.\n :type window_overlap: float\n :param window_overlap: Window overlap fraction. Default is 0.1.\n :type window_buffer_length: float\n :param window_buffer_length: Buffer length as a fraction of 'window-seconds' around actual data windows of \\\n interest. This helps exclude effects of tapering and other edge artefacts from \\\n data windows before cross-correlation. Default is 0\n :type flo: float\n :param flo: Lower frequency for Butterworth bandpass filter\n :type fhi: float\n :param fhi: Upper frequency for Butterworth bandpass filter\n :type clip_to_2std: bool\n :param clip_to_2std: Clip data in each window to +/- 2 standard deviations\n :type whitening: bool\n :param whitening: Apply spectral whitening\n :type whitening_window_frequency: float\n :param whitening_window_frequency: Window frequency (Hz) used to determine length of averaging window \\\n for smoothing spectral amplitude\n :type one_bit_normalize: bool\n :param one_bit_normalize: Apply one-bit normalization to data in each window\n :type envelope_normalize: bool\n :param envelope_normalize: Envelope via Hilbert transforms and normalize\n :type ensemble_stack: bool\n :param ensemble_stack: Outputs a single CC function stacked over all data for a given station-pair\n :type verbose: int\n :param verbose: Verbosity of printouts. Default is 1; maximum is 3.\n :type tracking_tag: str\n :param tracking_tag: File tag to be added to output file names so runtime settings can be tracked\n :type outputPath: str\n :param outputPath: Folder to write results to\n :return: 1: 1d np.array with time samples spanning [-window_samples+dt:window_samples-dt]\n 2: A dictionary of 2d np.arrays containing cross-correlation results for each station-pair. \\\n Rows in each 2d array represent number of interval_seconds processed and columns \\\n represent stacked samples of length window_seconds.\n 3: A dictionary of 1d np.arrays containing number of windows processed, within each \\\n interval_seconds period, for each station-pair. 
These Window-counts could be helpful \\\n in assessing robustness of results.\n \"\"\"\n #######################################\n # check consistency of parameterization\n #######################################\n if resample_rate and fhi:\n if resample_rate < 2*fhi:\n raise RuntimeError('Resample-rate should be >= 2*fmax')\n\n if clip_to_2std and one_bit_normalize:\n raise RuntimeError('Mutually exclusive parameterization: clip_to_2std and one-bit-normalizations'\n 'together is redundant')\n # end if\n\n # setup logger\n stationPair = '%s.%s' % (ref_net_sta, temp_net_sta)\n fn = os.path.join(outputPath, '%s.log' % (stationPair if not tracking_tag else\n '.'.join([stationPair, tracking_tag])))\n logger = setup_logger('%s.%s' % (ref_net_sta, temp_net_sta), fn)\n\n #######################################\n # Initialize variables for main loop\n #######################################\n startTime = UTCDateTime(start_time)\n endTime = UTCDateTime(end_time)\n\n cTime = startTime\n\n xcorrResultsDict = defaultdict(list) # Results dictionary indexed by station-pair string\n windowCountResultsDict = defaultdict(list) # Window-count dictionary indexed by station-pair string\n intervalStartTimesDict = defaultdict(list)\n intervalEndTimesDict = defaultdict(list)\n sr = 0\n while cTime < endTime:\n cStep = buffer_seconds\n\n if (cTime + cStep) > endTime:\n cStep = endTime - cTime\n\n logger.info('====Time range [%s - %s]====' % (str(cTime), str(cTime + cStep)))\n logger.info('Fetching data for station %s..' % ref_net_sta)\n\n refSt = None\n try:\n rnc, rsc = ref_net_sta.split('.')\n refSt = get_stream(refds, rnc, rsc, ref_cha, cTime, cTime + cStep, baz=baz_ref_net_sta,\n logger=logger, verbose=verbose)\n except Exception as e:\n logger.error('\\t'+str(e))\n logger.warning('\\tError encountered while fetching data. Skipping along..')\n\n if refSt is None:\n logger.info('Failed to fetch data..')\n cTime += cStep\n continue\n elif len(refSt) == 0:\n logger.info('Data source exhausted. Skipping time interval [%s - %s]' % (str(cTime), str(cTime + cStep)))\n cTime += cStep\n continue\n else:\n pass\n # print refSt\n # end if\n\n logger.info('\\tFetching data for station %s..' % temp_net_sta)\n\n tempSt = None\n try:\n tnc, tsc = temp_net_sta.split('.')\n tempSt = get_stream(tempds, tnc, tsc, temp_cha, cTime, cTime + cStep, baz=baz_temp_net_sta,\n logger=logger, verbose=verbose)\n except Exception as e:\n logger.error('\\t'+str(e))\n logger.warning('\\tError encountered while fetching data. Skipping along..')\n # end try\n\n if tempSt is None:\n logger.info('Failed to fetch data..')\n cTime += cStep\n continue\n elif len(tempSt) == 0:\n logger.info('Data source exhausted. 
Skipping time interval [%s - %s]' % (str(cTime), str(cTime + cStep)))\n cTime += cStep\n continue\n else:\n pass\n # print tempSt\n # end if\n\n if verbose > 2:\n logger.debug('\\t\\tData Gaps:')\n tempSt.print_gaps() # output sent to stdout; fix this\n print(\"\\n\")\n\n logger.info('\\tCross-correlating station-pair: %s' % stationPair)\n xcl, winsPerInterval, \\\n intervalStartSeconds, intervalEndSeconds, sr = \\\n xcorr2(refSt[0], tempSt[0], ref_sta_inv, temp_sta_inv,\n instrument_response_output=instrument_response_output,\n water_level=water_level,\n window_seconds=window_seconds,\n window_overlap=window_overlap,\n window_buffer_length=window_buffer_length,\n interval_seconds=interval_seconds,\n resample_rate=resample_rate,\n taper_length=taper_length,\n flo=flo, fhi=fhi,\n clip_to_2std=clip_to_2std,\n whitening=whitening,\n whitening_window_frequency=whitening_window_frequency,\n one_bit_normalize=one_bit_normalize,\n envelope_normalize=envelope_normalize,\n verbose=verbose, logger=logger)\n\n # Continue if no results were returned due to data-gaps\n if xcl is None:\n logger.warning(\"\\t\\tWarning: no cross-correlation results returned for station-pair %s, \" %\n stationPair + \" due to gaps in data.\")\n cTime += cStep\n continue\n # end if\n\n xcorrResultsDict[stationPair].append(xcl)\n windowCountResultsDict[stationPair].append(winsPerInterval)\n\n intervalStartTimesDict[stationPair].append(intervalStartSeconds)\n intervalEndTimesDict[stationPair].append(intervalEndSeconds)\n\n cTime += cStep\n # wend (loop over time range)\n\n x = None\n # skippedCount = 0\n # Concatenate results\n for k in list(xcorrResultsDict.keys()):\n combinedXcorrResults = None\n combinedWindowCountResults = None\n combinedIntervalStartTimes = None\n combinedIntervalEndTimes = None\n for i in np.arange(len(xcorrResultsDict[k])):\n if i == 0:\n combinedXcorrResults = xcorrResultsDict[k][0]\n combinedWindowCountResults = windowCountResultsDict[k][0]\n combinedIntervalStartTimes = intervalStartTimesDict[k][0]\n combinedIntervalEndTimes = intervalEndTimesDict[k][0]\n\n # Generate time samples (only needs to be done once)\n if x is None:\n dt = 1./sr\n x = np.linspace(-window_seconds + dt, window_seconds - dt,\n xcorrResultsDict[k][0].shape[1])\n # end if\n\n if ensemble_stack:\n if combinedXcorrResults.shape[0] > 1:\n combinedXcorrResults = np.expand_dims(np.sum(combinedXcorrResults,\n axis=0), axis=0)\n # end if\n # end if\n else:\n if combinedXcorrResults.shape[1] == xcorrResultsDict[k][i].shape[1]:\n if ensemble_stack:\n if xcorrResultsDict[k][i].shape[0] > 1:\n combinedXcorrResults += np.expand_dims(np.sum(xcorrResultsDict[k][i],\n axis=0), axis=0)\n else:\n combinedXcorrResults += xcorrResultsDict[k][i]\n # end if\n else:\n combinedXcorrResults = np.concatenate((combinedXcorrResults,\n xcorrResultsDict[k][i]))\n # end if\n else:\n if ensemble_stack:\n pass\n else:\n combinedXcorrResults = np.concatenate((combinedXcorrResults,\n np.zeros((xcorrResultsDict[k][i].shape[0],\n combinedXcorrResults.shape[1]))))\n # end if\n logger.warning(\"\\t\\tVariable sample rates detected. 
Current station-pair: %s\" % k)\n # end if\n combinedWindowCountResults = np.concatenate((combinedWindowCountResults,\n windowCountResultsDict[k][i]))\n combinedIntervalStartTimes = np.concatenate((combinedIntervalStartTimes,\n intervalStartTimesDict[k][i]))\n combinedIntervalEndTimes = np.concatenate((combinedIntervalEndTimes,\n intervalEndTimesDict[k][i]))\n # end if\n # end for\n\n # Replace lists with combined results\n xcorrResultsDict[k] = combinedXcorrResults\n windowCountResultsDict[k] = combinedWindowCountResults\n intervalStartTimesDict[k] = combinedIntervalStartTimes\n intervalEndTimesDict[k] = combinedIntervalEndTimes\n # end for\n\n # Save Results\n for i, k in enumerate(list(xcorrResultsDict.keys())):\n fn = os.path.join(outputPath, '%s.nc' % (k if not tracking_tag else '.'.join([k, tracking_tag])))\n\n root_grp = Dataset(fn, 'w', format='NETCDF4')\n root_grp.description = 'Cross-correlation results for station-pair: %s' % k\n\n # Dimensions\n root_grp.createDimension('lag', xcorrResultsDict[k].shape[1])\n root_grp.createDimension('nchar', 10)\n\n lag = root_grp.createVariable('lag', 'f4', ('lag',))\n\n # Add metadata\n lon1 = root_grp.createVariable('Lon1', 'f4')\n lat1 = root_grp.createVariable('Lat1', 'f4')\n lon2 = root_grp.createVariable('Lon2', 'f4')\n lat2 = root_grp.createVariable('Lat2', 'f4')\n distance = root_grp.createVariable('Distance', 'f4')\n\n ref_sta_coords = refds.unique_coordinates[ref_net_sta]\n temp_sta_coords = tempds.unique_coordinates[temp_net_sta]\n lon1[:] = ref_sta_coords[0] if len(ref_sta_coords) == 2 else -999\n lat1[:] = ref_sta_coords[1] if len(ref_sta_coords) == 2 else -999\n lon2[:] = temp_sta_coords[0] if len(temp_sta_coords) == 2 else -999\n lat2[:] = temp_sta_coords[1] if len(temp_sta_coords) == 2 else -999\n if np.min([v != -999 for v in [lon1[:], lat1[:], lon2[:], lat2[:]]]):\n distance[:], _, _ = gps2dist_azimuth(lat1[:], lon1[:], lat2[:], lon2[:])\n # end if\n\n # Add data\n if ensemble_stack:\n nsw = root_grp.createVariable('NumStackedWindows', 'i8')\n avgnsw = root_grp.createVariable('AvgNumStackedWindowsPerInterval', 'f4')\n ist = root_grp.createVariable('IntervalStartTime', 'i8')\n iet = root_grp.createVariable('IntervalEndTime', 'i8')\n xc = root_grp.createVariable('xcorr', 'f4', ('lag',))\n\n totalIntervalCount = int(np.sum(windowCountResultsDict[k] > 0))\n totalWindowCount = int(np.sum(windowCountResultsDict[k]))\n nsw[:] = totalWindowCount\n avgnsw[:] = np.mean(windowCountResultsDict[k][windowCountResultsDict[k]>0])\n ist[:] = int(np.min(intervalStartTimesDict[k]))\n iet[:] = int(np.max(intervalEndTimesDict[k]))\n if totalIntervalCount > 0:\n xc[:] = xcorrResultsDict[k].real / float(totalIntervalCount)\n else:\n xc[:] = xcorrResultsDict[k].real\n # end if\n else:\n root_grp.createDimension('interval', xcorrResultsDict[k].shape[0])\n # Variables\n interval = root_grp.createVariable('interval', 'f4', ('interval',))\n nsw = root_grp.createVariable('NumStackedWindows', 'f4', ('interval',))\n ist = root_grp.createVariable('IntervalStartTimes', 'i8', ('interval',))\n iet = root_grp.createVariable('IntervalEndTimes', 'i8', ('interval',))\n xc = root_grp.createVariable('xcorr', 'f4', ('interval', 'lag',))\n\n # Populate variables\n interval[:] = np.arange(xcorrResultsDict[k].shape[0])\n nsw[:] = windowCountResultsDict[k]\n ist[:] = intervalStartTimesDict[k]\n iet[:] = intervalEndTimesDict[k]\n xc[:, :] = xcorrResultsDict[k].real\n # end if\n\n lag[:] = x\n\n # Add and populate a new group for parameters used\n pg = 
root_grp.createGroup('Parameters')\n\n params = {'corr_chans': '%s.%s' % (ref_cha, temp_cha),\n 'instr_corr_applied_1': 1 if ref_sta_inv else 0,\n 'instr_corr_applied_2': 1 if temp_sta_inv else 0,\n 'instr_corr_output': instrument_response_output,\n 'instr_corr_water_level_db': water_level,\n 'resample_rate': resample_rate if resample_rate else -999,\n 'taper_length': taper_length,\n 'buffer_seconds': buffer_seconds,\n 'interval_seconds': interval_seconds,\n 'window_seconds': window_seconds,\n 'window_overlap': window_overlap,\n 'window_buffer_length': window_buffer_length,\n 'bandpass_fmin': flo if flo else -999,\n 'bandpass_fmax': fhi if fhi else -999,\n 'clip_to_2std': int(clip_to_2std),\n 'one_bit_normalize': int(one_bit_normalize),\n 'zero_mean_1std_normalize': int(clip_to_2std is False and one_bit_normalize is False),\n 'spectral_whitening': int(whitening),\n 'envelope_normalize': int(envelope_normalize),\n 'ensemble_stack': int(ensemble_stack)}\n\n if whitening:\n params['whitening_window_frequency'] = whitening_window_frequency\n\n for _k, _v in params.items():\n setattr(pg, _k, _v)\n # end for\n\n root_grp.close()\n # end for\n\n return x, xcorrResultsDict, windowCountResultsDict\n# end func\n","repo_name":"Ao-Chang/HiperSeis","sub_path":"seismic/xcorqc/xcorqc.py","file_name":"xcorqc.py","file_ext":"py","file_size_in_byte":37947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4496976873","text":"for _ in range(int(input())):\n def permute(nums):\n result = [[]]\n for n in nums:\n new = []\n for p in result:\n for i in range(len(p)+1):\n new.append(p[:i] + [n] + p[i:])\n result = new\n return result\n\n n = int(input())\n my_nums = list(map(int,input().split()))\n l = [str(x) for x in my_nums]\n res = permute(l)\n print(sum(int(\"\".join(x)) for x in res))\n","repo_name":"shubham2704/competetive_coding","sub_path":"codewars/chefAndCakes.py","file_name":"chefAndCakes.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"38044332711","text":"import random\r\nimport math\r\n\r\n\r\ndef getQ(i):\r\n return 1/i\r\n\r\n\r\ndef getRandomPoint(x, d):\r\n return [\r\n x[i] + random.random()*d - d/2\r\n for i in range(len(x))\r\n ]\r\n\r\n\r\ndef solve(func, x, d, qMin):\r\n k = 1\r\n q = getQ(k)\r\n while q > qMin:\r\n newX = getRandomPoint(x, d)\r\n p = None\r\n df = func(newX) - func(x)\r\n if df < 0:\r\n p = 1\r\n else:\r\n p = math.exp(-df/q)\r\n\r\n\r\n if random.random() <= p:\r\n print('Итерация', k)\r\n print('Текущая точка:', x)\r\n print('Значение функции в этой точке:', func(x))\r\n print('Новая точка:', newX)\r\n print('Значение функции в этой точке:', func(newX))\r\n x = newX\r\n\r\n k += 1\r\n q = getQ(k)\r\n print('\\nОптимальная точка:', x)\r\n print('Значение функции в этой точке:', func(x))\r\n\r\n\r\ndef f(x):\r\n x1 = x[0]\r\n x2 = x[1]\r\n return 3*x1*x1 + x1*x2 + 3*x2*x2 - 8*x1\r\n\r\nsolve(f, [0,0], 2, 0.01)\r\n\r\n","repo_name":"OlegNikulin/-","sub_path":"Классификация данных/обжиг.py","file_name":"обжиг.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14844306662","text":"from datetime import timedelta, date, datetime\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import StatesGroup, State\nfrom config import dp, bot, BRON_CHANNEL, db\nfrom aiogram import 
types\nfrom aiogram.types import ReplyKeyboardMarkup, KeyboardButton, ContentType\nfrom filters import IsUser\nfrom app import btnBrn, btnMenu, btnbar, btnTime, btndlv, sos\nfrom aiogram.types import ContentType\n\nb51 = KeyboardButton(\"❌ НЕТ\")\nb52 = KeyboardButton(\"✅ ВЕРНО\")\nbtn_done = \"✅ ВЕРНО\"\nbtn_tm = \"❌ НЕТ\"\notmBtn = ReplyKeyboardMarkup(resize_keyboard=True).add(b52).add(b51)\n\nb53 = \"❌ ОТМЕНИТЬ\"\n\n\"\"\"Bron stolov\"\"\"\nb1 = KeyboardButton(\"10:00\")\nb2 = KeyboardButton(\"10:30\")\nb3 = KeyboardButton(\"11:00\")\nb4 = KeyboardButton(\"11:30\")\nb5 = KeyboardButton(\"12:00\")\nb6 = KeyboardButton(\"12:30\")\nb7 = KeyboardButton(\"13:00\")\nb8 = KeyboardButton(\"13:30\")\nb9 = KeyboardButton(\"14:00\")\nb10 = KeyboardButton(\"14:30\")\nb11 = KeyboardButton(\"15:00\")\nb12 = KeyboardButton(\"15:30\")\nb13 = KeyboardButton(\"16:00\")\nb14 = KeyboardButton(\"16:30\")\nb15 = KeyboardButton(\"17:00\")\nb16 = KeyboardButton(\"17:30\")\nb17 = KeyboardButton(\"18:00\")\nb18 = KeyboardButton(\"18:30\")\nb19 = KeyboardButton(\"19:00\")\nb20 = KeyboardButton(\"19:30\")\nb21 = KeyboardButton(\"20:00\")\nb22 = KeyboardButton(\"20:30\")\nb23 = KeyboardButton(\"21:00\")\nb24 = KeyboardButton(\"21:30\")\nb25 = KeyboardButton(\"22:00\")\ntimeBtn = ReplyKeyboardMarkup().add(b1).add(b2).add(b3).add(b4).add(b5).add(b6).add(b7).add(b8).add(b9).add(b10)\ntimeBtn.add(b11).add(b12).add(b13).add(b14).add(b15).add(b16).add(b17).add(b18).add(b19).add(b20).add(b21) \\\n .add(b22).add(b23).add(b24).add(b25)\n\"\"\"date button\"\"\"\n\n\n# def json_serial(obj):\n# \"\"\"JSON serializer for objects not serializable by default json code\"\"\"\n#\n# if isinstance(obj):\n# return obj.isoformat()\n# raise TypeError(\"Type %s not serializable\" % type(obj))\n\n\ndef date_day():\n markup = ReplyKeyboardMarkup()\n first_date = date.today() + timedelta(days=0)\n duration = timedelta(days=14)\n for d in range(duration.days + 1):\n day = first_date + timedelta(days=d)\n day_in = day.strftime(\"%d-%m-%y\")\n markup.add(day_in)\n return markup\n\n\n\"\"\"People\"\"\"\n\n\ndef people():\n markup = ReplyKeyboardMarkup()\n for i in range(1, 11):\n i = str(i)\n markup.add(i)\n return markup\n\n\nb54 = KeyboardButton(\"📞 Отправить свой номер\", request_contact=True)\nsend_phone = ReplyKeyboardMarkup(resize_keyboard=True).add(b54)\n\n\nclass FSMbron(StatesGroup):\n name = State()\n time = State()\n date = State()\n people = State()\n phone_number = State()\n\n\n@dp.message_handler(IsUser(), text=btnBrn, state=None)\nasync def cmd_start(message: types.Message):\n is_allowed = db.fetchall('SELECT * FROM regime')\n\n if is_allowed[0][0] == 1:\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row(b53)\n await FSMbron.name.set()\n await message.answer('�� На чье имя бронируем стол?', reply_markup=markup)\n else:\n await message.answer('Приносим извинения, на данный момент брони не принимаются.')\n\n\n@dp.message_handler(IsUser(), state=FSMbron.name)\nasync def load_name(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n if message.text != \"❌ ОТМЕНИТЬ\":\n data['name'] = message.text\n await FSMbron.next()\n await message.reply('📅 На какую дату?', reply_markup=date_day())\n else:\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row(btnMenu, btnbar, btnTime).add(btnBrn, btndlv, sos)\n await bot.send_message(message.from_user.id, \"ПЕРЕХОД НА ГЛАВНОЕ МЕНЮ\", reply_markup=markup)\n await state.finish()\n\n\n@dp.message_handler(IsUser(), state=FSMbron.date)\nasync def 
load_date(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['date'] = message.text\n await FSMbron.next()\n await message.reply('🕗 Выберите время бронирования: ', reply_markup=timeBtn)\n\n\n@dp.message_handler(IsUser(), state=FSMbron.time)\nasync def load_time(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['time'] = message.text\n await FSMbron.next()\n await message.reply('👪 На какое количество гостей?', reply_markup=people())\n\n\n@dp.message_handler(IsUser(), state=FSMbron.people)\nasync def load_people(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['people'] = message.text\n await FSMbron.next()\n await message.reply('Введите номер телефона пожалуйста.\\n'\n 'Хостес перезвонит Вам для подтверждения брони.', reply_markup=send_phone)\n\n\n@dp.message_handler(IsUser(), content_types=ContentType.CONTACT, state=FSMbron.phone_number)\nasync def load_phone_number(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n if message.contact is not None:\n data['phone_number'] = message.contact[\"phone_number\"]\n await FSMbron.next()\n await message.reply(f\"Отлично!\\n\"\n f\"Будем ждать, {data['time']} в {data['people']}\\n\"\n f\"на имя {data['name']}\", reply_markup=otmBtn)\n\n\n@dp.message_handler(IsUser(), state=FSMbron.phone_number)\nasync def procces_phone(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['phone_number'] = message.text\n await FSMbron.next()\n await message.reply(f\"Отлично!\\n\"\n f\"Будем ждать, {data['time']} в {data['people']}\\n\"\n f\"на имя {data['name']}\", reply_markup=otmBtn)\n\n\n@dp.message_handler(IsUser(), text=btn_done)\nasync def cencel_message(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row(btnMenu, btnbar, btnTime).add(btnBrn, btndlv, sos)\n\n await bot.send_message(BRON_CHANNEL, f\"Бронь\\n\"\n f\"Ф.И.О: {data['name']}\\n\"\n f\"Время: {data['people']}\\n\"\n f\"Дата: {data['time']}\\n\"\n f\"Кол-во гостей: {data['date']}\\n\"\n f\"Номер телефона: {data['phone_number']}\")\n await message.reply(\"Бронь принята\\n\"\n \"Ожидайте подтверждения\", reply_markup=markup)\n await state.finish()\n\n\n@dp.message_handler(IsUser(), text=btn_tm)\nasync def otm(message: types.Message, state: FSMContext):\n markup = ReplyKeyboardMarkup(resize_keyboard=True)\n markup.row(btnMenu, btnbar, btnTime).add(btnBrn, btndlv, sos)\n await bot.send_message(message.from_user.id, \"Бронь отменена\", reply_markup=markup)\n await state.finish()\n","repo_name":"Mekan777-alt/outpack","sub_path":"hendlers/user/reserved.py","file_name":"reserved.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33686051138","text":"\"\"\" Calcoliamo una tavola pitagorica. 
\"\"\"\r\n\r\n# Titolo.\r\nprint (\" TAVOLA PITAGORICA\\n\")\r\n\r\n# Variabile contatore.\r\nriga = 1\r\n\r\n# Impostazione del ciclo.\r\nwhile riga <= 10 :\r\n\r\n colonna = 1\r\n # Impostiamo il secondo ciclo annidato.\r\n while colonna <= 10 :\r\n\r\n # Stampiamo con una tabulazione il risultato del valore della riga con\r\n # quello della colonna.\r\n print ('\\t', riga * colonna, end = \" \")\r\n\r\n # Incrementiamo il contatore.\r\n colonna = colonna + 1\r\n\r\n # Incrementiamo l contatore.\r\n riga = riga + 1\r\n print(\"\\n\")\r\n","repo_name":"open-education-polito/esercizi-python","sub_path":"Programmi_v3/Modulo16.py","file_name":"Modulo16.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26319157894","text":"from os import replace\n\n#función para calcular las dos letras que más aparecen. Usa un array de 26 enteros (uno por cada letra). Con ord(char) se obtine el valor númerico del caracter.\n#Sabiendo el valor de A, Z, a y z se ajusta el valor dado por ord para que sea el número de la letras dentro del abecedario. Se suma 1 en la posición por cada aparición.\n#A las dos letras más repetidas se les asigna e y a por ser las que más aparecen el castellano.\ndef calcFrecPrinc(cfr,desc):\n frecabs = [0]*26\n n=0\n for carac in cfr:\n ind = ord(carac)\n if ind>=65 and ind<=90:\n ind = ind-65\n elif ind>=97 and ind<=122:\n ind = ind-97\n else:\n ind=28\n if ind<28: \n frecabs[ind] = frecabs[ind]+1\n n=n+1 \n frec=frecabs\n for i in range(0, 26):\n frec[i]=frecabs[i]/n\n i = i+1\n let_e=0\n let_a=0\n x=0\n for i in range(0, 26):\n if x=65 and ord(desc[i])<=90:\n for c in range (0, len(txt)):\n if txt[c]==desc[i]:\n txt[c]=l\n txt=convert_str(txt)\n return(txt) \n\n#convierte una lista de caracteres a una string\ndef convert_str(s):\n nueva_str = \"\"\n for x in s:\n nueva_str += x \n return nueva_str\n\n#dado un texto y un string que buscar con '?' en la posición de la letra buscada, devuelve la letra que se busca.\n#el programa falla si la primera letra del string es la ?. Es mejor empezar y acabar con espacios para delimitar palabras.\n#ej: \" ho?a \" devolveria la letra del texto cifrado que representa la l. Todas las letras del string deben de haber sido descubiertas para usarse.\ndef buscar_coincidencias(txt,coincidencia):\n list_txt=list(txt)\n list_coinc=list(coincidencia)\n enc=False\n c=0\n descubierta=''\n while cn1:\n p3=p2\n n3=n2\n p2=p1\n n2=n1\n p1=p[0]\n n1=p[1]\n elif p[1]>n2:\n p3=p2\n n3=n2\n p2=p[0]\n n2=p[1]\n elif p[1]>n3:\n p3=p[0]\n n3=p[1] \n top_pal=[(p1,n1),(p2,n2),(p3,n3)]\n return(top_pal)\n\n#sustituye en el texto las tres palabras que más aparecen por 'de', 'en' y 'la'. Son las palabras más comunes en castellano. Comprueba en cuales aparece la 'e' para asignar correctamente las letras.\n#no funciona si las palabras que m\ndef sust_por_pal_princ(txt,top_pal):\n p1=top_pal[0][0]\n p1=list(p1)\n p2=top_pal[1][0]\n p2=list(p2)\n p3=top_pal[2][0]\n p3=list(p3)\n #estadísticamente el que más aparece es \"de\". 
Es muy improbable que \"en\" sea el más repetido\n if p1[1]==p2[0]: #de - en - la:\n txt=txt.replace(p1[0],'d')\n txt=txt.replace(p1[1],'e')\n txt=txt.replace(p2[0],'e')\n txt=txt.replace(p2[1],'n')\n txt=txt.replace(p3[0],'l')\n txt=txt.replace(p3[1],'a')\n elif p1[1]==p3[0]: #de - la - en\n txt=txt.replace(p1[0],'d')\n txt=txt.replace(p1[1],'e')\n txt=txt.replace(p2[0],'l')\n txt=txt.replace(p2[1],'a')\n txt=txt.replace(p3[0],'e')\n txt=txt.replace(p3[1],'n')\n else: #la - de - en\n txt=txt.replace(p1[0],'l')\n txt=txt.replace(p1[1],'a')\n txt=txt.replace(p2[0],'d')\n txt=txt.replace(p2[1],'e')\n txt=txt.replace(p3[0],'e')\n txt=txt.replace(p3[1],'n') \n return(txt)\n\nwith open('texto.txt','r') as f: \n txt=f.read() \ndesc=['']*26\n\n#usando la frecuencia de letras 'e' 'a'\ndesc=calcFrecPrinc(txt, desc)\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#usando la frecuencia de palabras más repetidas \"de\" \"en\" \"la\" (e/a/n/d/l)\n'''rep=palabras_rep(txt)\ntop_pal=pal_mas_rep(rep)\ntxt=sust_por_pal_princ(txt,top_pal)\ndesc=eliminar_desc(desc)'''\n\n#buscar l (a la)\nnuevo_desc=buscar_coincidencias(txt, \" a ?a \")\ndesc[11]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#buscar n (en el)\nnuevo_desc=buscar_coincidencias(txt, \" e? el \")\ndesc[13]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#buscar o (no)\nnuevo_desc=buscar_coincidencias(txt, \" n? \")\ndesc[14]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#buscar s (las)\nnuevo_desc=buscar_coincidencias(txt, \" la? \")\ndesc[18]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#Buscar r (rr)\nnuevo_desc=buscar_rr(txt)\ndesc[17]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#Buscar i (sin)\nnuevo_desc=buscar_coincidencias(txt,\" s?n \" )\ndesc[8]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\n#Buscar u (a su)\nnuevo_desc=buscar_coincidencias(txt,\" a s? 
\" )\ndesc[20]=nuevo_desc\ntxt=descifrar(txt,desc)\ndesc=eliminar_desc(desc)\n\nprint(desc)\nprint(txt)\n","repo_name":"Boscoaran/Criptografia","sub_path":"descifrador.py","file_name":"descifrador.py","file_ext":"py","file_size_in_byte":7480,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23622424951","text":"\ndef checkCase(data):\n rest=0\n for i in xrange(0, len(data)):\n if data[i]!=i+1:\n rest+=1\n return rest\n\n\ndata=open(\"D-large.in\",\"r\").read()\n\ndata=data.splitlines()[1:]\nout=open(\"out.txt\",\"w\")\n\nfor c in xrange(1, len(data),2):\n tmp=[int(i) for i in data[c].split(\" \")]\n \n out.write(\"Case #%i: %i\\n\"%(c/2+1,checkCase(tmp)))\n \nout.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_77/197.py","file_name":"197.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35833166035","text":"from json.tool import main\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\nfrom pygame import gfxdraw\r\nfrom uni_button import *\r\nimport map_creator\r\nimport map_menu\r\n\r\nSCREEN_RESOLUTION = (1920,1080)\r\nCOLOR_PRIMARY = (238,33,33)\r\nCOLOR_SECONDARY = (197,197,197)\r\n# COLOR_SECONDARY = (197,172,75)\r\nCOLOR_BACKGROUND = (7,7,7)\r\nCOLOR_BACKGROUND_SECOND = (14,14,14)\r\n\r\nclock = pygame.time.Clock()\r\npygame.init()\r\npygame.display.set_caption(\"Test\")\r\nflags = FULLSCREEN | DOUBLEBUF\r\nscreen = pygame.display.set_mode(SCREEN_RESOLUTION,flags,32)\r\npygame.mouse.set_cursor(*pygame.cursors.arrow)\r\n\r\ndef main_menu():\r\n BACKGROUND_IMAGE = pygame.image.load(\"content/menu/menu_background.png\").convert()\r\n BACKGROUND_IMAGE = pygame.transform.scale(BACKGROUND_IMAGE,SCREEN_RESOLUTION)\r\n # OV = pygame.image.load(\"content/menu/overlay.png\").convert_alpha()\r\n\r\n screen.blit(BACKGROUND_IMAGE,(0,0))\r\n space = 85\r\n menu_button1 = ButtonA(\"NOWA GRA\", (350,60), 45, space*1)\r\n # menu_button2 = ButtonA(\"WCZYTAJ GRĘ\", (350,60), 30, space*2)\r\n menu_button3 = ButtonA(\"KREATOR MAP\", (350,60), 30, space*2)\r\n # menu_button4 = ButtonA(\"O GRZE\", (350,60), 30, space*4)\r\n menu_button5 = ButtonA(\"WYJDŹ\", (350,60), 30, space*5)\r\n \r\n while True:\r\n mousePosition = pygame.mouse.get_pos()\r\n menu_button1.draw(screen,mousePosition)\r\n # menu_button2.draw(screen,mousePosition)\r\n menu_button3.draw(screen,mousePosition)\r\n # menu_button4.draw(screen,mousePosition)\r\n menu_button5.draw(screen,mousePosition)\r\n\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN:\r\n if menu_button1.cursorIn(mousePosition):\r\n map_menu.map_menu(screen)\r\n screen.blit(BACKGROUND_IMAGE,(0,0))\r\n # elif menu_button2.cursorIn(mousePosition):\r\n # pygame.quit()\r\n elif menu_button3.cursorIn(mousePosition):\r\n map_creator.map(screen)\r\n screen.blit(BACKGROUND_IMAGE,(0,0))\r\n # elif menu_button4.cursorIn(mousePosition):\r\n # pygame.quit()\r\n elif menu_button5.cursorIn(mousePosition):\r\n pygame.quit()\r\n\r\n \r\n # print(\"Menu | \"+str(clock.get_fps()))\r\n pygame.display.update()\r\n clock.tick(120)\r\n\r\n\r\nmain_menu()","repo_name":"Klavvv/a-wiec-wojna","sub_path":"Source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"4264208354","text":"#==========================#\n# Questão Ciclovias #\n#==========================#\n\nn, m = [int(i) for i in input().split()]\n\n#armazena o caminho de cada nó numa lista\ngrafo = []\nfor i in range(n):\n grafo.append([])\n\nfor i in range(m):\n a, b = [int(i) for i in input().split()]\n grafo[a-1].append(b-1)\n grafo[b-1].append(a-1)\n\n#ordena os caminhos de cada nó\n[no.sort() for no in grafo]\n\n#print(grafo)\n\n#o maior caminho para o próprio nó é tamanho 1\nresp = [1 for i in range(n)]\nmaior = [0 for i in range(n)]\n\n#print(maior)\n#print(len(grafo[n-1]))\n\n#percorro os nós em ordem decrescente\nfor i in range(n-1,-1,-1):\n #armazeno o maior caminho de cada vizinho até o momento\n maior[len(grafo[i])] = 0\n for j in range(len(grafo[i])-1,-1,-1):\n v=grafo[i][j]\n maior[j] = max(resp[v], maior[j+1])\n\n #percorro o maior caminho de cada vizinho, do vizinho até o nó atual e atualizo na lista resp\n for j in range(len(grafo[i])):\n v=grafo[i][j]\n # e atualizo resp[v] para cada vizinho\n resp[v] = max(resp[v], 2+maior[j+1])\n\nprint(' '.join(str(k) for k in resp))\n","repo_name":"claytonmaciel/obi","sub_path":"ciclovias-estrategia-1.py","file_name":"ciclovias-estrategia-1.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"72255669313","text":"from django.shortcuts import render\nimport json\nfrom core.models import Profile, Category\nfrom django.http import Http404, HttpResponse\n\n# Create your views here.\ndef get_profile_info(request, profile_id):\n \"\"\"\n Возвращает информацию об общем затраченном времени и времени, затраченном на каждую из\n входящих в профиль категорий\n :param request: Объект запроса\n :param str profile_id: идентификатор профиля\n :return: json строка по объекту со следующими свойствами:\n @spent_time - общее время профиля в формате чч мм\n @categories - массив объектов со следующими свойствами:\n @name - название категории\n @spent_time - затраченное время\n \"\"\"\n try:\n profile = Profile.objects.get(pk=int(profile_id))\n except Profile.DoesNotExist:\n raise Http404\n\n data = get_profile_data(profile)\n\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\n\ndef get_profile_data(profile):\n \"\"\"\n Возвращает объект с данными по профилю\n :param profile: Профиль, для которого осуществляется поиск\n :return: объект с данными\n \"\"\"\n data = {}\n data['spent_time'] = min_to_hours(profile.spent_time)\n data['categories'] = []\n\n categories = Category.objects.filter(profile=profile)\n for category in categories:\n cat_obj = {\n \"name\": category.name,\n \"spent_time\": min_to_hours(category.spent_time)\n }\n data['categories'].append(cat_obj)\n\n return data\n\n\n\ndef min_to_hours(value):\n\t\"\"\"\n\tConvert minutes to hours\n\tfor example minutes: 62 out: 1h 2m\n\t\"\"\"\n\tminutes = int(value)\n\thours = minutes//60\n\tminutes = minutes%60\n\tresult = ''\n\tif hours > 0:\n\t\tresult = '%dh %dm' % (hours, minutes)\n\telse:\n\t\tresult = '%dm' % (minutes)\n\n\treturn result","repo_name":"denisyakorev/owntrackbot","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42304536698","text":"import gate_api\nfrom gate_api.exceptions import ApiException, GateApiException\nimport pytz\nfrom datetime import datetime\nimport requests\nimport 
time\nimport hashlib\nimport hmac\nimport math\n\nfrom .csv_generator import generate_csv_file, create_timestamp\n\nGATE_API_URL = \"https://api.gateio.ws/api/v4\"\n\n\nclass GateIOApi():\n def __init__(self, input_dict):\n print(f'kwargs:: {input_dict}')\n configuration = gate_api.Configuration(\n host = GATE_API_URL,\n key = input_dict.get('apikey'),\n secret = input_dict.get('apisecret')\n )\n api_client = gate_api.ApiClient(configuration)\n self.api_instance = gate_api.SpotApi(api_client)\n self.wallet_api_instance = gate_api.WalletApi(api_client)\n self.api_action = input_dict.get('apiaction')\n self.key = input_dict.get('apikey')\n self.secret = input_dict.get('apisecret')\n self.start_date = create_timestamp(\n input_dict.get('start_date')\n )\n self.end_date = create_timestamp(\n input_dict.get('end_date')\n )\n self.initiate_request()\n\n def initiate_request(self):\n #TODO one action for deposits and withdraws\n if self.api_action == 'deposits':\n self.get_deposits_and_withdrawals()\n elif self.api_action == 'spottrades':\n self.get_spot_trades()\n elif self.api_action == 'withdrawals':\n self.get_deposits_and_withdrawals()\n else:\n pass\n\n def gen_sign(self, method, url, query_string=None, payload_string=None):\n t = time.time()\n m = hashlib.sha512()\n m.update((payload_string or \"\").encode('utf-8'))\n hashed_payload = m.hexdigest()\n s = '%s\\n%s\\n%s\\n%s\\n%s' % (method, url, query_string or \"\", hashed_payload, t)\n sign = hmac.new(self.secret.encode('utf-8'), s.encode('utf-8'), hashlib.sha512).hexdigest()\n return {'KEY': self.key, 'Timestamp': str(t), 'SIGN': sign}\n\n def get_spot_trades(self):\n try:\n csv_header = ['Koinly Date', 'Pair', 'Side', 'Amount', 'Total', 'Fee Amount', 'Fee Currency', 'Order ID', 'Trade ID']\n\n host = \"https://api.gateio.ws\"\n prefix = \"/api/v4\"\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}\n\n url = '/spot/my_trades'\n print(f'_from: {math.trunc(self.start_date)}, to: {math.trunc(self.end_date)}')\n query_param = f'limit=1000&_from={math.trunc(self.start_date)}&to={math.trunc(self.end_date)}'\n sign_headers = self.gen_sign('GET', prefix + url, query_param)\n headers.update(sign_headers)\n r = requests.request('GET', host + prefix + url + \"?\" + query_param, headers=headers)\n print(f'Gate_IO spot trades: {r.json()}')\n trades = ({\n 'Koinly Date': datetime.fromtimestamp(int(trade.get('create_time'))).strftime('%Y-%m-%d %H:%M:%S'),\n 'Pair': trade.get('currency_pair'),\n 'Side': trade.get('side'),\n 'Amount': trade.get('amount'),\n 'Total': float(trade.get('amount')) * float(trade.get('price')),\n 'Fee Amount': trade.get('fee'),\n 'Fee Currency': trade.get('fee_currency'),\n 'Order ID': trade.get('order_id'),\n 'Trade ID': trade.get('id'),\n } for trade in r.json())\n generate_csv_file('capital_spot_trades', trades, csv_header)\n except GateApiException as ex:\n print(\"Gate api exception, label: %s, message: %s\\n\" % (ex.label, ex.message))\n except ApiException as e:\n print(\"Exception when calling SpotApi->list_trades: %s\\n\" % e)\n\n\n def get_deposits_and_withdrawals(self):\n print(f'key: {self.key}')\n print(f'secret: {self.secret}')\n deposits_and_withdrawals = []\n deposits_and_withdrawals.extend(list(self.get_withdrawals()))\n deposits_and_withdrawals.extend(list(self.get_deposits()))\n print(f'deposits_and_withdrawals: {deposits_and_withdrawals}')\n\n csv_header = ['Koinly Date', 'Amount', 'Currency', 'Label', 'Description', 'TxHash']\n 
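        # deposits_and_withdrawals was assembled above from get_withdrawals()
        # and get_deposits(); both yield dicts keyed exactly by csv_header, so
        # the merged history can be written out as one Koinly-importable CSV.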
generate_csv_file('capital_deposits_and_withdrawals', deposits_and_withdrawals, csv_header)\n\n def get_withdrawals(self):\n try:\n # returns list[LedgerRecord]\n api_response = self.wallet_api_instance.list_withdrawals(limit=1000)\n print(f'api_response withdraws: {api_response}')\n my_withdrawals = ({\n 'Koinly Date': datetime.fromtimestamp(int(withdraw.timestamp), pytz.UTC).strftime('%Y-%m-%d %H:%M'),\n 'Amount': withdraw.amount,\n 'Currency': withdraw.currency,\n 'Label': '',\n 'Description': withdraw.memo,\n 'TxHash': withdraw.txid\n } for withdraw in api_response)\n return my_withdrawals\n except GateApiException as ex:\n print(\"Gate api exception, label: %s, message: %s\\n\" % (ex.label, ex.message))\n except ApiException as e:\n print(\"Exception when calling WalletApi->list_withdrawals: %s\\n\" % e)\n\n\n def get_deposits(self):\n try:\n api_response = self.wallet_api_instance.list_deposits(limit=1000)\n print(f'api_response deposits: {api_response}')\n my_deposits = ({\n 'Koinly Date': datetime.fromtimestamp(int(deposit.timestamp), pytz.UTC).strftime('%Y-%m-%d %H:%M'),\n 'Amount': deposit.amount,\n 'Currency': deposit.currency,\n 'Label': '',\n 'Description': deposit.memo,\n 'TxHash': deposit.txid\n } for deposit in api_response)\n return my_deposits\n except GateApiException as ex:\n print(\"Gate api exception, label: %s, message: %s\\n\" % (ex.label, ex.message))\n except ApiException as e:\n print(\"Exception when calling WalletApi->list_deposits: %s\\n\" % e)\n","repo_name":"walimike/lt-capitol","sub_path":"app/gate_io.py","file_name":"gate_io.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28097126909","text":"class Solution:\n # @param A : integer: number of digits\n # @param B : integer: sum of digits\n # @return an integer\n\n def __init__(self):\n self.count = 0\n\n def check(self, A, B, summ, digits_used):\n if digits_used == A:\n if summ == B:\n self.count += 1\n return\n\n if digits_used == 0:\n for i in range(1, 10):\n self.check(A, B, summ + i, digits_used + 1)\n else:\n for i in range(10):\n self.check(A, B, summ + i, digits_used + 1)\n\n def check_memogized(self, A, B, summ, digits_used, dp):\n # print(f'summ: {summ} digits used: {digits_used}')\n if digits_used == A:\n if summ == B:\n return 1\n return 0\n\n if summ > B:\n return 0\n\n if dp[summ][digits_used] != -1:\n return dp[summ][digits_used]\n\n if digits_used == 0:\n ans2 = 0\n for i in range(1, 10):\n ans2 += self.check_memogized(A, B, summ + i, digits_used + 1, dp)\n else:\n ans2 = 0\n for i in range(10):\n ans2 += self.check_memogized(A, B, summ + i, digits_used + 1, dp)\n\n dp[summ][digits_used] = ans2\n return dp[summ][digits_used]\n\n\n def solve(self, A, B):\n dp = [[-1 for i in range(A+1)] for j in range(B+1)]\n ans = self.check_memogized(A, B, 0, 0, dp)\n ans %= 1000000007\n for row in dp:\n print(row)\n print(f'memogized ans is {ans}')\n # self.check(A, B, 0, 0)\n # return self.count % 1000000007\n\n\nif __name__ == '__main__':\n a = 5\n b = 18\n obj = Solution()\n ans = obj.solve(a, b)\n print(f'ans is {ans}')\n","repo_name":"navkant/ds_algo_practice","sub_path":"scaler/dp/dp4/n_digit_number.py","file_name":"n_digit_number.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24585054843","text":"'''\nAdding/finding our items in the database\n'''\n\nfrom flask_restful import Resource, 
reqparse\nfrom flask_jwt import jwt_required\nfrom models.item import ItemModel\n\n\nclass Item(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument(\n 'price',\n type=float,\n required=True,\n help=\"This field cannot be blank!\"\n )\n parser.add_argument(\n 'store_id',\n type=int,\n required=True,\n help=\"Every item needs a store id!\"\n )\n\n @jwt_required()\n def get(self, name):\n # Item now returns an OBJECT\n # - not a dictionary\n item = ItemModel.find_by_name(name)\n # So we have to pull the variables\n # from the object, and create a dict\n # using our ItemModel.json() method :)\n if item:\n return item.json()\n return {'message': 'Item not found'}, 404\n\n def post(self, name):\n if ItemModel.find_by_name(name):\n return {'message': '{} already exists'.format(name)}, 400\n\n data = self.parser.parse_args()\n # We can now create an item object,\n # instead of a manual dictionary\n item = ItemModel(name, data['price'], data['store_id'])\n\n try:\n # And instead of calling the class\n # again (as with a @classmethod),\n # we use a regular method on the\n # new `item` object\n item.save_to_db()\n except:\n return {'message': 'An error occurred inserting the item'}, 500\n\n # Return the dictionary, not the object\n return item.json(), 201\n\n def delete(self, name):\n item = ItemModel.find_by_name(name)\n if item:\n item.delete_from_db()\n\n return {'message': 'Item deleted'}\n\n\n def put(self, name):\n data = self.parser.parse_args()\n item = ItemModel.find_by_name(name)\n\n if item is None:\n # data['price'], data['store_id'] can be simplified\n # to unpacking the **data\n # - This is safe, so long as we run checks with parser\n item = ItemModel(name, **data)\n else:\n item.price = data['price']\n item.store_id = data['store_id']\n\n # save the stored item to database\n item.save_to_db()\n # convert object to json\n return item.json()\n\n\nclass ItemsList(Resource):\n def get(self):\n # or: list(map(lambda x: x.json(), ItemModel.query.all()))\n return {'items': [item.json() for item in ItemModel.query.all()]}","repo_name":"badlydrawnrob/python-playground","sub_path":"flask-rest-api/06/code/resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16516383638","text":"from PIL import Image\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\ndef readpicture(path):\n # 读入图片\n im = Image.open(path).convert('L')\n # 如果图片为空,返回错误信息,并终止程序\n if im is None:\n print(\"图片打开失败!\")\n exit()\n return im\n\n# 高斯噪声\ndef GaussianNoise(image, means, sigma, percetage):\n image = np.array(image)\n NoiseImg = image\n NoiseNum = int(percetage * image.shape[0] * image.shape[1])\n for i in range(NoiseNum):\n randX = random.randint(0, image.shape[0] - 1)\n randY = random.randint(0, image.shape[1] - 1)\n NoiseImg[randX, randY] = NoiseImg[randX, randY] + random.gauss(means, sigma)\n if NoiseImg[randX, randY] < 0:\n NoiseImg[randX, randY] = 0\n elif NoiseImg[randX, randY] > 255:\n NoiseImg[randX, randY] = 255\n return NoiseImg\n\n\n# 椒盐噪声\ndef PepperandSalt(image, percetage):\n image = np.array(image)\n NoiseImg = image\n rows = NoiseImg.shape[0]\n cols = NoiseImg.shape[1]\n NoiseNum = int(percetage*rows*cols)\n for i in range(NoiseNum):\n randX = random.randint(0, rows - 1)\n randY = random.randint(0, cols - 1)\n\n if random.randint(0, 1) <= 0.5:\n NoiseImg[randX, randY] = 0\n else:\n NoiseImg[randX, randY] = 255\n return NoiseImg\n\n\ndef 
def DrawPicture1(image, im1, im2):\n    plt.subplot(131)\n    plt.imshow(image, plt.cm.gray)\n    plt.title('before')\n    plt.subplot(132)\n    plt.imshow(im1, plt.cm.gray)\n    plt.title('GaussianNoise')\n    plt.subplot(133)\n    plt.imshow(im2, plt.cm.gray)\n    plt.title('PepperandSalt')\n    plt.show()\n\n\nimage = readpicture('im.jpg')\nim1 = GaussianNoise(image, 0.01, 0.5, 1)\nim2 = PepperandSalt(image, 0.1)\nDrawPicture1(image, im1, im2)\n\n\n# mean filtering and median filtering for denoising\ndef MeanFilter(Imge,dim): #Imge is the image to process, dim is the filter size (dim*dim)\n    im=np.array(Imge)\n    sigema=0\n    for i in range(int(dim/2), im.shape[0] - int(dim/2)):\n        for j in range(int(dim/2), im.shape[1] - int(dim/2)):\n            for a in range(-int(dim/2), -int(dim/2)+dim):\n                for b in range(-int(dim/2), -int(dim/2)+dim):\n                    sigema = sigema + im[i + a, j + b]\n            im[i, j] = sigema / (dim*dim)\n            sigema = 0\n    return im\n\n\ndef MedianFilter(Imge,dim): #Imge is the image to process, dim is the filter size (dim*dim)\n    im=np.array(Imge)\n    sigema=[]\n    for i in range(int(dim/2), im.shape[0] - int(dim/2)):\n        for j in range(int(dim/2), im.shape[1] - int(dim/2)):\n            for a in range(-int(dim/2), -int(dim/2)+dim):\n                for b in range(-int(dim/2), -int(dim/2)+dim):\n                    sigema.append(im[i + a, j + b])\n            sigema.sort()\n            im[i, j] = sigema[int(dim*dim/2)]\n            sigema = []\n    return im\n\n\ndef DrawPicture2(image, im3, im5, im7, title1):\n    plt.subplot(221)\n    plt.imshow(image, plt.cm.gray)\n    plt.title('before')\n    plt.subplot(222)\n    plt.imshow(im3, plt.cm.gray)\n    plt.title(title1+'3')\n    plt.subplot(223)\n    plt.imshow(im5, plt.cm.gray)\n    plt.title(title1+'5')\n    plt.subplot(224)\n    plt.imshow(im7, plt.cm.gray)\n    plt.title(title1+'7')\n    plt.show()\n\n\n# filtering the salt-and-pepper noise\nim = im2\nim3 = MeanFilter(im, 3)\nim5 = MeanFilter(im, 5)\nim7 = MeanFilter(im, 7)\nDrawPicture2(im, im3, im5, im7, 'MeanFilter')\n\nim = im2\nim3 = MedianFilter(im, 3)\nim5 = MedianFilter(im, 5)\nim7 = MedianFilter(im, 7)\nDrawPicture2(im, im3, im5, im7, 'MedianFilter')\n\n\n# filtering the Gaussian noise\nim = im1\nim3 = MeanFilter(im, 3)\nim5 = MeanFilter(im, 5)\nim7 = MeanFilter(im, 7)\nDrawPicture2(im, im3, im5, im7, 'MeanFilter')\n\nim = im1\nim3 = MedianFilter(im, 3)\nim5 = MedianFilter(im, 5)\nim7 = MedianFilter(im, 7)\nDrawPicture2(im, im3, im5, im7, 'MedianFilter')","repo_name":"WoodPecker1111/image_process_spatial","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"42816164062","text":"from flask import Flask, render_template\nimport pandas as pd\n\napp = Flask(__name__)\ndf = pd.read_csv(\"dictionary.csv\")\n\n\n@app.route(\"/\")\ndef home():\n    return render_template(\"home.html\")\n\n\n@app.route(\"/api/v1/<word>\")\ndef meaning(word):\n    definition = df.loc[df[\"word\"] == word]['definition'].squeeze()\n    definition = definition.strip().split(\"\\n\")\n    result = {\"word\": word, \"definition\": [meaning for meaning in definition]}\n    return result\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"Chandrakanth0698/codingexerices","sub_path":"english_dictionary_api.py","file_name":"english_dictionary_api.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"75051710595","text":"#! 
/usr/bin/env python\n\nimport sys\nfrom gtfutils import GTFRecord, gtf_bygroup\nfrom collections import Counter\n\n#--- Check that we can get the correct transcript IDs\n# genePredToGtf assigns transcript IDs from the \"name\" column of the genePred file.\n# Some IDs are repeated (same ID refers to distinct transcripts). When this happens,\n# the count is appended to the transcript ID, for example, the second transcript with\n# NR_XXXXXX as the ID will be called NR_XXXXXX_2, the third NR_XXXXXX_3, and so on.\n# This is in the same order as the genePred file.\nprint >>sys.stderr, 'Checking that transcript IDs can be generated from genePred...'\n_lines = (l.strip('\\n').split('\\t') for l in open('refGene.gtf','r') if not l.startswith('#'))\n_gtf = [GTFRecord(l) for l in _lines]\n_bytxid = gtf_bygroup(_gtf, 'transcript_id')\n\n_gp = (l.strip().split('\\t') for l in open('refGene.gpred','r'))\n_header = _gp.next()\n\n_transcript_counter = Counter()\nfor _gpr in _gp:\n #--- Calculate the transcript ID\n _transcript_counter[_gpr[1]] += 1\n _transcript_id = _gpr[1] if _transcript_counter[_gpr[1]]==1 else '%s_%d' % (_gpr[1], _transcript_counter[_gpr[1]])\n assert _transcript_id in _bytxid, '%s not in GTF file' % _transcript_id\n _exons = sorted(_bytxid[_transcript_id], key=lambda x:int(x.attrs['exon_number']))\n assert _exons[0].spos == int(_gpr[4]) + 1\n\nprint >>sys.stderr, 'Transcript IDs are OK'\nprint >>sys.stderr, 'Assigning tss_id and p_id...'\n\ntranscript_counter = Counter()\ntsskey_tssid = {}\ntranscript_tsskey = {}\npkey_pid = {}\ntranscript_pkey = {}\n\ngp = (l.strip().split('\\t') for l in open('refGene.gpred','r'))\nheader = gp.next()\n\nfor gpr in gp:\n #--- Calculate the transcript ID\n transcript_counter[gpr[1]] += 1\n transcript_id = gpr[1] if transcript_counter[gpr[1]]==1 else '%s_%d' % (gpr[1], transcript_counter[gpr[1]])\n \n #--- Assign TSS ID for transcript\n # First generate a key string that identifies the TSS, then assign a tss_id to that\n # key string. Subsequent genes with the same TSS will generate the same key string and\n # will thus be assigned the same tss_id\n if gpr[3] == '+':\n tsskey = '%s|%s|%s' % (gpr[2], gpr[3], gpr[4])\n else:\n assert gpr[3] == '-'\n tsskey = '%s|%s|%s' % (gpr[2], gpr[3], gpr[5])\n if tsskey not in tsskey_tssid:\n tsskey_tssid[tsskey] = 'TSS%d' % (len(tsskey_tssid)+1)\n # Now map the transcript ID to the tsskey, which maps to the tss_id\n transcript_tsskey[transcript_id] = tsskey\n \n #--- Assign Protein ID for transcript\n # Skip over genes with no CDS. 
First generate a key string that identifies the protein,\n    # then assign a p_id.\n    if gpr[6] == gpr[7]:\n        pass # Non-coding\n    else:\n        pkey = '%s|%s|%s|%s|%s|%s' % (gpr[2], gpr[3], gpr[6], gpr[7], gpr[9], gpr[10])\n        if pkey not in pkey_pid:\n            pkey_pid[pkey] = 'P%d' % (len(pkey_pid)+1)\n        # Now map the transcript ID to the pkey, which maps to the p_id\n        transcript_pkey[transcript_id] = pkey\n\n# Reload the GTF file\nlines = (l.strip('\\n').split('\\t') for l in open('refGene.gtf','r') if not l.startswith('#'))\ngtf = (GTFRecord(l) for l in lines)\n\nfor g in gtf:\n    assert g.attrs['transcript_id'] in transcript_tsskey, 'ERROR: %s not found in transcript ids' % g.attrs['transcript_id']\n    tsskey = transcript_tsskey[g.attrs['transcript_id']]\n    assert tsskey in tsskey_tssid, 'ERROR: %s not found in keys' % tsskey\n    g.attrs['tss_id'] = tsskey_tssid[tsskey]\n    if g.attrs['transcript_id'] in transcript_pkey:\n        pkey = transcript_pkey[g.attrs['transcript_id']]\n        assert pkey in pkey_pid, 'ERROR: %s not found in keys' % pkey\n        g.attrs['p_id'] = pkey_pid[pkey]\n    print >>sys.stdout, str(g)\n\nprint >>sys.stderr, 'Complete.'\n","repo_name":"gwcbi/cbi_reference_genomes","sub_path":"scripts/add_tss_id_hg38full.py","file_name":"add_tss_id_hg38full.py","file_ext":"py","file_size_in_byte":3772,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"10174692626","text":"from asyncio.log import logger\nfrom copy import copy\nfrom email.errors import HeaderParseError\nfrom email.quoprimime import header_check\nfrom inspect import isfunction\nimport logging\nimport re\nfrom typing import List\nfrom .creatList import ListNode\nfrom src.common.logger import Logger\n\n\nclass OperateList(object):\n    \"\"\"\n    operate list\n    \"\"\"\n    def __init__(self) -> None:\n        self.logger = Logger(\"Operate List\").getlogger(leavel=logging.DEBUG)\n\n    def createList(self, length) -> ListNode:\n        \"\"\"\n        create list\n        \"\"\"\n        listnode = None\n        for i in range(length,0,-1):\n            listnode = ListNode(i, listnode)\n        self.logger.info(\"Create list success !!!\")\n        return listnode\n\n    def printList(self, head):\n        \"\"\"\n        Print list\n        \"\"\"\n        if (head == None):\n            self.logger.error(\"List is None !!!\")\n            return None\n\n        list_var = [] \n        while head:\n            list_var.append(head.val)\n            head = head.next\n        self.logger.info(f\"Print list: {list_var}\")\n\n    def reverseList(self, head: ListNode) -> ListNode:\n        \"\"\"\n        Reverse list (iterate)\n        \"\"\"\n        # if (head == None):\n        #     self.logger.error(\"List is None !!!\")\n        #     return head \n        re_list = None\n        while head: \n            tmp = head.next\n            head.next = re_list\n            re_list = head\n            head = tmp\n        self.logger.info(f\"Reverse list success !!!\")\n        return re_list\n\n    def recursion_re_list(self, head: ListNode) -> ListNode:\n        \"\"\"\n        Reverse list (recursion)\n        \"\"\"\n        if (head == None or head.next == None):\n            return head\n        newhead = self.recursion_re_list(head.next)\n        head.next.next = head\n        head.next = None\n        return newhead\n\n    def intersect_list(self, headA: ListNode, headB: ListNode) -> ListNode:\n        \"\"\"\n        find intersect list\n        \"\"\"\n        if headA == None and headB == None:\n            return None\n        copy_headA = headA\n        copy_headB = headB\n        while copy_headA != copy_headB:\n            if copy_headA == None:\n                copy_headA = headB\n            else:\n                copy_headA = copy_headA.next\n            if copy_headB == None:\n                copy_headB = headA\n            else:\n                copy_headB = copy_headB.next\n        return copy_headA\n\n    def merge_2_list(self, headA: ListNode, headB: ListNode) -> ListNode:\n        \"\"\"\n        merge two ascending lists\n        
\"\"\"\n        #iteration\n        #time:O(m+n),space:O(1)\n        final_list = ListNode(-1)\n        tmp_list = final_list\n        while headA and headB:\n            if headA.val > headB.val:\n                tmp_list.next = headB\n                headB = headB.next\n            else:\n                tmp_list.next = headA\n                headA = headA.next\n            tmp_list = tmp_list.next\n        tmp_list.next = headB if headA == None else headA\n        # skip the dummy -1 head node\n        return final_list.next\n        # #recursion\n        # #time:O(m+n),space:O(m+n)\n        # if headA == None:\n        #     return headB\n        # if headB == None:\n        #     return headA\n        # if headA.val > headB.val:\n        #     headB.next = self.merge_2_list(headA, headB.next)\n        #     return headB\n        # else:\n        #     headA.next = self.merge_2_list(headA.next, headB)\n        #     return headA\n\n    def partition_list(self, head: ListNode, x: int) -> ListNode:\n        \"\"\"\n        partition list, less than x in front\n        \"\"\"\n        if head == None:\n            return None\n        # dummy heads for the two partitions\n        headA = ListNode(0, None)\n        headB = ListNode(0, None)\n        tmp_headA = headA\n        tmp_headB = headB\n        while head != None:\n            if head.val < x:\n                tmp_headA.next = head\n                tmp_headA = tmp_headA.next\n            else:\n                tmp_headB.next = head\n                tmp_headB = tmp_headB.next\n            head = head.next\n        tmp_headB.next = None\n        # splice the two partitions together, skipping the dummy nodes\n        tmp_headA.next = headB.next\n        return headA.next\n\n    def cycle_list(self, head:ListNode) -> ListNode:\n        \"\"\"\n        find the entry to cycle list \n        \"\"\"\n        fast, slow = head, head\n        while fast != None and fast.next != None:\n            fast, slow = fast.next.next, slow.next\n            if fast == slow:\n                fast = head\n                while fast != slow:\n                    fast, slow = fast.next, slow.next\n                return fast\n        return None\n","repo_name":"kkboy666/python_leetcode","sub_path":"src/list/operateList.py","file_name":"operateList.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23369180153","text":"import sqlite3\nimport requests\nimport json\nconn =sqlite3.connect('D:/jp949/Documents/Cali rec/proyecto/base_proy.s3db')\n#def LeerURL():\n   # url= \"https://la1.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/6VwWJ3bgVMf3X7TqZgE0auUVOULT7ybPTUHPSZ0sNXvSShg?api_key=RGAPI-2badf617-dd24-431e-9050-16eec33d171d\"\n  #  return requests.get(url)\n\ndef insertdata(p_championId,p_championLevel,p_championPoints,p_lastPlayTime,p_championPointsSinceLastLevel):\n    #project = ('Cool App with SQLite & Python', '2015-01-01', '2015-01-30'); \n    ourvalues = (p_championId,p_championLevel,p_championPoints,p_lastPlayTime,p_championPointsSinceLastLevel)\n    #Cursor Object\n    conn =sqlite3.connect('D:/jp949/Documents/Cali rec/proyecto/base_proy.s3db')\n    cursor=conn.cursor()\n    query = ('INSERT INTO Campeon (championId,championLevel,championPoints,lastPlayTime,championPointsSinceLastLevel) '\n    'VALUES (?,?,?,?,?)')\n    params = {\n        'championId': p_championId,\n        'championLevel': p_championLevel,\n        'championPoints': p_championPoints,\n        'lastPlayTime': p_lastPlayTime,\n        'championPointsSinceLastLevel': p_championPointsSinceLastLevel\n    }\n    cursor.execute(query,ourvalues)\n    #conn.execute(query, params)\n   # conn.execute(\"INSERT INTO Campeon (championId,championLevel,championPoints,lastPlayTime,championPointsSinceLastLevel) VALUES (221, 5, 44, 5454545, 454)\")\n    #conn.execute(\"INSERT INTO Campeon (championId,championLevel) VALUES (p_championId,p_championLevel)\")\n\n    conn.commit()\n    conn.close()\n#url= \"https://la1.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/6VwWJ3bgVMf3X7TqZgE0auUVOULT7ybPTUHPSZ0sNXvSShg/by-champion/19?api_key=RGAPI-213fa414-3a82-43e0-90df-5d96c1d98233\"\n#url= 
\"https://la1.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/6VwWJ3bgVMf3X7TqZgE0auUVOULT7ybPTUHPSZ0sNXvSShg?api_key=RGAPI-2badf617-dd24-431e-9050-16eec33d171d\"\n\nRespuesta = requests.get(\"https://la1.api.riotgames.com/lol/champion-mastery/v4/champion-masteries/by-summoner/6VwWJ3bgVMf3X7TqZgE0auUVOULT7ybPTUHPSZ0sNXvSShg?api_key=RGAPI-2badf617-dd24-431e-9050-16eec33d171d\")\n#Respuesta = requests.get(url)\ndatos= Respuesta.json()\n#with open(datos):\n\nfor campeones in datos :\n insertdata(campeones['championId'],campeones['championLevel'],campeones['championPoints'],campeones['lastPlayTime'],campeones['championPointsSinceLastLevel'])\n print('championId', campeones['championId'])\n print('champeonLevel', campeones['championLevel'])\n \n print('')\n\n\n","repo_name":"black994/proyectoTestisng","sub_path":"pyson.py","file_name":"pyson.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26997430633","text":"#!/usr/bin/env python\n#\n# Main segment matching script\n# Requires all variant data processed in extract_sites.py\n# Requires argweaver output\n#\n# ===========================================================\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport argparse\nimport gzip\nimport os\nimport sys\nimport csv\n\n# ===========================================================\n\nEND_OF_CHR = 249250621\nOVERLAP = 5e5 # Assumed argweaver was run on 5Mb windows with overlap length OVERLAP\n\n# ===========================================================\n\n\ndef get_positions(filename):\n \"\"\"\n Opens argweaver output files *smc.gz\n Extracts positions of haplotype segment breakpoints\n\n :param filename: *smc.gz argweaver output file\n :return: list of (start, end) tuples\n :rtype: list\n\n \"\"\"\n positions = [] # few enough rows that this won't cause memory issues\n with gzip.open(filename, 'r') as f:\n\n f.next()\n regions = f.next().split('\\t')\n lower = int(regions[2]) + OVERLAP\n upper = int(regions[3]) - OVERLAP\n\n for line in f:\n\n if line.startswith(\"TREE\"):\n fields = line.split(\"\\t\")\n start, end = int(fields[1]), int(fields[2])\n\n if end <= lower or start >= upper:\n continue\n if start <= 1 or end >= END_OF_CHR:\n continue\n\n positions.append((start, end))\n\n return positions\n\n\ndef test_pos(l):\n \"\"\"\n Checks for shared private derived alleles at position l[0]\n\n :param l: line in seqs file\n :return: list of scores for each modern hap allele\n :rtype: list\n\n \"\"\"\n scores = [0]*4\n\n bases = ['A', 'C', 'T', 'G']\n\n mod = l[1:5]\n ref, aa, anc_pos, anc_neg = l[-4:]\n aa = aa.upper()\n\n for i, m in enumerate(mod):\n\n if m == '.':\n continue\n\n if m == 'R':\n m = ref\n\n if aa in bases and m != aa:\n if m == anc_pos and m != anc_neg:\n scores[i] = 1\n elif m == anc_neg and m != anc_pos:\n scores[i] = -1\n\n if scores == [0]*4:\n return None\n else:\n return scores\n\n\ndef get_segment_scores(seqs, positions):\n \"\"\"\n Counts private derived alleles shared by each modern segment with at most one of the ancient haps\n\n Input sites file (\"seqs\") has format:\n > POS PIMA-1_0 PIMA-1_1 PIMA-2_0 PIMA-2_1 REFERENCE ANCESTRAL ANZICK CK-13\n 852875\tC\tT\tC\tT\tC\tC\tC\tT\n ...\n ...\n\n :param seqs: name of input site file\n :param positions: tuple of start and end position of segments obtained from argweaver\n :return: position and list of (2) scores\n :rtype: (positions, scores)\n\n 
\"\"\"\n\n with open(seqs) as s:\n s_it = iter(s)\n s_it.next()\n line = s_it.next().rstrip().split('\\t')\n for pos in positions:\n exit_pos = False\n keep = [[] for _ in range(4)]\n seg_score = []\n\n while int(line[0]) < pos[0]:\n try:\n line = s_it.next().rstrip().split('\\t')\n except StopIteration:\n exit_pos = True\n break\n\n while int(line[0]) <= pos[1]:\n test = test_pos(line)\n if test is not None:\n for i, t in enumerate(test):\n keep[i].append(t)\n try:\n line = s_it.next().rstrip().split('\\t')\n except StopIteration:\n exit_pos = True\n break\n\n if exit_pos:\n continue\n\n for ind_score in keep:\n if not any(ind_score):\n seg_score.append('.')\n elif 1 in ind_score and -1 in ind_score: # infinite sites violation\n seg_score.append('.')\n else:\n seg_score.append(sum(ind_score))\n\n yield (pos[0], pos[1]), seg_score\n\n\n# ===========================================================\n\n\np = argparse.ArgumentParser()\np.add_argument('-l', '--lfn', help='list of file names of arg output')\np.add_argument('-f', '--filename', help='file name of arg output, usually directory/out.*.smc.gz')\np.add_argument('-s', '--sequences', help='sequences combined in sites file')\nargs = p.parse_args()\n\n\n# ===========================================================\n\n\nif __name__ == \"__main__\":\n\n POP = 'Pima'\n\n if args.filename and args.lfn:\n sys.exit('Supply only one of filename and lofn')\n\n if not os.path.exists('{p}_anzick_CK-13.out'.format(p=POP)):\n os.makedirs('{p}_anzick_CK-13.out'.format(p=POP))\n\n for sample in range(2000, 4010, 10):\n out_name = \"{p}_anzick_CK-13.out/{p}_anzick_CK-13.out.{s}.txt\".format(p=POP, s=sample)\n print(out_name)\n\n with open(args.lfn) as lfn:\n for l in lfn:\n arg_file = \"/home/td329/projects/NA/S_{p}-1-2_chr1/{l}/out.{s}.smc.gz\"\\\n .format(p=POP, l=l.rstrip(), s=sample)\n if not os.path.isfile(arg_file):\n continue\n\n with open(out_name, 'a') as out:\n print('writing:', out_name)\n out.write(\"#NAME {}\\n\".format(arg_file))\n for sc in get_segment_scores(args.sequences, get_positions(arg_file)):\n if sc[1] == ['.']*4:\n continue\n else:\n w = csv.writer(out, delimiter='\\t')\n w.writerow([sc[0]] + sc[1])\n\n\n# ===========================================================","repo_name":"td329/NA-hapmatch-2018","sub_path":"scripts/pymatch.py","file_name":"pymatch.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73918748353","text":"from cs50 import get_string\n\n\n# Ask for credit card number\ncc_number = get_string(\"Number: \")\n\n# Get number of digits of credit card number\ncc_size = len(cc_number)\n\n# Get digits of credit card number\ncc_digits = []\nfor i in range(cc_size):\n cc_digits.append(int(cc_number[i]))\n\n# Get sum from Luhn's algorithm\ncc_luhn_sum = 0\nfor i in range(cc_size):\n # If digit position is not in \"even from the last\"\n if ((cc_size - 1 - i) % 2 == 0):\n cc_luhn_sum += cc_digits[i]\n else:\n # If digit position is in \"even from the last\" and number of digits of its double is 2\n if len(str(2 * cc_digits[i])) == 2:\n ones_digit = int(str(2 * cc_digits[i])[1])\n tens_digit = int(str(2 * cc_digits[i])[0])\n cc_luhn_sum += ones_digit + tens_digit\n # If digit position is in \"even from the last\" and number of digits of its double is 1\n else:\n cc_luhn_sum += 2 * cc_digits[i]\n\n# Determine credit card type or if invalid\n# Criteria: Luhn algorithm, size of credit card number, and beginning digits\nif 
(cc_luhn_sum % 10 == 0) and (cc_size == 15) and (cc_digits[0] == 3 and cc_digits[1] in [4, 7]):\n    print(\"AMEX\")\nelif (cc_luhn_sum % 10 == 0) and (cc_size == 16) and (cc_digits[0] == 5 and cc_digits[1] in [1, 2, 3, 4, 5]):\n    print(\"MASTERCARD\")\nelif (cc_luhn_sum % 10 == 0) and (cc_size in [13, 16]) and (cc_digits[0] == 4):\n    print(\"VISA\")\nelse:\n    print(\"INVALID\")","repo_name":"ijborda/cs50","sub_path":"sentimental-credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27496632829","text":"# this is main.py in PyPoll\nimport os\nimport csv\nimport operator\nimport numpy as np\n\n# initialize the variables\nfilename = input(\"Would you like to see the Election Results:\")\ntotal_votes = 0\ncandidate = \"\"\nvote_details = {}\nwinner_votes = 0\nwinner = \"\"\ncandidate_list = {}\nvoter_id = []\nvote_count=[] #empty list to count votes from vote details\ncounter= 0\nvote_percentage = 0.0\n\n# set the path\nelection_data_csv = os.path.join(\"..\", \"election_data.csv\")\n\n# open the election_data.csv and read the rows\nwith open(\"election_data.csv\", newline=\"\") as csvfile:\n    csvreader = csv.reader(csvfile, delimiter=\",\")\n\n    next(csvreader)\n\n    #loop to count total votes by counting all rows\n    for row in csvreader:\n        candidate=row[2]\n        if candidate not in vote_details:\n            vote_details.update( {candidate:1}) #1 means the start\n        elif candidate in vote_details:\n            d={candidate:vote_details.get(candidate)+1}\n            vote_details.update(d)\n\nprint(vote_details)\n\n#This returns the key that corresponds to the max value\nwinner = max(vote_details.items(), key=operator.itemgetter(1))[0]\nprint(winner)\n\n\n#vote_percentage \n#loop to obtain percentages\ntotal_votes=sum(vote_details.values())\nfor candidate, votes in vote_details.items():\n    vote_percentage=votes*100.0/total_votes\n    print(candidate, votes, vote_percentage)\n\nvote_count=[candidate, votes, vote_percentage]\nprint(vote_count) \n\n\n#Print Election Results\ndashbreak=\"--------------------------------------------\"\nprint(\"Election Results\")\nprint(dashbreak)\nprint(f\"Total Votes:{total_votes}\")\nprint(dashbreak)\nfor candidate, votes in vote_details.items():\n    vote_percentage=votes*100.0/total_votes\n    print(candidate, votes, vote_percentage)\nprint(dashbreak)\nprint(f\"Winner:{winner}\")\nprint(dashbreak)\n\n# Write it to a text file\nsave_file = filename.replace(\".csv\", \"\") + \"_result.txt\"\nfilepath = os.path.join(\"..\", \"PyPoll_output.txt\")\n\n# open the file and write rows with description\nwith open(filepath, 'w') as text:\n    text.write(\"Election Results\" + \"\\n\")\n    text.write(dashbreak + \"\\n\")\n    text.write(f\"Total Votes:{total_votes}\" + \"\\n\")\n    text.write(dashbreak + \"\\n\")\n    for candidate, votes in vote_details.items():\n        vote_percentage=votes*100.0/total_votes\n        text.write(f\"{candidate} {votes} {vote_percentage}\" + \"\\n\")\n    text.write(dashbreak + \"\\n\")\n    text.write(f\"Winner:{winner}\" + \"\\n\")\n","repo_name":"ingris2020/Ingris-Python-Challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
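The PyPoll script above builds the tally, percentages, and winner by hand. For reference, the same computation fits in a few lines with collections.Counter; this is a sketch only, not part of the original repo, assuming the same election_data.csv layout with the candidate name in column 2:

from collections import Counter
import csv

with open("election_data.csv", newline="") as csvfile:
    reader = csv.reader(csvfile, delimiter=",")
    next(reader)                         # skip the header row
    tally = Counter(row[2] for row in reader)

total = sum(tally.values())
winner, _ = tally.most_common(1)[0]      # candidate with the most votes
for name, votes in tally.items():
    print(name, votes, votes * 100.0 / total)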
+{"seq_id":"17540273022","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n    path('create', views.add_message, name='add_message'),\n    path('read', views.get_messages, name='get_messages'),\n    path('update/', views.update_message, name='update_message'),\n    path('delete', views.delete_message, name='delete_message'),\n]","repo_name":"netanelav/cloud-foundry","sub_path":"django-cf/project/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17089299903","text":"T = int(input())\r\nfor t in range(T):\r\n    scores = list(map(int, input().split()))\r\n    scores = sorted(scores)\r\n    scores.pop(0)\r\n    scores.pop(3)\r\n    if scores[2] - scores[0] >= 4:\r\n        print(\"KIN\")\r\n    else:\r\n        print(sum(scores))","repo_name":"hyoung0/algorithm","sub_path":"백준/Bronze/9076. 점수 집계/점수 집계.py","file_name":"점수 집계.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16828789606","text":"\r\nfrom tkinter import*\r\n\r\n# create root window\r\nroot = Tk()\r\n\r\n# root window title and dimension\r\nroot.title(\"Python.GUI.Project\")\r\nroot.geometry('350x200')\r\n\r\n# creating a function\r\ndef myfunc():\r\n    Label(root, text=\"Saving...\").pack()\r\n\r\n# adding menu bar in root window\r\n# new item in menu bar called 'New'\r\n# adding more items in the menu bar\r\nmenu = Menu(root)\r\nitem = Menu(menu, tearoff=0)\r\nm1 = Menu(menu, tearoff=0)\r\nitem.add_command(label='New')\r\nmenu.add_cascade(label='File', menu=item)\r\nroot.config(menu=menu)\r\n\r\n# adding a label to the root window\r\nm1.add_command(label=\"Save\", command=myfunc)\r\nlbl = Label(root, text = \"Are you a Programmer?\")\r\nlbl.grid()\r\n\r\n# separating two labels in menu\r\nm1.add_separator()\r\nm1.add_command(label=\"Exit\", command=quit)\r\nmenu.add_cascade(label=\"File\", menu=m1)\r\n\r\n# adding one more drop down\r\nm2 = Menu(menu, tearoff=0)\r\n\r\n# adding label\r\nm2.add_command(label=\"Find\", command=myfunc)\r\n\r\n# separator\r\nm2.add_separator()\r\nm2.add_command(label=\"Exit\", command=quit)\r\nmenu.add_cascade(label=\"Edit\", menu=m2)\r\n\r\n# adding entry field\r\ntxt = Entry(root, width=10)\r\ntxt.grid(column =1, row=0)\r\n\r\ndef clicked():\r\n\r\n    res = \"You wrote \" + txt.get()\r\n    lbl.config(text = res)\r\n\r\n# button widget with red text\r\nbtn = Button(root, text = \"Click me\" ,\r\n             fg = \"red\", command=clicked)\r\n\r\nbtn.grid(column=2, row=0)\r\n\r\nroot.mainloop()\r\n","repo_name":"tomaszziach/Basic-GUI","sub_path":"First GUI.py","file_name":"First GUI.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"165724353","text":"#Time complexity: O(m*n)\nclass Solution:\n    def countisland(self, grid):\n        def helper(i, j):\n            if i < 0 or i >= ROW or j < 0 or j >= COL or grid[i][j] == \"0\":\n                return\n            grid[i][j] = \"0\"\n            helper(i, j + 1)\n            helper(i, j - 1)\n            helper(i + 1, j)\n            helper(i - 1, j)\n\n        result = 0\n        ROW, COL = len(grid), len(grid[0])\n        for i in range(ROW):\n            for j in range(COL):\n                if grid[i][j] == \"1\":\n                    result += 1\n                    helper(i,j)\n        return result\n\n\n\ndef main():\n    grid = [\n    [\"1\",\"1\",\"0\",\"0\",\"0\"],\n    [\"1\",\"1\",\"0\",\"0\",\"0\"],\n    [\"0\",\"0\",\"1\",\"0\",\"0\"],\n    [\"0\",\"0\",\"0\",\"1\",\"1\"]\n    ]\n    a = Solution\n    print(a.countisland(a, grid))\n\n    grid = [\n    [\"1\",\"1\",\"1\",\"1\",\"0\"],\n    
[\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n ]\n print(a.countisland(a, grid))\n\n\n\nmain()","repo_name":"MiaWangGettinJob/Final-6205","sub_path":"Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10603026644","text":"#!/usr/bin/python3\n\ndef format_input(filename):\n \"\"\"Format the input to process\n\n :filename: The filename\n :returns: List of items.\n \"\"\"\n with open(filename) as file:\n data = []\n for line in file:\n data.append(line.rstrip('\\n'))\n return data\n\n\ndef decoder(lower, upper, range: tuple, code):\n \"\"\"decode the number by splitting in halves\n\n :lower: Character that represents lower\n :upper: Character that represents upper\n :range: Range to start splitting (0, 249)\n :code: string to decode.\n :returns: decoded number.\n \"\"\"\n left, right = range\n for letter in code:\n distance = (right - left) // 2\n if letter == lower:\n right = right - (distance + 1)\n elif letter == upper:\n left = left + (distance + 1)\n # print(code, left, right)\n return left\n\n\ndef main():\n data = format_input('input.txt')\n result = []\n max_id = 0\n for code in data:\n row = decoder('F', 'B', (0, 127), code[:-3])\n column = decoder('L', 'R', (0, 7), code[-3:])\n id = row * 8 + column\n result.append((row, column, id))\n if id > max_id:\n max_id = id\n print(\"Puzzle 1 max id is\", max_id)\n\n result.sort(key=lambda x: x[2])\n results_ids = [item[2] for item in result]\n min_id = results_ids[0]\n max_id = results_ids[-1]\n for id in range(min_id, max_id + 1):\n if id not in results_ids:\n print(\"Puzzle 2 missing id is\", id)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"muniter/advent-of-code","sub_path":"2020/05/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74881213954","text":"#!/usr/bin/env python\n\n\n'''\nUsage:\n atriumconsole.py --config --channel --message-data=...\n atriumconsole.py --config --channel -s\n'''\n\nimport os, sys\nimport json\nfrom multiprocessing import Process\nimport docopt\nimport redis\nimport utils\nfrom snap import common\nfrom abc import ABC\n\n\ndef parse_cli_params(params_array):\n data = {}\n if len(params_array):\n params_string = params_array[0]\n nvpair_tokens = params_string.split(',')\n for nvpair in nvpair_tokens:\n if ':' not in nvpair:\n raise Exception('parameters passed to warp must be in the format .')\n\n tokens = nvpair.split(':') \n key = tokens[0]\n value = tokens[1]\n data[key] = value\n\n return data\n\n\n\ndef main(args):\n print(common.jsonpretty(args))\n\n channel_id = 'atriumd_ipc_rcv_channel'\n msg_params = args['--message-data']\n\n\n redis_params = {\n 'host': '172.25.0.2',\n 'port': 6379,\n 'db': 0\n }\n\n redis_client = redis.StrictRedis(**redis_params)\n\n if args['-s']:\n raw_input = []\n for line in utils.read_stdin():\n raw_input.append(line)\n json_string = ''.join(raw_input)\n msg_dict = json.loads(json_string)\n\n else:\n msg_dict = parse_cli_params(msg_params)\n \n num_subscribers = redis_client.publish(channel_id, json.dumps(msg_dict))\n print(f'message sent to {num_subscribers} subscribers.')\n\n\nif __name__ == '__main__':\n args = docopt.docopt(__doc__)\n 
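    # parse_cli_params above splits "key:value,key:value" pairs by hand; the
    # same parse fits in a dict comprehension. A sketch only (the helper name
    # is ours, and it takes the raw string rather than the docopt list):
    def parse_cli_params_sketch(params_string):
        pairs = (nvpair.split(':', 1) for nvpair in params_string.split(','))
        return {key: value for key, value in pairs}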
main(args)","repo_name":"carbonmike/pulse","sub_path":"atriumconsole.py","file_name":"atriumconsole.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23409062226","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '3.0.5dev'\n\ntests_require = [\n 'Products.Silva [test]',\n ]\n\n\nsetup(name='silva.app.news',\n version=version,\n description=\"News extension for Silva 3\",\n long_description=open(\"README.txt\").read() + \"\\n\" +\n open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n classifiers=[\n \"Framework :: Zope2\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n keywords='news silva zope2',\n author='Infrae',\n author_email='info@infrae.com',\n url='https://github.com/silvacms/silva.app.news',\n license='BSD',\n package_dir={'': 'src'},\n packages=find_packages('src'),\n namespace_packages=['silva', 'silva.app'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'Products.Silva',\n 'Products.SilvaExternalSources',\n 'feedparser',\n 'five.grok',\n 'icalendar',\n 'js.jquery',\n 'js.jqueryui',\n 'grokcore.chameleon',\n 'python-dateutil',\n 'setuptools',\n 'silva.app.document',\n 'silva.core.conf',\n 'silva.core.editor',\n 'silva.core.interfaces',\n 'silva.core.references',\n 'silva.core.services',\n 'silva.core.smi',\n 'silva.core.upgrade',\n 'silva.core.views',\n 'silva.core.xml',\n 'silva.fanstatic',\n 'silva.ui',\n 'z3locales',\n 'zeam.form.base',\n 'zeam.form.silva',\n 'zeam.form.ztk',\n 'zeam.utils.batch',\n 'zope.cachedescriptors',\n 'zope.component',\n 'zope.i18nmessageid',\n 'zope.interface',\n 'zope.intid',\n 'zope.lifecycleevent',\n 'zope.publisher',\n 'zope.schema',\n 'zope.traversing',\n ],\n entry_points = \"\"\"\n [zeam.form.components]\n recurrence = silva.app.news.widgets.recurrence:register\n tree = silva.app.news.widgets.tree:register\n path = silva.app.news.widgets.path:register\n [Products.SilvaExternalSources.sources]\n news = silva.app.news.codesources\n \"\"\",\n tests_require = tests_require,\n extras_require = {'test': tests_require},\n )\n","repo_name":"silvacms/silva.app.news","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3324828142","text":"from flask import Flask, render_template, redirect, url_for, request\nimport os\nimport requests, base64\nimport json\n# import matplotlib.pyplot as plt\n\napp = Flask(__name__)\n# API_GET_ID_USER = \"http://localhost:7071/api/HttpGetIdUser\"\nAPI_GET_ID_USER = \"https://funcp9app.azurewebsites.net/api/HttpGetIdUser\"\n# API_GET_RECO = \"http://localhost:7071/api/HttpContentBaseReco\"\nAPI_GET_RECO = \"https://funcp9app.azurewebsites.net/api/HttpContentBaseReco\"\n\n@app.route('/')\ndef index():\n response = requests.get(url = API_GET_ID_USER)\n # On récupère la liste des id des utilisateurs et on la passe à notre render\n liste_id = json.loads(response.text)\n return render_template('index.html', liste_id=liste_id)\n\n@app.route('/reco', methods=[\"POST\"])\ndef get_reco():\n print(\"get_reco\")\n id_user = request.form.get('id_user')\n param_reco = request.form.get('param_reco')\n param_ref = request.form.get('param_ref')\n param_nb_reco1 = request.form.get('param_nb_reco1')\n 
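    # Note on the request a few lines below: the payload is json.dumps-ed and
    # then handed to requests' `json=` parameter, which serializes its argument
    # again, so the service receives a double-encoded JSON string. A sketch of
    # the usual pattern (hypothetical helper, shown for illustration only):
    def post_reco_sketch(payload):
        # pass the dict itself and let `requests` do the single JSON encoding
        return requests.post(url=API_GET_RECO, json=payload)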
param_nb_reco2 = request.form.get('param_nb_reco2')\n\n data = {\"iduser\": id_user,\"paramreco\": param_reco,\"paramref\": param_ref,\"nbreco1\": param_nb_reco1,\"nbreco2\": param_nb_reco2}\n data_json = json.dumps(data)\n r = requests.post(url = API_GET_RECO, json = data_json)\n result = json.loads(r.text)\n print(result)\n return result\n \n\nif __name__ == \"__main__\":\n app.run(host ='0.0.0.0', port = 5000)","repo_name":"julien-IA/projet9-appweb","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32225015241","text":"from itertools import zip_longest\nfrom typing import List\nclass Solution:\n def addToArrayForm(self, num: List[int], k: int) -> List[int]:\n if k == 0:\n return num\n k_list = []\n while k != 0:\n k_list.append(k % 10)\n k = k // 10\n \n num = num[::-1]\n carry = 0\n ans = []\n for d1, d2 in zip_longest(num, k_list, fillvalue= 0):\n add = d1 + d2 + carry\n res = add % 10\n carry = add // 10\n ans.append(res)\n if carry != 0:\n ans.append(carry)\n return ans[::-1]\n\nsol = Solution()\nnum = [2,1,5]\nk = 806\nres = sol.addToArrayForm(num, k)\nprint(res)\n\n\n ","repo_name":"chrisbyd/leetcode_chris","sub_path":"string/989.py","file_name":"989.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36098265537","text":"def getMultiples(n):\n result = []\n for i in range(1, n + 1):\n intermediate = []\n for j in range(1, 6):\n intermediate.append(i * j)\n result.append(intermediate)\n return result\n\n\nn = int(input(\"Enter Number of Iterations: \"))\nprint(f\"Resultant List: {getMultiples(n)}\")\n","repo_name":"apassan23/Python","sub_path":"lists_multiples.py","file_name":"lists_multiples.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16948989502","text":"#!/usr/bin/python3\n\"\"\" Module Rectangle\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Class Rectangle \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n def integer_validator(self, name, value):\n \"\"\"\n Validates an integer\n \"\"\"\n if type(value) is not int:\n raise TypeError(\"{} must be an integer\".format(name))\n if value <= 0:\n raise ValueError(\"{} must be > 0\".format(name))\n\n @property\n def width(self):\n \"\"\" getter width \"\"\"\n return self.__width\n\n @width.setter\n def width(self, width):\n \"\"\" setter width \"\"\"\n if type(width) != int:\n raise TypeError(\"width must be an integer\")\n if width <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = width\n\n @property\n def height(self):\n \"\"\" getter heigth \"\"\"\n return self.__height\n\n @height.setter\n def height(self, height):\n \"\"\" setter height \"\"\"\n if type(height) != int:\n raise TypeError(\"height must be an integer\")\n if height <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = height\n\n @property\n def x(self):\n \"\"\" getter x \"\"\"\n return self.__x\n\n @x.setter\n def x(self, x):\n \"\"\" setter x\"\"\"\n if type(x) != int:\n raise TypeError(\"x must be an integer\")\n if x < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = x\n\n @property\n def y(self):\n \"\"\" getter y \"\"\"\n 
return self.__y\n\n @y.setter\n def y(self, y):\n \"\"\" setter y \"\"\"\n if type(y) != int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y\n\n def area(self):\n \"\"\"\n Returns the area\n \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\" print rectangle\"\"\"\n rec = \"\"\n if self.__width == 0 or self.height == 0:\n return rec\n\n for y in range(self.__y):\n print()\n\n for i in range(self.__height):\n print((' ' * self.__x) + ('#' * self.__width))\n\n def __str__(self):\n \"\"\"Return class representation \"\"\"\n txt = '[Rectangle] ({}) {}/{} - {}/{}'\n txt = txt.format(self.id, self.__x, self.__y, self.width, self.height)\n return txt\n\n def update(self, *args, **kwargs):\n \"\"\" Udpdate attributes \"\"\"\n if args:\n params = ('id', 'width', 'height', 'x', 'y')\n index = 0\n for arg in args:\n setattr(self, params[index], arg)\n index += 1\n else:\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def to_dictionary(self):\n \"\"\" returns the dictionary representation of a Rectangle \"\"\"\n return {'x': self.x, 'y': self.y, 'id': self.id,\n 'height': self.height, 'width': self.width}\n","repo_name":"hiwiA/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70106796995","text":"import xmltodict\nimport gzip\nimport duckdb\nimport polars as pl\n\nfrom airflow import DAG\n\nfrom pendulum import datetime\nfrom airflow.decorators import task\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\n\nDATA_EXTRACT_PATH = \"./data/2_extract\"\n\ndefault_args = {\n \"depends_on_past\": False,\n \"email_on_failure\": True,\n \"email_on_retry\": False,\n \"retries\": 1,\n # 'queue': 'bash_queue',\n # 'pool': 'backfill',\n # 'priority_weight': 10,\n # 'end_date': datetime(2016, 1, 1),\n # 'wait_for_downstream': False,\n # 'sla': timedelta(hours=2),\n # 'execution_timeout': timedelta(seconds=300),\n # 'on_failure_callback': some_function, # or list of functions\n # 'on_success_callback': some_other_function, # or list of functions\n # 'on_retry_callback': another_function, # or list of functions\n # 'sla_miss_callback': yet_another_function, # or list of functions\n # 'trigger_rule': 'all_success'\n}\n\ncon = duckdb.connect(\"file.db\")\ncon.execute(\n \"\"\"\n CREATE OR REPLACE TABLE collectivites \n (siret_coll INTEGER PRIMARY KEY, \n libelle_collectivite VARCHAR, \n nature_collectivite VARCHAR,\n departement VARCHAR\n );\n \"\"\"\n)\n\n\n@task\ndef ls_s3(hook):\n hook = S3Hook(aws_conn_id=\"minio_s3_conn\")\n keys = hook.list_keys(bucket_name=\"data\")\n return keys\n\n\n@task\ndef process_collectivite(key_doc: str, hook):\n object_s3 = hook.get_key(bucket_name=\"data\", key=key_doc)\n dict_from_xml = xmltodict.parse(\n gzip.GzipFile(fileobj=object_s3.get()[\"Body\"]), dict_constructor=dict\n )\n temp_df = pl.DataFrame(get_infos_coll(dict_from_xml))\n # coll_df = coll_df.vstack(temp_df)\n print(temp_df)\n\n # coll_df.write_csv(DATA_EXTRACT_PATH, separator=\",\")\n return {\"xml processed\": key_doc}\n\n\n@task\ndef process_coll(keys: list, hook):\n for file in keys:\n object_s3 = hook.get_key(bucket_name=\"data\", key=file)\n dict_from_xml = xmltodict.parse(\n gzip.GzipFile(fileobj=object_s3.get()[\"Body\"]), dict_constructor=dict\n )\n temp_df = pl.DataFrame(\n 
get_infos_coll(dict_from_xml),\n schema={\n \"siret_coll\": pl.UInt32,\n \"libelle_collectivite\": pl.Utf8,\n \"nature_collectivite\": pl.Categorical,\n \"departement\": pl.Categorical,\n },\n )\n # coll_df = coll_df.vstack(temp_df)\n print(temp_df)\n\n # coll_df.write_csv(DATA_EXTRACT_PATH, separator=\",\")\n return {\"xml processed\": len(keys)}\n\n\ndef get_infos_coll(dict_from_xml: dict):\n infos_dict = dict()\n dict_entete_doc = dict_from_xml[\"DocumentBudgetaire\"][\"EnTeteDocBudgetaire\"]\n infos_dict[\"siret_coll\"] = dict_entete_doc[\"IdColl\"][\"@V\"]\n infos_dict[\"libelle_collectivite\"] = dict_entete_doc[\"LibelleColl\"][\"@V\"]\n infos_dict[\"nature_collectivite\"] = dict_entete_doc[\"NatCEPL\"][\"@V\"]\n infos_dict[\"departement\"] = dict_entete_doc.get(\"Departement\", {}).get(\"@V\", None)\n\n return infos_dict\n\n\nwith DAG(\n dag_id=\"etl_with_minio\",\n schedule_interval=\"@daily\",\n start_date=datetime(2021, 1, 1, tz=\"UTC\"),\n catchup=False,\n tags=[\"ETL\", \"AB\", \"Minio\"],\n default_args=default_args,\n) as dag:\n s3_hook = S3Hook(aws_conn_id=\"minio_s3_conn\")\n keys = ls_s3(s3_hook)\n process_coll(keys, s3_hook)\n # next_task = [process_collectivite(key, s3_hook) for key in keys]\n","repo_name":"Naxxos/airflow-ab","sub_path":"dags/etl_with_minio.py","file_name":"etl_with_minio.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23544207821","text":"#returns string: minimum # flips (if possible)\n# n: pancake sequence (+/-), size of flipper\ndef solve(input):\n pancakes = input.split()[0]\n pancakes_list = list(pancakes)\n size = int(input.split()[1])\n result = 0\n\n for i in range(0, (len(pancakes)-(size-1))):\n if pancakes_list[i] == \"-\": #flip\n for j in range(i, i+size):\n if (pancakes_list[j] == \"-\"):\n pancakes_list[j] = \"+\"\n else:\n pancakes_list[j] = \"-\"\n result += 1\n\n for x in pancakes_list:\n if x == \"-\":\n return \"IMPOSSIBLE\"\n return str(result)\n\nif __name__ == \"__main__\":\n test_case_num = raw_input()\n for i in range(1, int(test_case_num)+1):\n input = raw_input()\n result = solve(input)\n print(\"Case #\" + str(i) + \": \" + result)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2695.py","file_name":"2695.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24338895635","text":"from .base import MissionChallenge\nfrom ..unlocks import IncreasedXPUnlockable\n\n\nclass MainStoryMissionChallenge(MissionChallenge):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"Main Story Completed\"\n self.unlockable = IncreasedXPUnlockable()\n self.required_mission = \"GD_Episode17.M_Ep17_KillJack\"\n\n def save_challenge(self, save_dict: dict) -> None:\n super().save_challenge(save_dict)\n save_dict[self.outer_dict_key][self.name] = {\n \"completed\": self.completed,\n \"completions\": self.total_completions,\n }\n","repo_name":"juso40/bl2sdk_Mods","sub_path":"RogueLands/challenges/mission/main_story.py","file_name":"main_story.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"61"} +{"seq_id":"10221998896","text":"from flask import Flask, request, redirect, url_for\nfrom sudoku_solver import solveSudoku\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n html = '
<form action=\"/solving\" method=\"post\">'\n    for lis in ['abc','def','ghi']:\n        for i in range(3):\n            for c in lis:\n                for j in [3*i+1,3*i+2,3*i+3]:\n                    html+='<input type=\"text\" name=\"'+c+str(j)+'\" size=\"1\">'\n                html+=' '\n        html+='<br>'\n    html+='<br>'\n    html+= '<input type=\"submit\" value=\"Solve\">'\n    html+= '</form>'\n    return html\n\n@app.route('/solving', methods=['POST'])\ndef process():\n    toSolve = ''\n    for c in 'abcdefghi':\n        for i in range(1,10):\n            toSolve+=str(request.form.get(c+str(i)))\n    # return toSolve\n    return redirect(url_for('solve', sudoku=toSolve))\n\n@app.route('/test')\ndef formTest():\n    return 'Nothing being tested!'\n\n@app.route('/<sudoku>')\ndef solve(sudoku):\n    solved = solveSudoku(sudoku)\n    response = ''\n    if solved == 'Solver Timed out':\n        response+= solved\n    else:\n        response+= '<table>'\n        for i in [0,27,54]:\n            for j in [0,3,6]:\n                for k in [0,9,18]:\n                    for l in range(3):\n                        response+='<td>'+solved[i+j+k+l]+'</td>'\n                    response+=' '\n            response+='<tr>'\n        response+='</table>'\n        response+='<br>'\n        response+='<br>'\n    # if solved == 'Solver Timed out':\n    #     response+= solved\n    # else:\n    #     solution = solved.split('\\n')\n    #     response+= '<tr><td>'+solution[0]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[1]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[2]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[3]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[4]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[5]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[6]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[7]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[8]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[9]+'</td></tr>'+'\\n'\n    #     response+= '<tr><td>'+solution[10]+'</td></tr>'\n    #     response+='\\n'\n    #     response+='</table>'\n    # return response\n    return response\n","repo_name":"CRenwick57/Sudoku-Solver","sub_path":"sudoku_web_app.py","file_name":"sudoku_web_app.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
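In the sudoku app above, request.form.get returns None for an empty box, and str(None) turns it into the text "None" inside the 81-character puzzle string. A small normalization sketch (the helper name is ours, not part of sudoku_web_app.py):

def normalize_cell(raw):
    """Map a missing or blank form field to '0'; keep digits 1-9 as-is."""
    value = (raw or '').strip()
    return value if value in '123456789' else '0'

# e.g. inside process():
# toSolve = ''.join(normalize_cell(request.form.get(c + str(i)))
#                   for c in 'abcdefghi' for i in range(1, 10))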
+{"seq_id":"26223517637","text":"#HW 08\r\n\r\nimport string\r\n\r\n#Problem 1\r\ndef file_copy(in_file, out_file):\r\n\r\n    cFrom = open(in_file,'r')\r\n    cTo = open(out_file,'w')\r\n\r\n    for lines in cFrom: \r\n        cTo.write(lines)\r\n\r\n    cFrom.close()\r\n    cTo.close()\r\n\r\n#Problem 2\r\ndef file_stats(in_file):\r\n    lines = 0\r\n    w = 0\r\n    c = 0\r\n    for i in in_file:\r\n        lines += 1\r\n        w += len(i.split())\r\n        c += len(i)\r\n    print(\"Lines: \" + str(lines) + \"\\nWords: \" + str(w) + \"\\nCharacters: \" + str(c))\r\n\r\n#Problem 3\r\ndef repeat_words(input_file, output_file):\r\n    inF = open(input_file)\r\n    outF = open(output_file,'w')\r\n    lineList = inF.readlines()\r\n    for line in lineList:\r\n        wordList = line.split()\r\n        cleanList = []\r\n        for word in wordList:\r\n            cleanList.append(word.strip(string.punctuation))\r\n        for word in cleanList:\r\n            if cleanList.count(word) >1:\r\n                outF.write(word + \" \")\r\n        outF.write(\"\\n\")\r\n","repo_name":"Ozypher/PythonDump","sub_path":"PYTHON/hw08sol.py","file_name":"hw08sol.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36744115703","text":"# -*- coding: utf-8 -*-\n# @File : predict.py\n# @Author: Runist\n# @Time : 2020/5/20 10:23\n# @Software: PyCharm\n# @Brief: Model prediction\n\nimport tensorflow as tf\nfrom main import siamese_network\nimport config as cfg\nimport cv2 as cv\nimport numpy as np\nimport os\n\n\ndef main():\n    left_path = \"./dataset/images_background/Arcadian/character07/0007_11.png\"\n    right_path = \"./dataset/images_background/Arcadian/character07/0007_03.png\"\n    font = cv.FONT_HERSHEY_SIMPLEX\n\n    left_img = cv.imread(left_path, cv.IMREAD_GRAYSCALE)\n    right_img = cv.imread(right_path, cv.IMREAD_GRAYSCALE)\n    image = np.concatenate((left_img, right_img), axis=1)\n\n    model = siamese_network()\n    model.load_weights(cfg.model_path)\n    left_img = tf.expand_dims(left_img, axis=0)\n    left_img = tf.expand_dims(left_img, axis=-1)\n    right_img = tf.expand_dims(right_img, axis=0)\n    right_img = tf.expand_dims(right_img, axis=-1)\n\n    left_img = tf.cast(left_img, tf.float32)\n    right_img = tf.cast(right_img, tf.float32)\n\n    result = model.predict([left_img, right_img])\n\n    if result > cfg.similar_threshold:\n        cv.putText(image, \"True\", (8, 18), font, 0.7, (0, 0, 0), 1)\n    else:\n        cv.putText(image, \"False\", (8, 18), font, 0.7, (0, 0, 0), 1)\n\n    print(\"similarity: {:.2f}%\".format(result[0][0] * 100))\n    cv.imshow(\"result\", image)\n    cv.waitKey(0)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Runist/SiameseNet","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"2992494719","text":"import asyncio\nimport datetime\n\nfrom notify_api.core import config as app_config\nfrom notify_api.db.models.notification_status import NotificationStatusEnum\n\nfrom notify_service.worker import cb_subscription_handler, job_handler, qsm\n\n\nasync def queue_worker(event_loop):\n    \"\"\"Monitor the NATS queue for email.\"\"\"\n    await qsm.run(loop=event_loop,\n                  config=app_config,\n                  callback=cb_subscription_handler)\n\n\nasync def pending_job_worker():\n    \"\"\"Handle sending of PENDING emails every 5 minutes.\"\"\"\n    while True:\n        await asyncio.sleep(app_config.PENDING_EMAIL_TIME_FRAME)\n        await job_handler(NotificationStatusEnum.PENDING)\n\n\nasync def failure_job_worker():\n    \"\"\"Handle resending of FAILURE emails every 10 minutes.\"\"\"\n    while True:\n        await asyncio.sleep(app_config.FAILURE_EMAIL_TIME_FRAME)\n        await job_handler(NotificationStatusEnum.FAILURE)\n\n\nasync def archive_job_worker(dt):\n    \"\"\"Handle archiving of delivered emails every day.\"\"\"\n    while True:\n        now = datetime.datetime.now()\n        remaining = (dt - now).total_seconds()\n        if remaining < 86400:\n            break\n        # asyncio.sleep doesn't like long sleeps, so don't sleep more\n        # than a day at a time\n        await asyncio.sleep(86400)\n    await job_handler(NotificationStatusEnum.DELIVERED)\n\nif __name__ == '__main__':\n    event_loop = asyncio.get_event_loop()\n\n    tasks = [asyncio.Task(queue_worker(event_loop)),\n             asyncio.Task(pending_job_worker()),\n             asyncio.Task(failure_job_worker())]\n    
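    # The periodic workers above share one shape: sleep for an interval, run a
    # job, repeat. A generic factory could replace them; this is a sketch only
    # (`make_periodic_worker` is our name, not part of this service):
    def make_periodic_worker(interval_seconds, status):
        async def worker():
            while True:
                await asyncio.sleep(interval_seconds)
                await job_handler(status)
        return worker
    # usage sketch:
    # tasks.append(asyncio.Task(make_periodic_worker(
    #     app_config.PENDING_EMAIL_TIME_FRAME, NotificationStatusEnum.PENDING)()))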
event_loop.run_until_complete(asyncio.gather(*tasks))\n\n    try:\n        event_loop.run_forever()\n    finally:\n        event_loop.close()\n","repo_name":"peter-freshworks/sbc-auth","sub_path":"queue_services/notify-queue/notify_queue.py","file_name":"notify_queue.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"23439862721","text":"from sys import stdin\r\nimport math\r\n\r\nfile = stdin\r\n\r\ndef read_line():\r\n    return file.readline().strip()\r\n\r\ndef read_int():\r\n    return int(read_line())\r\n\r\ndef word_to_list(word):\r\n    lst = list(word)\r\n    prev = 0\r\n    letter = []\r\n    count = []\r\n    for w in lst:\r\n        if w != prev:\r\n            letter.append(w)\r\n            count.append(1)\r\n            prev = w\r\n        else:\r\n            count[-1] = count[-1] + 1\r\n    return [letter, count]\r\ndef find_deviation(counts):\r\n    counts.sort()\r\n    mid = len(counts)//2\r\n    mid_element = counts[mid]\r\n    diff = 0\r\n    for count in counts:\r\n        diff = diff + abs(mid_element - count)\r\n    return diff\r\ndef process(length, lst):\r\n    table_head = None\r\n    doable = True\r\n    count_arrays = []\r\n    for word in lst:\r\n        [letter, count] = word_to_list(word)\r\n        if table_head == None:\r\n            table_head = letter\r\n            for x in range(len(table_head)):\r\n                count_arrays.append([])\r\n\r\n        elif table_head != letter:\r\n            doable = False\r\n            break\r\n        for x in range(len(count)):\r\n            count_arrays[x].append(count[x])\r\n    if doable:\r\n        diff = 0\r\n        for counts in count_arrays:\r\n            diff = diff + find_deviation(counts)\r\n        return str(diff)\r\n    else:\r\n        return \"Fegla Won\"\r\n\r\ncases = read_int()\r\nfor case_no in range(1, cases+1):\r\n    n = read_int()\r\n    lst = []\r\n    for ns in range(n):\r\n        line = read_line()\r\n        lst.append(line)\r\n    out = process(n, lst)\r\n    print(\"Case #{0}: {1}\".format(case_no, out))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_142/458.py","file_name":"458.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31950290916","text":"from lib2to3.pgen2.token import OP\nimport os\nfrom flask import Flask, flash, request, Response, jsonify, send_file, redirect, render_template, url_for\nfrom werkzeug.utils import secure_filename\nimport fileHandler as fileHandler\nimport speechHandler as speechHandler\nfrom random import randrange\nfrom languageProcessing.ProcessingEngine.processingEngine import Processing\nfrom languageProcessing.parseUtils.parseHandler import Parsing\n\n\n\nUPLOAD_FOLDER = './uploads'\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = './uploads'\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        file = request.files['file']\n        if file:\n            filename = secure_filename(file.filename)\n            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n            return redirect(url_for('upload'))\n    return render_template('index.html')\n\ndef allowed_file(filename):\n    FileTypes = {'txt', 'pdf', 'docx', 'pptx'}\n    return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in FileTypes\n\n# @app.route('/upload', methods=['POST'])\n# def upload_file():\n# try:\n# if 'file' not in request.files:\n# flash('No file part')\n# return Response(\"Bad Request\",status=400)\n\n# file = request.files['file']\n\n# filename_fixed = file.filename.replace(\" \", \"_\")\n\n# if file and allowed_file(filename_fixed):\n# filename = secure_filename(filename_fixed)\n# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n# randNum = randrange(1,900)\n# parse_tree_name = \"parse-tree-\"+str(randNum)+\".pdf\" \n# text = fileHandler.file_read(filename, parse_tree_name)\n# result = Processing.runAnalysis(text)\n# fileName_fix = (filename.rsplit( \".\", 1 )[ 0 ] )\n# speechHandler.text_to_speech(text)\n# path = 'parseDocs/' + parse_tree_name\n# Parsing.parser(result, parse_tree_name)\n# Parsing.print_named_entities(result)\n# if(result):\n# return send_file(path, as_attachment=True)\n# else:\n# return Response(\"Something went wrong, please check if your file is valid.\",status=400)\n# except:\n# return Response(\"Something went wrong, please check if your file is valid.\",status=500)\n\n\n\n@app.route('/text', methods=['POST'])\ndef upload_text():\n text = request.form['text']\n\n randNum = randrange(1,900)\n parse_tree_name = \"parse-tree-\"+str(randNum)+\".pdf\" \n result = Processing.runAnalysis(text)\n randNum = randrange(1,900)\n parse_tree_name = \"parse-tree-\"+str(randNum)+\".pdf\"\n path = 'parseDocs/' + parse_tree_name\n Parsing.parser(result, parse_tree_name)\n Parsing.print_named_entities(result)\n filename = secure_filename(\"temp\" + str(randNum))\n res = [x[0] for x in result[0]]\n output = \" \".join(res)\n speechHandler.text_to_speech(output)\n data = {'audio':filename+\".mp3\",\n 'text':result}\n #return send_file(path, as_attachment=True)\n return render_template('index.html')\n\napp.run(port=5000)","repo_name":"In-sp3ctr3/Python-TTS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13035970173","text":"import argparse\nimport sys\nimport time\n\nimport bosdyn.client\nimport bosdyn.client.estop\nimport bosdyn.client.lease\nimport bosdyn.client.util\nfrom bosdyn.api import (arm_command_pb2, geometry_pb2, robot_command_pb2, synchronized_command_pb2,\n trajectory_pb2)\nfrom bosdyn.client.frame_helpers import ODOM_FRAME_NAME\nfrom bosdyn.client.robot_command import (RobotCommandBuilder, RobotCommandClient,\n block_until_arm_arrives, blocking_stand)\nfrom bosdyn.util import seconds_to_duration\n\n\ndef force_wrench(config):\n \"\"\"Commanding a force / wrench with Spot's arm.\"\"\"\n\n # See hello_spot.py for an explanation of these lines.\n bosdyn.client.util.setup_logging(config.verbose)\n\n sdk = bosdyn.client.create_standard_sdk('ForceTrajectoryClient')\n robot = sdk.create_robot(config.hostname)\n bosdyn.client.util.authenticate(robot)\n robot.time_sync.wait_for_sync()\n\n assert robot.has_arm(), 'Robot requires an arm to run this example.'\n\n # Verify the robot is not estopped and that an external application has registered and holds\n # an estop endpoint.\n assert not robot.is_estopped(), 'Robot is estopped. 
Please use an external E-Stop client, ' \\\n 'such as the estop SDK example, to configure E-Stop.'\n\n lease_client = robot.ensure_client(bosdyn.client.lease.LeaseClient.default_service_name)\n with bosdyn.client.lease.LeaseKeepAlive(lease_client, must_acquire=True, return_at_exit=True):\n # Now, we are ready to power on the robot. This call will block until the power\n # is on. Commands would fail if this did not happen. We can also check that the robot is\n # powered at any point.\n robot.logger.info('Powering on robot... This may take a several seconds.')\n robot.power_on(timeout_sec=20)\n assert robot.is_powered_on(), 'Robot power on failed.'\n robot.logger.info('Robot powered on.')\n\n # Tell the robot to stand up. The command service is used to issue commands to a robot.\n # The set of valid commands for a robot depends on hardware configuration. See\n # RobotCommandBuilder for more detailed examples on command building. The robot\n # command service requires timesync between the robot and the client.\n robot.logger.info('Commanding robot to stand...')\n command_client = robot.ensure_client(RobotCommandClient.default_service_name)\n blocking_stand(command_client, timeout_sec=10)\n robot.logger.info('Robot standing.')\n\n # Unstow the arm\n unstow = RobotCommandBuilder.arm_ready_command()\n\n # Issue the command via the RobotCommandClient\n unstow_command_id = command_client.robot_command(unstow)\n robot.logger.info('Unstow command issued.')\n\n block_until_arm_arrives(command_client, unstow_command_id, 3.0)\n robot.logger.info('Unstow command finished.')\n\n # Demonstrate an example force trajectory by ramping up and down a vertical force over\n # 10 seconds\n\n f_x0 = 0 # Newtons\n f_y0 = 0\n f_z0 = 0\n\n f_x1 = 0 # Newtons\n f_y1 = 0\n f_z1 = -10 # push down\n\n # We won't have any rotational torques\n torque_x = 0\n torque_y = 0\n torque_z = 0\n\n # Duration in seconds.\n trajectory_duration = 5\n\n # First point on the trajectory\n force0 = geometry_pb2.Vec3(x=f_x0, y=f_y0, z=f_z0)\n torque0 = geometry_pb2.Vec3(x=torque_x, y=torque_y, z=torque_z)\n\n wrench0 = geometry_pb2.Wrench(force=force0, torque=torque0)\n t0 = seconds_to_duration(0)\n traj_point0 = trajectory_pb2.WrenchTrajectoryPoint(wrench=wrench0, time_since_reference=t0)\n\n # Second point on the trajectory\n force1 = geometry_pb2.Vec3(x=f_x1, y=f_y1, z=f_z1)\n torque1 = geometry_pb2.Vec3(x=torque_x, y=torque_y, z=torque_z)\n\n wrench1 = geometry_pb2.Wrench(force=force1, torque=torque1)\n t1 = seconds_to_duration(trajectory_duration)\n traj_point1 = trajectory_pb2.WrenchTrajectoryPoint(wrench=wrench1, time_since_reference=t1)\n\n # Build the trajectory\n trajectory = trajectory_pb2.WrenchTrajectory(points=[traj_point0, traj_point1])\n\n # Build the full request, putting all axes into force mode.\n arm_cartesian_command = arm_command_pb2.ArmCartesianCommand.Request(\n root_frame_name=ODOM_FRAME_NAME, wrench_trajectory_in_task=trajectory,\n x_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE,\n y_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE,\n z_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE,\n rx_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE,\n ry_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE,\n rz_axis=arm_command_pb2.ArmCartesianCommand.Request.AXIS_MODE_FORCE)\n arm_command = arm_command_pb2.ArmCommand.Request(\n arm_cartesian_command=arm_cartesian_command)\n synchronized_command = 
synchronized_command_pb2.SynchronizedCommand.Request(\n arm_command=arm_command)\n robot_command = robot_command_pb2.RobotCommand(synchronized_command=synchronized_command)\n\n # Send the request\n command_client.robot_command(robot_command)\n robot.logger.info('Force trajectory command issued...')\n\n time.sleep(5.0 + trajectory_duration)\n\n # Power the robot off. By specifying \"cut_immediately=False\", a safe power off command\n # is issued to the robot. This will attempt to sit the robot before powering off.\n robot.power_off(cut_immediately=False, timeout_sec=20)\n assert not robot.is_powered_on(), 'Robot power off failed.'\n robot.logger.info('Robot safely powered off.')\n\n\ndef main(argv):\n \"\"\"Command line interface.\"\"\"\n parser = argparse.ArgumentParser()\n bosdyn.client.util.add_base_arguments(parser)\n options = parser.parse_args(argv)\n try:\n force_wrench(options)\n return True\n except Exception as exc: # pylint: disable=broad-except\n logger = bosdyn.client.util.get_logger()\n logger.exception('Threw an exception')\n return False\n\n\nif __name__ == '__main__':\n if not main(sys.argv[1:]):\n sys.exit(1)\n","repo_name":"boston-dynamics/spot-sdk","sub_path":"python/examples/arm_force_control/force_trajectory.py","file_name":"force_trajectory.py","file_ext":"py","file_size_in_byte":6372,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"61"} +{"seq_id":"10183903059","text":"from aiogram import types, Dispatcher\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup\nfrom config import bot, dp, photos, ADMINS\nfrom keyboards.client_kb import start_markup\nfrom random import choice\nfrom database.bot_db import sql_command_random\n\nasync def start_handler(message: types.Message):\n await bot.send_message(message.chat.id, f'привет, @{message.from_user.username}, \\nкомманды:'\n f'\\n/start\\n/meme\\n/prikol\\n чтобы закрепить сообщение !pin')\n\n\n\n\n\n\n\n\nasync def meme_handler(message: types.Message):\n photo = open(f'memes/{choice(photos)}', 'rb')\n\n await bot.send_photo(message.chat.id, photo=photo)\n\n\n\n\nasync def q1(message: types.Message):\n markup = InlineKeyboardMarkup()\n button_call_1 = InlineKeyboardButton('NEXT', callback_data='button_call_1')\n markup.add(button_call_1)\n question = 'сколько раз я вчера кушал?'\n answers = ['1', '2', '3', '4', 'глинтвейн', 'завтра']\n await bot.send_poll(\n chat_id=message.from_user.id,\n question=question,\n options=answers,\n is_anonymous=False,\n type='quiz',\n correct_option_id=3,\n explanation='я ел 4 раза',\n open_period=14,\n reply_markup=markup\n\n )\n\n\n\n\n\ndef register_hendlers_client(dp: Dispatcher):\n dp.register_message_handler(start_handler, commands=['start'])\n dp.register_message_handler(meme_handler, commands=['meme'])\n dp.register_message_handler(q1, commands=['prikol'])\n","repo_name":"beluywolk/BOT-SEREGA","sub_path":"handlers/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9033384682","text":"#class definition\nclass parentc:\n\n #create variable\n x = 0;\n name = \"\"\n\n #create constructor\n def __init__(self, nam):\n print(self, 'constructed')\n self.name = nam\n print(\"name: \", nam)\n\n def party(self):\n self.x = self.x + 1\n print(\"count: \",self.x)\n\n #create destructor\n def __del__(self):\n print(self, 'destructed')\n\n#extended/child class\nclass chclass(parentc):\n points = 0\n def 
td(self):\n self.points = self.points + 7\n self.party()\n print(self.name,\" and \", self.points)\n\ns = parentc(\"nihar\")\ns.party();\n\nc = chclass(\"mamulu\")\nc.party()\nc.td()\n","repo_name":"pivotghub/ALGODS","sub_path":"PYTHON/coursera/ClassConceptOne.py","file_name":"ClassConceptOne.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23485587341","text":"file = open('A-large.in')\nnum_of_cases = int(file.readline())\noutput_file = open('output1.txt', 'w')\n\nfor case in range(1,num_of_cases+1):\n\tvalue = file.readline()\n\tcheck = {'0':0, '1':0, '2':0, '3':0, '4':0, '5':0, '6':0, '7':0, '8':0, '9':0, }\n\n\tif int(value) != 0:\n\t\tfor digit in value:\n\t\t\tcheck[digit] = 1\n\n\t\ti = 1\n\t\tmult = ''\n\t\twhile(len(set(check.values()))!=1):\n\t\t\ti += 1\n\t\t\tmult = str(long(value)*i)\n\t\t\tfor digit in mult:\n\t\t\t\tcheck[digit] = 1\n\n\t\tprint >> output_file, \"Case #%d: %s\" % (case, mult)\n\n\telse:\n\t\tprint >> output_file, \"Case #%d: INSOMNIA\" % (case)\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_177/4248.py","file_name":"4248.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30056735483","text":"import logging\nimport os\nimport re\nimport json\nimport numpy as np\nimport cv2\nimport time\nfrom .color2height import color2height\nfrom .mapLoc2EPSG import map_epsg2loc\nfrom osgeo import gdal, osr, ogr\nimport affine\n\n\nDATETIME_FORMAT = '%Y-%m-%d' # '%Y-%m-%d_%H-%M-%S'\n\n\ndef map_contour_meter_pix(img_shape, contours_m, x0_m, y0_m, x1_m, y1_m):\n imgw_m = x1_m - x0_m\n imgh_m = y1_m - y0_m\n\n imgh_px = img_shape[0]\n imgw_px = img_shape[1]\n\n # Map pixel coords to local coords\n maxx_px = imgw_px - 1\n maxy_px = imgh_px - 1\n contours_px = []\n coef_w = imgw_m/maxx_px\n coef_h = imgh_m/maxy_px\n for cntr in contours_m:\n cntr_px = []\n for pnt in cntr:\n x_m = pnt[0]\n y_m = pnt[1]\n # x_m = x0_m + x_px*coef_w\n x_px = int((x_m - x0_m)/coef_w)\n # y_m = y1_m - y_px*coef_h # Y-axis in image is opposite to Y-axis in geoloc CS\n y_px = int((y1_m - y_m)/coef_h)\n cntr_px.append([x_px, y_px])\n contours_px.append(cntr_px)\n return contours_px\n\n\ndef retrieve_pixel_value(geo_coords, data_source):\n \"\"\" Map points from CRS to image space \"\"\"\n\n # Get matrix to transform from CRS to image/pixel space\n forward_transform = affine.Affine.from_gdal(*data_source.GetGeoTransform())\n reverse_transform = ~forward_transform\n\n result = []\n for p in geo_coords:\n ppx = reverse_transform * p\n result.append((int(ppx[0] + 0.5), int(ppx[1] + 0.5)))\n\n return result\n\n\ndef get_via_item(proj_dir, mppx, src_shp_fname='shp.shp', src_epsg=4326):\n \"\"\" Create VIA item based on project content and geometry poygones stored in shape-file \"\"\"\n\n geotiff_fname = os.path.relpath(os.path.join(proj_dir, './orthophoto/orthophoto_export.tif'))\n in_shape_file = os.path.join(proj_dir, src_shp_fname)\n\n for f in [in_shape_file, geotiff_fname]:\n if not os.path.isfile(f):\n logging.error('There is no input file {}'.format(f))\n return None\n\n ds = gdal.Open(geotiff_fname)\n ds_epsg = int(osr.SpatialReference(wkt=ds.GetProjection()).GetAttrValue('AUTHORITY', 1))\n\n ds_transofrmation = ds.GetGeoTransform()\n ds_mppx = abs(ds_transofrmation[1])\n coords_scale = ds_mppx / mppx\n\n # input SpatialReference\n in_spat_ref = 
osr.SpatialReference()\n in_spat_ref.ImportFromEPSG(src_epsg)\n\n # output SpatialReference\n out_spat_ref = osr.SpatialReference()\n out_spat_ref.ImportFromEPSG(ds_epsg)\n\n # create the CoordinateTransformation\n coord_trans = osr.CoordinateTransformation(in_spat_ref, out_spat_ref)\n\n # get the input layer\n driver = ogr.GetDriverByName('ESRI Shapefile')\n in_dataset = driver.Open(in_shape_file, 0) # 0 means read-only. 1 means writeable.\n in_layer = in_dataset.GetLayer()\n # inFeatureCount = in_layer.GetFeatureCount()\n\n attribute_names = [field.name for field in in_layer.schema]\n\n size = int(-1)\n regions = list()\n file_attributes = dict()\n for inFeature in in_layer:\n # get the input geometry\n geom = inFeature.GetGeometryRef()\n if geom is None:\n logging.warning('Feature has no geometry. Skip feature')\n continue\n\n # Map the geometry from shape-file space to geo-tiff space\n geom.Transform(coord_trans)\n\n pts = geom.GetGeometryRef(0).GetPoints()\n\n # Map points from geotiff space into pixel space\n contour_px = retrieve_pixel_value(pts, ds)\n\n # Store results to VIA sub-structure\n shape_attributes = dict()\n shape_attributes['name'] = 'polygon'\n shape_attributes['all_points_x'] = [int(px[0] * coords_scale) for px in contour_px]\n shape_attributes['all_points_y'] = [int(px[1] * coords_scale) for px in contour_px]\n if len(attribute_names) > 0:\n region_attributes = dict(zip(attribute_names, [inFeature[field_name] for field_name in attribute_names]))\n else:\n region_attributes = dict()\n attributes = dict({'shape_attributes': shape_attributes, 'region_attributes': region_attributes})\n regions.append(attributes)\n in_layer.ResetReading() # You must call ResetReading if you want to start iterating over the layer again.\n\n f_item = dict({'filename': geotiff_fname, 'size': size, 'regions': regions, 'file_attributes': file_attributes})\n\n return tuple((geotiff_fname, f_item))\n\n\ndef map_shapefiles_to_via(rootdir, customer_list, proj_list, src_shp_fname, mppx):\n via_items = dict()\n for customer in customer_list:\n for proj_id in proj_list:\n proj_path = os.path.join(rootdir, customer, str(proj_id))\n\n item = get_via_item(proj_path, mppx, src_shp_fname=src_shp_fname, src_epsg=4326)\n if item is not None:\n # Overwrite special field by uniq id\n item[1]['filename'] = '../imgs/{}_{}.png'.format(customer, proj_id)\n via_items[item[0]] = item[1]\n\n return via_items\n\n\ndef get_geoloc_gata(gdalinfo, mapdata):\n # Get absolute values\n with open(gdalinfo, \"r\") as f:\n for line in f:\n if line.startswith('Upper Left'):\n _ = [float(value.strip()) for value in re.search(r'\\((.*?)\\)', line).group(1).split(',')]\n if line.startswith('Lower Left'):\n ll = [float(value.strip()) for value in re.search(r'\\((.*?)\\)', line).group(1).split(',')]\n if line.startswith('Upper Right'):\n ur = [float(value.strip()) for value in re.search(r'\\((.*?)\\)', line).group(1).split(',')]\n if line.startswith('Lower Right'):\n lr = [float(value.strip()) for value in re.search(r'\\((.*?)\\)', line).group(1).split(',')]\n\n odm_center = mapdata['ODMCenter']\n\n # Relocate coordinates to the ODMCenter\n # x0_m = ul[0] - odm_center[0]\n # y1_m = ul[1] - odm_center[1]\n x0_m = ll[0] - odm_center[0]\n # y0_m = ll[1] - odm_center[1]\n # x1_m = ur[0] - odm_center[0]\n y1_m = ur[1] - odm_center[1]\n x1_m = lr[0] - odm_center[0]\n y0_m = lr[1] - odm_center[1]\n\n return x0_m, y0_m, x1_m, y1_m\n\n\ndef get_raster_info(img_fname):\n gtif = gdal.Open(img_fname)\n img_shape = [gtif.RasterYSize, 
gtif.RasterXSize]\n\n # Load as a gdal image to get geotransform (world file) info\n geo_trans = gtif.GetGeoTransform()\n\n mppx = np.fabs((geo_trans[1], geo_trans[5]))\n # tiepoint = np.array((geo_trans[0], geo_trans[3]))\n\n # Obtain length UNITS\n prj = gtif.GetProjection()\n srs = osr.SpatialReference(wkt=prj)\n unit = srs.GetAttrValue('unit') # todo: could return None. In that way behavior is not correct\n if unit is None:\n logging.error('Try to read raster info from file {} which does not contain these data'.format(img_fname))\n return None, None\n # logging.info('GeoTIFF length units: {}'.format(unit))\n scale_to_meter = 1.0\n if unit != 'metre':\n scale_to_meter = 0.3048\n\n mppx = mppx * scale_to_meter\n\n return img_shape, mppx\n\n\ndef create_orthophoto(dataset_path, dst_mppx, dest_img_fname):\n logging.info('Mapping orthographic image into {}'.format(dest_img_fname))\n\n recreated = False\n src_img_fname = os.path.join(dataset_path, 'orthophoto/orthophoto_export.tif')\n if not os.path.isfile(src_img_fname):\n logging.error('File {} does not exist'.format(src_img_fname))\n return False, recreated\n\n if not os.path.isfile(dest_img_fname):\n src_img_shape, src_mppx = get_raster_info(src_img_fname)\n if src_img_shape is None:\n logging.error('Cannot create orthophoto because file {} does not have raster info'.format(src_img_fname))\n return False, recreated\n\n rescale = src_mppx / dst_mppx\n gdal.Translate(dest_img_fname, src_img_fname,\n options=\"-outsize {} {} -ot Byte -r bilinear\".\n format(int(src_img_shape[1] * rescale[0]), int(src_img_shape[0] * rescale[1])))\n recreated = True\n\n return True, recreated\n\n\ndef create_heightmap_color(dataset_path, dst_img_shape, dest_himg_fname):\n fname = os.path.join(dataset_path, 'dem/color_relief/color_relief.tif')\n if not os.path.isfile(fname):\n logging.error('File {} does not exist'.format(fname))\n return False\n himg_bgr = cv2.imread(fname)[:, :, :3]\n if himg_bgr is None:\n logging.error('Cannot read file {}'.format(fname))\n return False\n himg_gray = color2height(os.path.join(dataset_path, 'dem/color_relief/color_relief.txt'), himg_bgr)\n himg_gray_resized = cv2.resize(himg_gray, (dst_img_shape[1], dst_img_shape[0]))\n cv2.imwrite(dest_himg_fname, himg_gray_resized)\n return True\n\n\ndef create_heightmap_dsm(dataset_path, dst_img_shape, dest_himg_fname):\n src_himg_fname = os.path.join(dataset_path, 'dem/dsm.tif')\n if not os.path.isfile(src_himg_fname):\n logging.error('File {} does not exist'.format(src_himg_fname))\n return False\n # Param '-scale' without parameters rescales the value's range from min/max to 0/255\n gdal.Translate(dest_himg_fname, src_himg_fname,\n options=\"-outsize {} {} -ot Byte -scale -r bilinear\".\n format(int(dst_img_shape[1]), int(dst_img_shape[0])))\n return True\n\n\ndef create_heightmap(dataset_path, dst_img_shape, dest_himg_fname):\n logging.info('Mapping heightmap image into {}'.format(dest_himg_fname))\n\n # Using DSM(even u8c1) provides better smoothed results rather using colored height-map\n if create_heightmap_dsm(dataset_path, dst_img_shape, dest_himg_fname):\n return True\n logging.info('There are no dsm-file. 
Try to operate with colored depth map')\n return create_heightmap_color(dataset_path, dst_img_shape, dest_himg_fname)\n\n\ndef build_from_project(dataset_path, dst_mppx, dest_img_fname, dest_himg_fname):\n is_success, bgr_recreated = create_orthophoto(dataset_path, dst_mppx, dest_img_fname)\n if not is_success:\n return False\n\n if dest_himg_fname is not None:\n if not os.path.isfile(dest_himg_fname) or bgr_recreated:\n dst_img_shape, _ = get_raster_info(dest_img_fname)\n if dst_img_shape is None:\n logging.error('Cannot create heightmap because file {} does not have raster info'.\n format(dest_img_fname))\n return False\n\n is_success = create_heightmap(dataset_path, dst_img_shape, dest_himg_fname)\n if not is_success:\n return False\n\n return True\n\n\ndef prepare_dataset(rootdir, destdir, dst_mppx, data_subset, img_fnames=None):\n logging.info('Prepare dataset...')\n\n # Create destination folders\n dest_img_folder = os.path.join(destdir, 'imgs')\n dest_himg_folder = os.path.join(destdir, 'himgs')\n dest_mask_folder = os.path.join(destdir, 'masks.{}'.format(data_subset))\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n if not os.path.exists(dest_img_folder):\n os.makedirs(dest_img_folder)\n if not os.path.exists(dest_himg_folder):\n os.makedirs(dest_himg_folder)\n if not os.path.exists(dest_mask_folder):\n os.makedirs(dest_mask_folder)\n\n # Collect customers\n customers = []\n filenames = os.listdir(rootdir) # get all files' and folders'\n for filename in filenames: # loop through all the files and folders\n if os.path.isdir(os.path.join(rootdir, filename)): # check whether the current object is a folder or not\n customers.append(filename)\n\n # Iterate customers\n img_fname_list = []\n dataset_strings_collection = []\n for customer in customers:\n customer_folder = os.path.join(rootdir, customer)\n # Collect datasets\n filenames = os.listdir(customer_folder) # get all files' and folders'\n datasets = []\n for filename in filenames: # loop through all the files and folders\n if os.path.isdir(os.path.join(customer_folder, filename)):\n datasets.append(filename)\n\n for dataset in datasets:\n dataset_path = os.path.join(customer_folder, dataset)\n uniq_fname = customer + '_' + dataset # ATTENTION: do not use DOTS '.' 
in filename\n uniq_fname = uniq_fname.replace('.', '_')\n\n if img_fnames is not None:\n if uniq_fname + '.png' not in img_fnames:\n continue\n\n logging.info('Iterate dataset {}'.format(dataset_path))\n #\n dest_img_fname = os.path.join(dest_img_folder, uniq_fname + '.png')\n dest_himg_fname = os.path.join(dest_himg_folder, uniq_fname + '.png')\n\n is_success = build_from_project(dataset_path, dst_mppx, dest_img_fname, dest_himg_fname)\n if not is_success:\n continue\n\n contour_fname = os.path.join(dataset_path, 'orthophoto/user_muckpile.json')\n if os.path.isfile(contour_fname):\n gdalinfo = os.path.join(dataset_path, 'orthophoto/tiles/gdalinfo.txt')\n if not os.path.isfile(gdalinfo):\n logging.error('File {} does not exist'.format(gdalinfo))\n continue\n mapdata_fname = os.path.join(dataset_path, 'orthophoto/tiles/mapdata.json')\n if not os.path.isfile(mapdata_fname):\n logging.error('File {} does not exist'.format(mapdata_fname))\n continue\n # Get mapdata\n with open(mapdata_fname, 'r') as f:\n mapdata = json.load(f)\n x0_m, y0_m, x1_m, y1_m = get_geoloc_gata(gdalinfo, mapdata)\n\n # Get last modification date of contours\n mod_timesince_epoc = os.path.getmtime(contour_fname)\n # Convert seconds since epoch to readable timestamp\n mod_time = time.strftime(DATETIME_FORMAT, time.localtime(mod_timesince_epoc))\n with open(contour_fname) as f:\n json_data = json.load(f)\n json_contours = json_data['contours']\n dst_img_shape, dst_f_mppx = get_raster_info(dest_img_fname)\n if dst_img_shape is None:\n logging.error('File {} does not have raster info'.format(dest_img_fname))\n continue\n\n mask = np.zeros(shape=(dst_img_shape[0], dst_img_shape[1], 1), dtype=np.uint8)\n\n for json_contour in json_contours:\n pts_wmerc = json_contour['pts_m']\n pts_m = map_epsg2loc(mapdata, np.array(pts_wmerc), 3857)\n pts_px = map_contour_meter_pix(dst_img_shape, [pts_m], x0_m, y0_m, x1_m, y1_m)\n pts_px = np.asarray(pts_px)\n cv2.fillPoly(mask, [pts_px], color=(255,))\n\n dest_mask_fname = os.path.join(dest_mask_folder, uniq_fname + '.png')\n cv2.imwrite(dest_mask_fname, mask)\n #\n dataset_strings_collection.append(customer + '/' + dataset + ',' + mod_time)\n img_fname_list.append('../imgs/{}'.format(os.path.basename(dest_img_fname))) # todo: /imgs ?\n with open(os.path.join(destdir, 'dataset_list.txt'), 'w') as f:\n for item in dataset_strings_collection:\n f.write(\"%s\\n\" % item)\n with open(os.path.join(dest_mask_folder, \"image_list.txt\"), 'w') as f:\n for item in img_fname_list:\n f.write(\"%s\\n\" % item)\n","repo_name":"oradzhabov/bigimage","sub_path":"kutils/PrepareData.py","file_name":"PrepareData.py","file_ext":"py","file_size_in_byte":15488,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41718081723","text":"''' \r\nCe programme effectue 9 prédictions MNIST à partir \r\ndu model tf-keras dans situé dans le répertoire 'models'.\r\n'''\r\n\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nimport cv2 #Gestion des images \r\nimport numpy as np #Gestion des arrays\r\nimport random #Géneration de nombre aléatoires\r\n\r\n#Chargement de la dataset MNIST :\r\nmnist=tf.keras.datasets.mnist\r\n(x_train,y_train),(x_test,y_test)=mnist.load_data()\r\n\r\n#Chargement du model :\r\nmodel=tf.keras.models.load_model('./models/mnist_model.model')\r\n\r\n#Affichage de quelques prédictions : \r\nfor i in range(9):\r\n random_index=random.randint(0,len(x_test))\r\n random_image=x_test[random_index] #Choix d'une image parmis le 
testset\r\n img_array=random_image.reshape(-1,28,28) #mise en forme de l'image\r\n prediction=model.predict(img_array) #Prédiction à partir du model chargé\r\n output=np.argmax(prediction[0]) #Mise en forme de la prédiction\r\n answer=y_test[random_index] #Récupération de la réponse dans l'array des labels\r\n plt.subplot(3,3,i+1) #Split des affichages\r\n plt.axis(\"off\") #Désactivation des axes\r\n if output==answer: #Vérification de la prédiction\r\n plt.title('Correct',color='g') #Prédiction correcte -> texte vert\r\n else:\r\n plt.title('Error : '+str(output),color='r') #Prédiction incorrecte -> texte rouge et affichage de la réponse\r\n plt.imshow(random_image,cmap='gray_r') #Affichage de l'image sélectionnée\r\nplt.show()#Affichage Matplot\r\n","repo_name":"adrienpillou/Machine_Learning_1AE","sub_path":"MNIST/predictions.py","file_name":"predictions.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6598197622","text":"import streamlit as st\nimport tensorflow as tf\nfrom PIL import Image\nimport numpy as np\nimport requests\nfrom io import BytesIO\nimport time\n\nidx2label = {\n 0: \"tench\",\n 1: \"English springer\",\n 2: \"cassette player\",\n 3: \"chain saw\",\n 4: \"church\",\n 5: \"French horn\",\n 6: \"garbage truck\",\n 7: \"gas pump\",\n 8: \"golf ball\",\n 9: \"parachute\"\n}\n\nmodel = tf.keras.models.load_model(\"best.hdf5\")\n\ndef predict(image):\n image = tf.image.resize(image, (160, 160))\n batch = tf.expand_dims(image, 0)\n res = model(batch)\n my_bar = st.progress(0)\n for precent in range(100):\n time.sleep(0.01)\n my_bar.progress(precent + 1)\n return idx2label[tf.argmax(res[0]).numpy()], tf.sigmoid(res[0])[tf.argmax(res[0]).numpy()].numpy()\n\n\nst.markdown(\"
<h1>Imagenette classifier</h1>
\",\n unsafe_allow_html=True)\n\nlabels = list(idx2label.values())\n\n\nres = st.sidebar.selectbox(\"Image from?\", options=[\"URL\", \"Local\"])\n\nif res == \"Local\":\n\n st.markdown(\"
<h3>Upload an image</h3>
\",\n unsafe_allow_html=True)\n\n uploaded_file = st.file_uploader(label='', type=[\"jpg\", \"png\"])\n\n bt = st.button(\"Predict\")\n\n if(uploaded_file is not None):\n st.image(uploaded_file, use_column_width=True)\n pred, confidence = \"\", 0\n if bt:\n with st.spinner(\"Classifying...\"):\n img = Image.open(uploaded_file)\n img = np.array(img)\n\n res = tf.convert_to_tensor(img, dtype=tf.float32)\n\n with tf.device(\"/CPU:0\"):\n conf_idx = tf.argmax(tf.sigmoid(res[0]))\n pred, confidence = predict(res)\n\n st.success(\n f\"It's a {pred} with a confidence of {confidence * 100:.4f}%\")\n\nelse:\n # get image from URL\n st.markdown(\"
<h3>Enter an image URL</h3>
\",\n unsafe_allow_html=True)\n\n url = st.text_input(\"Image URL\")\n bt = st.button(\"Predict\")\n\n if url:\n st.image(url, use_column_width=True)\n pred, confidence = \"\", 0\n if bt:\n with st.spinner(\"Classifying...\"):\n img = requests.get(url).content\n img = Image.open(BytesIO(img))\n img = np.array(img)\n\n res = tf.convert_to_tensor(img, dtype=tf.float32)\n\n with tf.device(\"/CPU:0\"):\n conf_idx = tf.argmax(tf.sigmoid(res[0]))\n pred, confidence = predict(res)\n\n st.success(\n f\"It's a {pred} with a confidence of {confidence * 100:.4f}%\")\n\nst.sidebar.markdown(f\"
<h3>Currently classification supported for the following classes below</h3>
\",\n unsafe_allow_html=True)\n\n# Create a unordered list from the labels\nst.sidebar.markdown(f\"
<ul>{''.join(['<li>{}</li>'.format(label) for label in labels])}</ul>
\",\n unsafe_allow_html=True)\n","repo_name":"Abhiswain97/Tensorflow-projects","sub_path":"Imagenette_classification/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3371184979","text":"# def add():\n# print(\"hello\")\n# print(add()) \n# def add():\n# print(\"yes\")\n# return \"sunil\"\n# print(add())\n# def mul(a,b):\n# return a*b\n# print(mul(10,2)) \n###find greatest numb\ndef great(a,b,c):\n if a>b and a>c:\n return f\"{a} is greater\"\n elif b>c and b>a:\n return b\n else:\n return c \nprint(great(3,4,55))\nprint(great(34,4,5)) \nprint(great(2,34,5)) \n\n\n","repo_name":"codekirpa/python-prog","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1568397958","text":"\nfrom flask import Flask, request, render_template\nfrom predict import recomendacao\n\napp = Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n solution = None\n if request.method == 'POST':\n\n peso = float(request.form.get('peso'))\n largura = float(request.form.get('largura'))\n comprimento = float(request.form.get('comprimento'))\n profundidade = float(request.form.get('profundidade'))\n L_imagem = 223270777\n \n solution = recomendacao(peso, largura, comprimento, profundidade, L_imagem)\n \n return render_template('index.html', solution=solution)\n else:\n return render_template('index.html', solution=solution)\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"CaioHOF/API_Mobi","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70111069955","text":"\nclass Solution(object):\n def maxSubArrayHelper(self,nums, l, r):\n if l > r:\n return -2147483647\n m = (l+r) / 2\n\n leftMax = sumNum = 0\n for i in range(m - 1, l - 1, -1): # 從中間向左遍歷\n sumNum += nums[i]\n leftMax = max(leftMax, sumNum)\n\n rightMax = sumNum = 0\n for i in range(m + 1, r + 1): # 從中間向右遍歷\n sumNum += nums[i]\n rightMax = max(rightMax, sumNum)\n\n leftAns = self.maxSubArrayHelper(nums, l, m - 1)\n rightAns = self.maxSubArrayHelper(nums, m + 1, r)\n\n return max(leftMax + nums[m] + rightMax, max(leftAns, rightAns))\n\n def maxSubArray(self, nums):\n return self.maxSubArrayHelper(nums, 0, len(nums) - 1)\n\n#################################################################\n#以下為暴力解\n\n# sum_list=[]\n# for i in range(len(nums)):\n# for j in range(1,len(nums)-i+1):\n# sum_list.append(sum(nums[i:i+j]))\n#\n# print(max(sum_list))","repo_name":"frankye1000/LeetCode","sub_path":"python/Maximum Subarray.py","file_name":"Maximum Subarray.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28726904391","text":"'''\n Write a Python Scipt that captures images\n from your webcam video stream \n Extracts all faces from the image frame(using HaarCascade)\n Stores the Fcae information into numpy arrays\n\n 1.Read and show video stream, capture images\n 2. Detect faces and show bounding box\n 3. Flatten the largest face image(grayscale) and save in a numpy array\n 4. 
Repeat the above for multiple people to generate training data\n\n'''\n\nimport cv2\nimport numpy as np\n\n#Intit Camera\ncap = cv2.VideoCapture(0)\n\n# Face Detection\nface_cascade = cv2.CascadeClassifier(\"/Users/ashwanisoni/Documents/Courses/Python/opencv-test/haarcascade_frontalface_alt.xml\")\nskip = 0\nface_data = []\ndataset_path = './data/'\nfile_name = input(\"Enter the name of person\")\n\nwhile True:\n\n ret, frame = cap.read()\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if ret == False:\n continue\n\n faces = face_cascade.detectMultiScale(gray_frame, 1.3, 5)\n faces = sorted(faces, key = lambda f:f[2]*f[3])\n\n # print(faces)\n for face in faces[-1:]:\n (x, y, w, h) = face\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)\n\n #Extract (Crop out the required face) : Region of Interest\n offset = 10\n face_section = frame[y-offset:y+h+offset, x-offset:x+w+offset]\n face_section = cv2.resize(face_section, (100, 100))\n \n skip += 1\n # Store every 10th face\n if skip%10==0:\n face_data.append(face_section)\n print(len(face_data))\n cv2.imshow(\"section\",face_section)\n\n cv2.imshow(\"dekhle\", frame)\n\n\n key_pressed = cv2.waitKey(1) & 0xFF\n if key_pressed == ord('q'):\n break\n\n# Convert our face list array into a nupy array\nface_data = np.asarray(face_data)\nface_data = face_data.reshape((face_data.shape[0], -1))\nprint(face_data.shape)\n\nnp.save(dataset_path+file_name+'.npy', face_data)\nprint(\"Data Successfully saved!!\")\ncap.release()\ncv2.destroyAllWindows()","repo_name":"ASHWANI-SONI/python","sub_path":"opencv-test/face_data_collect.py","file_name":"face_data_collect.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2626229697","text":"from PIL import Image\nfrom os.path import isfile, join\n\nfrom PIL import Image\nfrom tqdm import tqdm\n\nTRAIN_DATA_PATH = r'C:\\Users\\opacho\\Documents\\dataset_CARE_LABEL_2'\nTARGER_DATA_PATH = r'C:\\Users\\opacho\\Documents\\dataset_care_label_resize'\n\ndef train_images(data_path: str):\n return [f for f in listdir(data_path) if isfile(join(data_path, f))]\n\ndef resize_images(path: str, target_path: str):\n list_images = train_images(path)\n for i in tqdm(list_images, 'Resize_Images'):\n img = Image.open(join(path, i))\n new_image = img.resize((256, 256))\n new_image.save(join(target_path, i))\n\nif __name__ == '__main__':\n resize_images(path=TRAIN_DATA_PATH, target_path=TARGER_DATA_PATH)\n\n\n","repo_name":"OpachoAnd/badges_for_washing","sub_path":"preprocessing_photo.py","file_name":"preprocessing_photo.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20276941835","text":"import dataframe\nfrom datetime import datetime\nfrom datetime import timedelta\nimport time\nimport util\n\nfrom stockCrawler import USAStockCrawler, KoreaStockCrawler\nfrom sqliteStockDB import DayPriceDB, DayPriceFloatDB\nfrom stockData import StockData, BuyState\nfrom tradeStrategy import MaTradeStrategy, LarryRTradeStrategy, MACDTradeStrategy\n\n# 봇 설정\nclass BotConfig:\n def crawlingTime(self):\n pass\n\n#---------------------------------------------------------#\nclass KoreaBotConfig(BotConfig):\n def __init__(self):\n self.telegramToken_ = \"1080369141:AAFfXa9y70x-wqR2nJBKCVMNLmNFpm8kwA0\"\n self.telegramId_ = \"108036914\" \n \n self.isFileLoad_ = False\n #self.listFileName_ = \"Kr_watchList.txt\"\n self.crawler_ 
= KoreaStockCrawler()\n self.dayPriceDB_ = DayPriceDB(\"KoreaStockData.db\", \"day_price\")\n self.chartDir_ = \"chart_Korea/\"\n self.baseWebSite_ = \"http://finance.daum.net/quotes/A%s\"\n self.strategy_ = MACDTradeStrategy()\n self.isStock_ = True\n self.limitSize_ = 250\n \n def crawlingTime(self):\n now = time.localtime()\n startHour = 16\n startMin = 30\n if now.tm_wday < 5:\n if now.tm_hour == startHour and now.tm_min >= startMin: \n return True\n return False\n\n#---------------------------------------------------------#\nclass USABotConfig(BotConfig):\n def __init__(self):\n self.telegramToken_ = \"1080369141:AAFfXa9y70x-wqR2nJBKCVMNLmNFpm8kwA0\"\n self.telegramId_ = \"108036914\" \n\n self.isFileLoad_ = False\n #self.listFileName_ = \"USA_watchList.txt\"\n self.crawler_ = USAStockCrawler()\n self.dayPriceDB_ = DayPriceFloatDB(\"USAStockData.db\",\"day_price\")\n self.chartDir_ = \"chart_USA/\"\n self.baseWebSite_ = \"https://finance.yahoo.com/quote/%s\"\n self.strategy_ = MACDTradeStrategy()\n self.isStock_ = True\n self.limitSize_ = 200\n\n def crawlingTime(self):\n now = time.localtime()\n startHour = 7\n startMin = 0\n if 0 < now.tm_wday and now.tm_wday < 6:\n if now.tm_hour == startHour and now.tm_min >= startMin: \n return True\n return False\n","repo_name":"munifico/StockCrawler_py","sub_path":"botConfig.py","file_name":"botConfig.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"37369150660","text":"import pyautogui\nimport time\nfrom pynput import mouse\nimport os\nfrom PIL import Image\ndef on_click(x, y, button, pressed):\n if pressed:\n color = pyautogui.pixel(x, y)\n print(f\"Clicked at ({x}, {y}) with color {color}\")\n\n r, g, b = color\n img = Image.new('RGB', (300, 300), color=color)\n desktop_path = os.path.join(os.path.expanduser(\"~\"), \"Desktop\")\n filename = f\"color {r} {g} {b}.jpg\"\n img.save(os.path.join(desktop_path, filename), \"JPEG\", quality=90)\n\nlistener = mouse.Listener(on_click=on_click)\n\nlistener.start()\n\n\nwhile True:\n try:\n time.sleep(1)\n except KeyboardInterrupt:\n break\nlistener.stop()","repo_name":"MstfOztrk/Desktop-Color-Picker","sub_path":"colorpicker.py","file_name":"colorpicker.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19916664817","text":"\"\"\" This script is used to clean team offense data and load into database \"\"\"\nimport logging\nfrom pathlib import Path\n\nimport pandas as pd\nfrom sqlalchemy.engine import Connection\n\nfrom naccbis.common import metrics, utils\nfrom naccbis.common.splits import Split\n\n\nclass TeamOffenseETL:\n \"\"\"ETL class for team offense\"\"\"\n\n CSV_DIR = Path(\"csv/\")\n\n def __init__(\n self,\n year: int,\n split: Split,\n load_db: bool,\n conn: Connection,\n inseason: bool = False,\n ) -> None:\n self.year = year\n self.split = split\n self.load_db = load_db\n self.conn = conn\n self.inseason = inseason\n self.data: pd.DataFrame\n\n def extract(self) -> None:\n table = f\"raw_team_offense_{self.split}\"\n if self.inseason:\n table += \"_inseason\"\n logging.info(\"Reading data from %s\", table)\n self.data = pd.read_sql_table(table, self.conn)\n logging.info(\"Read %s records from %s\", len(self.data), table)\n if self.year:\n self.data = self.data[self.data[\"season\"] == self.year]\n\n def transform(self) -> None:\n self.data = metrics.basic_offensive_metrics(self.data)\n 
columns = [\n \"name\",\n \"season\",\n \"g\",\n \"pa\",\n \"ab\",\n \"r\",\n \"h\",\n \"x2b\",\n \"x3b\",\n \"hr\",\n \"rbi\",\n \"bb\",\n \"so\",\n \"hbp\",\n \"tb\",\n \"xbh\",\n \"sf\",\n \"sh\",\n \"gdp\",\n \"sb\",\n \"cs\",\n \"go\",\n \"fo\",\n \"go_fo\",\n \"hbp_p\",\n \"bb_p\",\n \"so_p\",\n \"iso\",\n \"babip\",\n \"avg\",\n \"obp\",\n \"slg\",\n \"ops\",\n \"sar\",\n ]\n if self.inseason:\n columns.insert(2, \"date\")\n self.data = self.data[columns]\n\n def load(self) -> None:\n table = f\"team_offense_{self.split}\"\n if self.inseason:\n table += \"_inseason\"\n\n if self.load_db:\n logging.info(\"Loading data into database\")\n utils.db_load_data(\n self.data, table, self.conn, if_exists=\"append\", index=False\n )\n else:\n filename = f\"{table}.csv\"\n logging.info(\"Dumping to csv\")\n self.data.to_csv(self.CSV_DIR / filename, index=False)\n\n def run(self) -> None:\n logging.info(\"Running %s\", type(self).__name__)\n logging.info(\"Year: %s Split: %s Load: %s\", self.year, self.split, self.load_db)\n self.extract()\n self.transform()\n self.load()\n","repo_name":"troymoench/naccbis","sub_path":"naccbis/cleaning/CleanTeamOffense.py","file_name":"CleanTeamOffense.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39341197856","text":"from django.conf.urls import patterns, url\nfrom repos.views import DeleteRepoItem\nfrom repos.models import CodeRepository\n\nurlpatterns = patterns('',\n\n url(r'^repos/(?P\\w+)/',\n DeleteRepoItem.as_view(\n model=CodeRepository,\n success_url='/repos/view/repos/',\n page_title='ITSY Delete Code Repo',\n page_heading='Delete Code Repository:'\n ),\n name='repos'),\n)","repo_name":"yetkinozturk/ITSY","sub_path":"repos/urls/delete.py","file_name":"delete.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"61"} +{"seq_id":"70900990593","text":"import numpy as np\nimport six.moves.cPickle as pickle\n\nfrom PBP_net_lista_single_matrices.SingleBayesianListaHandler import SingleBayesianListaHandler\nfrom compare_mnist.mnist_data import MnistData\nfrom freqLISTA.FrequentistListaHandler import FrequentistListaHandler\n\nimport matplotlib.pyplot as plt\n\n\nclass MnistSequentialComparator(object):\n def __init__(self, K, L, learning_rate=0.0001):\n\n self.data = MnistData(K=K)\n self.data.check_download()\n self.data.learn_dictionary()\n #self.data.generate_random_design_matrix()\n self.D = self.data.train_data.shape[1]\n\n self.freq_lista = FrequentistListaHandler(D=self.D, K=K, L=L, X=self.data.X, learning_rate=learning_rate)\n #self.bayesian_lista = BayesianListaHandler(D=self.D, K=K, L=L, X=self.data.X)\n self.shared_bayesian_lista = SingleBayesianListaHandler(D=self.D, K=K, L=L, X=self.data.X)\n\n self.freq_train_loss = []\n #self.bayesian_train_loss = []\n self.shared_bayesian_train_loss = []\n\n self.freq_validation_loss = []\n #self.bayesian_validation_loss = []\n self.shared_bayesian_validation_loss = []\n\n def train_iteration(self):\n\n # self.freq_train_loss.append(\n # self.freq_lista.train_iteration(beta_train=self.data.train_data, y_train=self.data.y_train))\n # self.bayesian_train_loss.append(\n # self.bayesian_lista.train_iteration(beta_train=self.data.train_data, y_train=self.data.y_train))\n # self.shared_bayesian_train_loss.append(\n # self.shared_bayesian_lista.train_iteration(beta_train=self.data.train_data, y_train=self.data.y_train))\n\n 
self.freq_train_loss.append(\n self.freq_lista.train_iteration_nmse(beta_train=self.data.train_data, y_train=self.data.y_train))\n #self.bayesian_train_loss.append(\n # self.bayesian_lista.train_iteration_nmse(beta_train=self.data.train_data, y_train=self.data.y_train))\n self.shared_bayesian_train_loss.append(\n self.shared_bayesian_lista.train_iteration_nmse(beta_train=self.data.train_data, y_train=self.data.y_train,\n sample_mean=False))\n\n # self.freq_validation_loss.append(\n # self.freq_lista.test(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n # self.bayesian_validation_loss.append(\n # self.bayesian_lista.test(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n # self.shared_bayesian_validation_loss.append(\n # self.shared_bayesian_lista.test(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n\n self.freq_validation_loss.append(\n self.freq_lista.test_nmse(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n #self.bayesian_validation_loss.append(\n # self.bayesian_lista.test_nmse(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n self.shared_bayesian_validation_loss.append(\n self.shared_bayesian_lista.test_nmse(beta_test=self.data.validation_data, y_test=self.data.y_validation))\n\n\nif __name__ == '__main__':\n\n np.random.seed(1)\n\n K = 100\n L = 10\n\n # batch_size = 5000\n # validation_size = 100\n\n saved_comparator_file_name = 'comparator_with_learnt_dictionary_10000_train_50_iter.pkl'\n\n\n if not saved_comparator_file_name:\n comparator = MnistSequentialComparator(K, L, learning_rate=0.0001)\n else:\n comparator = pickle.load(open(saved_comparator_file_name, 'rb'))\n\n\n\n\n #n_iter = 50\n\n #for _ in tqdm(range(n_iter)):\n # comparator.train_iteration()\n\n plt.semilogy(comparator.freq_train_loss, label=\"freq train loss\")\n #plt.semilogy(comparator.bayesian_train_loss, label=\"bayes train loss\")\n plt.semilogy(comparator.shared_bayesian_train_loss, label=\"shared bayes train loss\")\n\n plt.semilogy(comparator.freq_validation_loss, label=\"freq valid loss\")\n #plt.semilogy(comparator.bayesian_validation_loss, label=\"bayes valid loss\")\n plt.semilogy(comparator.shared_bayesian_validation_loss, label=\"shared bayes valid loss\")\n\n plt.legend()\n plt.show()\n\n #with open('test_S_convergence.pkl', 'wb') as f:\n # pickle.dump(comparator, f)\n\n# train size = 1000, validation size = 100, K = 100 with random matrix X on the first 4 iterations gives promising results","repo_name":"danilkuzin/BayesianLISTA","sub_path":"bayesian_lista_src/compare_mnist/compare_mnist_script.py","file_name":"compare_mnist_script.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23621983726","text":"'''\nName - Rishabh Patel\nBranch - Computer Engineering(CO)\nRoll No. 
- 0827CO191046\n\n'''\n\nprint(\"\\t\\t********Program to check whether a given number is an Armstrong number or not.********\\n\\n\")\nnum = input(\"\\tEnter The Number To Check Armstrong number : \")\nArmStrong = 0\ninNumber = int(num)\nstrlen = len(num)\nfor i in range(strlen):\n ArmStrong = ArmStrong + int(num[i])**strlen\nif(inNumber == ArmStrong): print(\"\\n\\tThe Given Number Is \\\"ArmStrong Number\\\" : \", inNumber,\" = \", ArmStrong)\nelse: print(\"\\n\\tThe Given Number Is \\\"Not ArmStrong Number\\\" : \", inNumber ,\" != \", ArmStrong)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#Name Rishabh Patel","repo_name":"pishabh625/Python70Programs","sub_path":"P24_Is_ArmStrong_Number.py","file_name":"P24_Is_ArmStrong_Number.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36703351510","text":"import argparse\nimport torch\nimport torch.nn as nn\nfrom dataset import EYEDataset\nimport numpy as np\nfrom torch.autograd import Variable\nfrom PIL import Image\nimport torchvision.models as models\nfrom model import FCN8s\n\n\narg_parser = argparse.ArgumentParser()\narg_parser.add_argument(\"-c\", \"--cuda\", action='store_true', default=False)\narg_parser.add_argument(\"-n\",\"--nb_worker\", type=int, default=4,help='# of workers')\nargs = arg_parser.parse_args()\n\nvgg16 = models.vgg16(pretrained=True) \nfcn = FCN8s(vgg16)\n\nEYE_PATH='./256fcn/'\ndef test(args,fcn=None):\n model_file_f = \"./models/b5.e200.fcn8s.pkl\"\n\n if model_file_f: \n fcn.load_state_dict(torch.load(model_file_f)) \n \n \n ds_val = EYEDataset(EYE_PATH, set='test')\n print(\"Loaded {} test data.\".format(len(ds_val)))\n #vis = visdom.Visdom(port=8097)\n if args.cuda:\n fcn =fcn.cuda(0)\n \n test_loader = torch.utils.data.DataLoader(dataset=ds_val,\n batch_size=1,num_workers=args.nb_worker) #,shuffle=False\n\n total_loss_f = 0.\n for i, (images,gts,base_name) in enumerate(test_loader):\n print(\"Loaded {}ed pic.\".format(base_name[0]))\n if args.cuda:\n images = Variable(images.cuda(0))\n gts = Variable(gts.cuda(0))\n with torch.no_grad():\n outputs =fcn(images)\n #loss_f = nn.CrossEntropyLoss()(outputs,gts)\n #total_loss_f +=loss_f.item()\n #print(\"loss:{}\".format(loss_f))\n predicted = ds_val.decode_segmap(outputs[0].cpu().data.numpy().argmax(0))\n predicted = (predicted *255.0).astype(np.uint8)\n predicted = Image.fromarray(predicted)\n pred_path = './output/'+base_name[0]+'.png'\n predicted.save(pred_path)\n #print(\"avg-loss:{}\".format(total_loss_f/len(test_loader)))\n\nif __name__ == '__main__':\n test(args,fcn)\n","repo_name":"sldz5/semantic-segmentation-on-Glaucoma-image","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"27516016974","text":"#K번째 약수\nimport sys\n# sys.stdin=open(\"input.txt\", \"r\")\n# n, k=map(int, input().split())\nn= 5\nk= 3\nsum = 0\nfor i in range(1,n+1):\n if n%i == 0:\n sum += 1\n if sum == k:\n print(k)\n break\nif sum != k:\n print(-1)\n \n#K번째 큰수\nsys.stdin=open(\"input.txt\", \"r\")\nn, k=map(int, input().split())\na=list(map(int, input().split()))\n\na.sort(reverse=True)\ncount, sum = 0, 0\nfor i in a:\n count += 1\n sum += i\n if count == k:\n print(sum)\n break\n \n \n \n#대표값\n# sys.stdin=open(\"input.txt\", \"r\")\n# n=int(input())\n# 
a=list(map(int, input().split()))\naverage=sum(a)/n\naverage=average+0.5\naverage=int(average)\ndifference = 55555\ncount = 0\n\nfor i in a:\n count += 1\n k = abs(average - i)\n if difference > k:\n difference = k\n output = [i]\n elif difference == k:\n output.append(k)\nprint(output)\n\n\n\n\n# sys.stdin=open(\"input.txt\", \"r\")\n# n, m=map(int, input().split())\nn, m = 6,5\ncount=[0]*(n+m+3)\nmax=0\nfor i in range(1, n+1):\n for j in range(1, m+1):\n count[i+j]=count[i+j]+1\n\nfor i in range(n+m+1):\n if count[i]>max:\n max=count[i]\n \nfor i in range(n+m+1):\n if count[i]==max:\n print(i, end=' ')\n","repo_name":"ondine0615/pasta","sub_path":"section2/장재석1-5문제풀이(2번못품).py","file_name":"장재석1-5문제풀이(2번못품).py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10251290307","text":"import serial\nimport modbus_tk.modbus_rtu as mr\nimport modbus_tk.defines as md\n\nmaster=mr.RtuMaster(serial.Serial(port=\"COM2\",baudrate=19200,bytesize=8,parity='N',stopbits=1,xonxoff=0))\nmaster.set_timeout(5.0)\nmaster.set_verbose(True)\n\n# master.execute(slave=1,function_code=md.READ_COILS,starting_address=1,quantity_of_x=5)\nhold_value=master.execute(slave=1,function_code=md.READ_HOLDING_REGISTERS,starting_address=110,quantity_of_x=5,output_value=5)\nprint(hold_value)\n\ncoils_value=master.execute(slave=1,function_code=md.READ_COILS,starting_address=1,quantity_of_x=5,output_value=0)\nprint(coils_value)\n\ndef main():\n \"\"\"main\"\"\"\n logger = modbus_tk.utils.create_logger(name=\"console\", record_format=\"%(message)s\")\n\n #Create the master\n master=mr.RtuMaster(serial.Serial(port=\"COM2\",baudrate=19200,bytesize=8,parity='N',stopbits=1,xonxoff=0))\n master.set_timeout(5.0)\n master.set_verbose(True)\n \n try:\n logger.info(\"running...\")\n logger.info(\"enter 'quit' for closing the server\")\n\n server.start()\n\n slave_1 = server.add_slave(1)\n slave_1.add_block('0', cst.HOLDING_REGISTERS, 0, 100)\n while True:\n cmd = sys.stdin.readline()\n args = cmd.split(' ')\n\n if cmd.find('quit') == 0:\n sys.stdout.write('bye-bye\\r\\n')\n break\n\n elif args[0] == 'add_slave':\n slave_id = int(args[1])\n server.add_slave(slave_id)\n sys.stdout.write('done: slave %d added\\r\\n' % (slave_id))\n\n elif args[0] == 'add_block':\n slave_id = int(args[1])\n name = args[2]\n block_type = int(args[3])\n starting_address = int(args[4])\n length = int(args[5])\n slave = server.get_slave(slave_id)\n slave.add_block(name, block_type, starting_address, length)\n sys.stdout.write('done: block %s added\\r\\n' % (name))\n\n elif args[0] == 'set_values':\n slave_id = int(args[1])\n name = args[2]\n address = int(args[3])\n values = []\n for val in args[4:]:\n values.append(int(val))\n slave = server.get_slave(slave_id)\n slave.set_values(name, address, values)\n values = slave.get_values(name, address, len(values))\n sys.stdout.write('done: values written: %s\\r\\n' % (str(values)))\n\n elif args[0] == 'get_values':\n slave_id = int(args[1])\n name = args[2]\n address = int(args[3])\n length = int(args[4])\n slave = server.get_slave(slave_id)\n values = slave.get_values(name, address, length)\n sys.stdout.write('done: values read: %s\\r\\n' % (str(values)))\n\n else:\n sys.stdout.write(\"unknown command %s\\r\\n\" % (args[0]))\n \n except :\n logger.info(\"发生错误\")\n\nif __name__ == \"__main__\":\n 
main()","repo_name":"dagangge/myPythonCode","sub_path":"Modbus/PMRTUMaster.py","file_name":"PMRTUMaster.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75221055554","text":"from functools import lru_cache\nimport sys\n\n\n# def calc(arr, i, n):\n# ret = 0\n# for j in range(n):\n# if i == j:\n# continue\n# ret += (arr[i] + arr[j])**2\n# return ret\n\n\n# 3*a^2 + 3*b^2 + 3*c^2 + 3*d^2 + 2*a*(b+c+d) + 2*b*(c+d) + 2*c*(d)\n\ndef calcTot(bs):\n ret = (n - 1) * sum(i**2 + j**2 for i, j in zip(aaa, bbb))\n tot_a = tot_b = 0\n for i in range(n):\n tot_a += [aaa, bbb][(bs >> i) & 1][i]\n tot_b += [bbb, aaa][(bs >> i) & 1][i]\n for i in range(n):\n tot_a -= [aaa, bbb][(bs >> i) & 1][i]\n tot_b -= [bbb, aaa][(bs >> i) & 1][i]\n ret += 2 * [aaa, bbb][(bs >> i) & 1][i] * tot_a\n ret += 2 * [bbb, aaa][(bs >> i) & 1][i] * tot_b\n return ret\n\n\n@lru_cache\ndef work(i, bs):\n if i == n:\n return calcTot(bs)\n return min(work(i + 1, bs), work(i + 1, bs | (1 << i)))\n\n\ndef run():\n global n, aaa, bbb\n n = int(input())\n aaa = list(map(int, input().split()))\n bbb = list(map(int, input().split()))\n print(work(0, 0))\n work.cache_clear()\n\n\ndef main():\n t = int(input())\n for _ in range(t):\n run()\n\n\nif __name__ == \"__main__\":\n # with open(\"_input.txt\", \"r\") as fin, open(\"_output.txt\", \"w\") as fout:\n # sys.stdin = fin\n # sys.stdout = fout\n # main()\n main()\n","repo_name":"HassnHamada/CP-Solutions","sub_path":"1637D.py","file_name":"1637D.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39372178456","text":"from . import *\nfrom datetime import datetime\n\n@routes.route(\"/registrar_evento_vista\")\ndef registrar_evento_vista():\n try:\n if \"iduser\" in session:\n if session[\"esEmp\"] == 1:\n usuario_emp = db.session.query(Usuario_emp).filter(Usuario_emp.id == session[\"iduser\"])[0]\n lista_locales = usuario_emp.locales\n return render_template(\"create_event.html\", lista_locales=lista_locales)\n else: \n return redirect(\"/\")\n else:\n return redirect(\"/\")\n except:\n e = sys.exc_info()[0]\n print(\"Unexpected error: \", e)\n raise\n \n@routes.route(\"/registrar_evento_vista/registrar-evento\", methods=[\"POST\"])\ndef registrar_evento():\n try:\n if \"iduser\" in session:\n if session[\"esEmp\"] == 1:\n if request.method == \"POST\":\n nombre = request.form[\"nombre\"]\n local_id = request.form[\"local\"]\n fechaInicio = request.form[\"fechaInicio\"]\n fechaFinal = request.form[\"fechaFin\"]\n horaInicio = request.form[\"horaInicio\"]\n horaFinal = request.form[\"horaFin\"]\n precio = request.form[\"precio\"]\n descripcion = request.form[\"descripcion\"]\n\n fecha1= fechaInicio + \" \" + horaInicio + \":00\"\n fecha2= fechaFinal + \" \" + horaFinal + \":00\"\n\n evento = Evento(\n nombre=nombre,\n descripcion=descripcion,\n precio=precio,\n fechaInicio=fecha1,\n fechaFin=fecha2\n )\n\n local_query = db.session.query(Local).filter(Local.id == local_id)[0]\n local_query.eventos.append(evento)\n \n db.session.add(evento)\n db.session.commit()\n flash(\"Evento creado exitosamente\", \"exito_evento\")\n return redirect(\"/eventos\")\n\n else:\n return redirect(\"/\")\n\n else:\n return redirect(\"/\")\n except:\n e = sys.exc_info()[0]\n print(\"Unexpected error: \", e)\n 
raise\n","repo_name":"andres4640/VAOS","sub_path":"routes/registro_evento.py","file_name":"registro_evento.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70223094914","text":"import numpy as np\n\nfrom scipy.ndimage.morphology import distance_transform_edt\nimport vigra\n\nfrom ...features import superpixel_stacked, from_affinities_to_hmap, size_filter\n\n\n# watershed on distance transform:\n# seeds are generated on the inverted distance transform\n# the probability map is used for growing\nclass WatershedOnDistanceTransform(object):\n def __init__(self, threshold, sigma_seeds,\n preserve_membrane=True, min_segment_size=0,\n stacked_2d=False, n_threads=1,\n from_boundary_maps=False):\n self.threshold = threshold\n self.sigma_seeds = sigma_seeds\n self.preserve_membrane = preserve_membrane\n self.min_segment_size = min_segment_size\n self.stacked_2d = stacked_2d\n self.n_threads = n_threads\n self.from_boundary_maps = from_boundary_maps\n\n # wrap vigra local maxima properly\n @staticmethod\n def local_maxima(image, *args, **kwargs):\n assert image.ndim in (2, 3), \"Unsupported dimensionality: {}\".format(image.ndim)\n if image.ndim == 2:\n return vigra.analysis.localMaxima(image, *args, **kwargs)\n if image.ndim == 3:\n return vigra.analysis.localMaxima3D(image, *args, **kwargs)\n\n def signed_distance_transform(self, hmap):\n # get the distance transform of the pmap\n binary_membranes = (hmap >= self.threshold)\n distance_to_membrane = distance_transform_edt(np.logical_not(binary_membranes))\n # Instead of computing a negative distance transform within the thresholded membrane areas,\n # Use the original probabilities (but inverted)\n if self.preserve_membrane:\n distance_to_membrane[binary_membranes] = -hmap[binary_membranes]\n # Compute the negative distance transform and substract it from the distance transform\n else:\n distance_to_nonmembrane = distance_transform_edt(binary_membranes)\n # Combine the inner/outer distance transforms\n distance_to_nonmembrane[distance_to_nonmembrane > 0] -= 1\n distance_to_membrane[:] -= distance_to_nonmembrane\n return distance_to_membrane.astype('float32')\n\n def seeds_from_distance_transform(self, distance_transform):\n # we are not using the dt after this point, so it's ok to smooth it\n # and later use it for calculating the seeds\n if self.sigma_seeds > 0.:\n distance_transform = vigra.filters.gaussianSmoothing(distance_transform, self.sigma_seeds)\n # If any seeds end up on the membranes, we'll remove them.\n # This is more likely to happen when the distance transform was generated with preserve_membrane_pmaps=True\n membrane_mask = (distance_transform < 0)\n seeds = self.local_maxima(\n distance_transform, allowPlateaus=True, allowAtBorder=True, marker=np.nan\n )\n seeds = np.isnan(seeds).astype('uint32')\n seeds[membrane_mask] = 0\n\n return vigra.analysis.labelMultiArrayWithBackground(seeds)\n\n def wsdt_superpixel(self, hmap):\n # first, we compute the signed distance transform\n dt = self.signed_distance_transform(hmap)\n # next, get the seeds via maxima on the (smoothed) distance transform\n seeds = self.seeds_from_distance_transform(dt)\n # run watershed on the pmap wit dt seeds\n segmentation, seg_max = vigra.analysis.watershedsNew(hmap, seeds=seeds)\n # apply size filter\n if self.min_segment_size > 0:\n segmentation, seg_max = size_filter(\n hmap, segmentation, self.min_segment_size\n )\n return segmentation, seg_max\n\n def 
__call__(self, affinities):\n affinities = np.require(affinities, dtype='float32')\n if self.stacked_2d:\n # take the max over inplane nearest affinity channels\n if self.from_boundary_maps:\n assert affinities.ndim == 3\n hmap = affinities\n else:\n assert affinities.ndim == 4\n hmap = np.maximum(affinities[1], affinities[2])\n segmentation, _ = superpixel_stacked(hmap,\n self.wsdt_superpixel,\n self.n_threads)\n else:\n # take the max over all 3 nearest affinity channels\n if self.from_boundary_maps:\n hmap = affinities\n else:\n hmap = np.maximum(affinities[0], affinities[1])\n hmap = np.maximum(hmap, affinities[2])\n segmentation, _ = self.wsdt_superpixel(hmap)\n return segmentation\n\n\nclass IntersectWithBoundaryPixels(object):\n def __init__(self, offsets,\n boundary_threshold=0.5, # 1.0 all boundary, 0.0 no boundary\n used_offsets=None,\n offset_weights=None):\n self.offsets = offsets\n self.used_offsets = used_offsets\n self.offset_weights = offset_weights\n self.boundary_threshold = boundary_threshold\n\n def __call__(self, affinities, dtws_segm):\n print(\"Find hmap\")\n hmap = from_affinities_to_hmap(affinities, self.offsets, self.used_offsets,\n self.offset_weights)\n pixel_segm = np.arange(np.prod(dtws_segm.shape), dtype='uint64').reshape(dtws_segm.shape) + dtws_segm.max()\n boundary_mask = (1.-hmap) < self.boundary_threshold\n\n print(\"Relabel volume\")\n dtws_segm = vigra.analysis.labelVolume((dtws_segm * np.logical_not(boundary_mask)).astype('uint32'))\n\n # fig, ax = segm_vis.get_figure(2, 2, figsize=(14,14))\n\n # segm_vis.plot_output_affin(ax[0,0], affinities, nb_offset=1, z_slice=1)\n # segm_vis.plot_output_affin(ax[0,1], affinities, nb_offset=2, z_slice=1)\n # segm_vis.plot_gray_image(ax[0,1], hmap,z_slice=1)\n # segm_vis.plot_gray_image(ax[0,1], affinities[2],z_slice=1)\n\n # segm_vis.plot_segm(ax[1,0], dtws_segm, z_slice=1)\n\n new_segmentation = np.where(boundary_mask, pixel_segm, dtws_segm)\n\n # segm_vis.plot_segm(ax[1,1], new_segmentation, z_slice=1)\n # segm_vis.save_plot(fig, \"./\", \"debug_plot.pdf\")\n print(\"Relabel consecutive\")\n new_segmentation = vigra.analysis.relabelConsecutive(new_segmentation)[0]\n\n print(\"Check new number of nodes!\", new_segmentation.max())\n\n # from ... 
import vis as vis\n # import matplotlib.pyplot as plt\n #\n # fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(7, 7))\n # for a in fig.get_axes():\n # a.axis('off')\n #\n # # affs_repr = np.linalg.norm(affs_repr, axis=-1)\n # # ax.imshow(affs_repr, interpolation=\"none\")\n #\n # vis.plot_gray_image(ax[0,0], hmap, z_slice=1)\n # vis.plot_gray_image(ax[0, 1], boundary_mask.astype('float32'), z_slice=1)\n # vis.plot_segm(ax[1,0], new_segmentation, z_slice=1, highlight_boundaries=False)\n # vis.plot_segm(ax[1, 1], pixel_segm, z_slice=1, highlight_boundaries=False)\n #\n # pdf_path = \"./hmap.pdf\"\n # fig.savefig(pdf_path)\n\n\n return new_segmentation\n\n\nclass WatershedOnDistanceTransformFromAffinities(WatershedOnDistanceTransform):\n\n def __init__(self, offsets, threshold, sigma_seeds,\n used_offsets=None,\n offset_weights=None,\n return_hmap=False,\n invert_affinities=False,\n intersect_with_boundary_pixels=False,\n boundary_pixels_kwargs=None,\n **super_kwargs):\n if 'from_boundary_maps' in super_kwargs:\n assert super_kwargs['from_boundary_maps']\n super_kwargs.pop('from_boundary_maps')\n super(WatershedOnDistanceTransformFromAffinities, self).__init__(threshold,\n sigma_seeds,\n from_boundary_maps=True,\n **super_kwargs)\n if isinstance(offsets, list):\n offsets = np.array(offsets)\n else:\n assert isinstance(offsets, np.ndarray)\n\n self.offsets = offsets\n # Consistency of these inputs is checked in from_affinities_to_hmap\n self.used_offsets = used_offsets\n self.offset_weights = offset_weights\n self.return_hmap = return_hmap\n self.invert_affinities = invert_affinities\n self.intersect_with_boundary_pixels = intersect_with_boundary_pixels\n if self.intersect_with_boundary_pixels:\n boundary_pixels_kwargs = boundary_pixels_kwargs if boundary_pixels_kwargs is not None else {}\n self.intersect = IntersectWithBoundaryPixels(offsets, **boundary_pixels_kwargs)\n\n def __call__(self, affinities, foreground_mask=None):\n \"\"\"\n Here we expect real affinities (1: merge, 0: split).\n If the opposite is passed, set option `invert_affinities == True`\n \"\"\"\n assert affinities.shape[0] == len(self.offsets), \"{}, {}\".format(affinities.shape[0], len(self.offsets))\n assert affinities.ndim == 4, \"{}\".format(affinities.ndim)\n\n if self.invert_affinities:\n affinities = 1. - affinities\n\n print(affinities.mean())\n\n print(\"Predict hmap\")\n hmap = from_affinities_to_hmap(affinities, self.offsets, self.used_offsets,\n self.offset_weights)\n print(\"Run WSDT\")\n segmentation = super(WatershedOnDistanceTransformFromAffinities, self).__call__(hmap)\n\n # Intersect with boundary pixels:\n if self.intersect_with_boundary_pixels:\n print(\"Intersecting with pixels\")\n segmentation = self.intersect(affinities, segmentation)\n\n # Mask with background (e.g. ignore GT-label):\n if foreground_mask is not None:\n assert foreground_mask.shape == segmentation.shape, \"{}, {}\".format(segmentation.shape, foreground_mask.shape)\n segmentation = segmentation.astype('int64')\n segmentation = np.where(foreground_mask, segmentation, np.ones_like(segmentation)*(-1))\n\n if segmentation.max() > np.uint32(-1):\n print(\"!!WARNING!! uint32 limit reached!\")\n\n\n # from ... 
import vis as vis\n # import matplotlib.pyplot as plt\n #\n # fig, ax = plt.subplots(ncols=3, nrows=1, figsize=(7, 7))\n # for a in fig.get_axes():\n # a.axis('off')\n #\n # # affs_repr = np.linalg.norm(affs_repr, axis=-1)\n # # ax.imshow(affs_repr, interpolation=\"none\")\n #\n # vis.plot_segm(ax[0], dtws, z_slice=1)\n # vis.plot_segm(ax[1], intersect, z_slice=1, highlight_boundaries=False)\n # vis.plot_segm(ax[2], segmentation, z_slice=1, highlight_boundaries=False)\n #\n # pdf_path = \"./wsdt.pdf\"\n # fig.savefig(pdf_path)\n\n\n if self.return_hmap:\n return segmentation, hmap\n else:\n return segmentation\n\n\n","repo_name":"abailoni/segmfriends","sub_path":"segmfriends/algorithms/WS/wsdt.py","file_name":"wsdt.py","file_ext":"py","file_size_in_byte":11111,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73791496195","text":"import discord\nimport asyncio\nimport requests\nfrom discord.ext.commands import Bot\nfrom discord.ext import commands\nimport datetime\nimport time\n#readable = time.ctime(1668696846)\n\n\nPREFIX = (\"-\")\nintents = discord.Intents.default()\nintents.members = True\nhelp_command = commands.DefaultHelpCommand(\n no_category = 'Commands'\n)\nbot = commands.Bot(case_insensitive=True,intents=intents, command_prefix=PREFIX,help_command = help_command)\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=\"your house\"))\n print(\"on!\")\n \n@bot.event\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n print('Missing argument')\n\n@bot.command()\nasync def btc(ctx):\n \"\"\" Shows current price of BTC\"\"\"\n price = requests.get('https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD').json()[\"USD\"]\n pricey = str(price)\n msg = pricey + ' USD'\n await ctx.send('*'+msg+'*')\n \n\n@bot.command()\nasync def btcb(ctx, what):\n \"\"\" Shows wallet balance of bitcoin wallet\"\"\"\n total = requests.get('https://blockchain.info/q/addressbalance/' + what)\n count = int(total.text)\n bal = str(count/100000000)\n msg = bal + ' BTC'\n await ctx.send('*'+msg+'*')\n \n@bot.command()\nasync def eth(ctx):\n \"\"\" Shows current price of ETH\"\"\"\n price = requests.get('https://min-api.cryptocompare.com/data/price?fsym=ETH&tsyms=USD').json()[\"USD\"]\n pricey = str(price)\n msg = pricey + ' USD'\n await ctx.send('*'+msg+'*')\n \n@bot.command()\nasync def ethb(ctx, what):\n \"\"\" Shows wallet balance of ethereum wallet\"\"\"\n total = requests.get('https://api-eu1.tatum.io/v3/ethereum/account/balance/' + what).json()['balance']\n bal = str(total)\n msg = bal + ' ETH'\n await ctx.send('*'+msg+'*')\n \n@bot.command()\nasync def ltc(ctx):\n \"\"\" Shows current price of LTC\"\"\"\n price = requests.get('https://min-api.cryptocompare.com/data/price?fsym=LTC&tsyms=USD').json()[\"USD\"]\n pricey = str(price)\n msg = pricey + ' USD'\n await ctx.send('*'+msg+'*')\n \n@bot.command()\nasync def xmr(ctx):\n \"\"\" Shows current price of XMR\"\"\"\n price = requests.get('https://min-api.cryptocompare.com/data/price?fsym=XMR&tsyms=USD').json()[\"USD\"]\n pricey = str(price)\n msg = pricey + ' USD'\n await ctx.send('*'+msg+'*')\n\n@bot.command()\nasync def dbdsi(ctx, *what):\n \"\"\" Shows info about survivor in Dead by Daylight\"\"\"\n sur = ''.join(what)\n res = requests.get('https://dead-by-api.herokuapp.com/api/survs/' + sur)\n av = res.json()['data'][0]['imgs']['portrait']\n name = 
res.json()['data'][0]['name']\n licensed = res.json()['data'][0]['licensed']\n dif = res.json()['data'][0]['difficulty']\n nat = res.json()['data'][0]['nationality']\n dlc = res.json()['data'][0]['dlc']\n prk = res.json()['data'][0]['perks_names']\n img = res.json()['data'][0]['imgs']['store']\n rl = res.json()['data'][0]['role']\n embed = discord.Embed(title=f\"{name}\", description=\"Survivor Information\", color=0x000000)\n embed.add_field(name='Licensed', value=f\"{licensed}\", inline=True)\n embed.add_field(name='Difficulty', value=f\"{dif}\", inline=True)\n embed.add_field(name='Role', value=f\"{rl}\", inline=True)\n embed.add_field(name='Nationality', value=f\"{nat}\", inline=True)\n embed.add_field(name='DLC', value=f\"{dlc}\", inline=True)\n embed.add_field(name='Perks', value=f\"{prk}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_image(url=img)\n embed.set_thumbnail(url=av)\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n #print(av)\n await ctx.send(embed=embed)\n \n@bot.command()\nasync def dbdki(ctx, *what):\n \"\"\" Shows info about killer in Dead by Daylight\"\"\"\n kil = ''.join(what)\n res = requests.get('https://dead-by-api.herokuapp.com/api/killers/the' + kil)\n av = res.json()['data'][0]['imgs']['portrait']\n name = res.json()['data'][0]['fullName']\n licensed = res.json()['data'][0]['licensed']\n dif = res.json()['data'][0]['difficulty']\n nat = res.json()['data'][0]['nationality']\n dlc = res.json()['data'][0]['dlc']\n prk = res.json()['data'][0]['perks_names']\n img = res.json()['data'][0]['imgs']['store']\n rl = res.json()['data'][0]['realm']\n pat = res.json()['data'][0]['powerAttackType']\n wp = res.json()['data'][0]['weapon']\n ms = res.json()['data'][0]['moveSpeed']\n tr = res.json()['data'][0]['terrorRadius']\n hg = res.json()['data'][0]['height']\n embed = discord.Embed(title=f\"{name}\", description=\"Killer Information\", color=0x000000)\n embed.add_field(name='Licensed', value=f\"{licensed}\", inline=True)\n embed.add_field(name='Difficulty', value=f\"{dif}\", inline=True)\n embed.add_field(name='Realm', value=f\"{rl}\", inline=True)\n embed.add_field(name='Nationality', value=f\"{nat}\", inline=True)\n embed.add_field(name='DLC', value=f\"{dlc}\", inline=True)\n embed.add_field(name='Power Attack Type', value=f\"{pat}\", inline=True)\n embed.add_field(name='Weapon', value=f\"{wp}\", inline=True)\n embed.add_field(name='Movement Speed', value=f\"{ms}\", inline=True)\n embed.add_field(name='Terror Radius', value=f\"{tr}\", inline=True)\n embed.add_field(name='Height', value=f\"{hg}\", inline=True)\n embed.add_field(name='Perks', value=f\"{prk}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_image(url=img)\n embed.set_thumbnail(url=av)\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n #print(av)\n await ctx.send(embed=embed)\n\n\n\n\n@bot.command()\nasync def dbdid(ctx):\n \"\"\" Shows ItemID for item from Dead by Daylight\"\"\"\n res = requests.get('https://dead-by-api.herokuapp.com/api/items?fields=code')\n flash = res.json()['data'][0]['code']\n i1 = res.json()['data'][1]['code']\n i2 = res.json()['data'][2]['code']\n i3 = res.json()['data'][3]['code']\n i4 = res.json()['data'][4]['code']\n i5 = res.json()['data'][5]['code']\n i6 = res.json()['data'][6]['code']\n i7 = res.json()['data'][7]['code']\n i8 = res.json()['data'][8]['code']\n i9 = res.json()['data'][9]['code']\n i10 = res.json()['data'][10]['code']\n i11 = 
res.json()['data'][11]['code']\n i12 = res.json()['data'][12]['code']\n i13 = res.json()['data'][13]['code']\n i14 = res.json()['data'][14]['code']\n i15 = res.json()['data'][15]['code']\n i16 = res.json()['data'][16]['code']\n i17 = res.json()['data'][17]['code']\n i18 = res.json()['data'][18]['code']\n i19 = res.json()['data'][19]['code']\n i20 = res.json()['data'][20]['code']\n i21 = res.json()['data'][21]['code']\n i22 = res.json()['data'][22]['code']\n i23 = res.json()['data'][23]['code']\n i24 = res.json()['data'][24]['code']\n i25 = res.json()['data'][25]['code']\n i26 = res.json()['data'][26]['code']\n i27 = res.json()['data'][27]['code']\n i28 = res.json()['data'][28]['code']\n i29 = res.json()['data'][29]['code']\n i30 = res.json()['data'][30]['code']\n i31 = res.json()['data'][31]['code']\n \n embed = discord.Embed(title=f\"Items\", description=\"Item IDS\", color=0x000000)\n embed.add_field(name='Ids', value=f\"{flash},{i1},{i2},{i3},{i4},{i5},{i6},{i7},{i8},{i9},{i10},{i11},{i12},{i13},{i14},{i15},{i16},{i17},{i18},{i19},{i20},{i21},{i22},{i23},{i24},{i25},{i26},{i27},{i28},{i29},{i30},{i31}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n #print(av)\n await ctx.send(embed=embed)\n \n \n \n\n@bot.command()\nasync def dbdii(ctx, what):\n \"\"\" Shows info about Item in Dead by Daylight\"\"\"\n res = requests.get('https://dead-by-api.herokuapp.com/api/items/' + what)\n name = res.json()['data'][0]['name']\n #print(name)\n rar = res.json()['data'][0]['rarity']\n des = res.json()['data'][0]['description']\n img = res.json()['data'][0]['icon']\n embed = discord.Embed(title=f\"{name}\", description=\"Item Information\", color=0x000000)\n embed.add_field(name='Rarity', value=f\"{rar}\", inline=True)\n embed.add_field(name='Description', value=f\"{des}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_thumbnail(url=img)\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def dbdrr(ctx):\n \"\"\" Shows info about Rank Reset in Dead by Daylight\"\"\"\n res = requests.get('https://dbd.tricky.lol/api/rankreset')\n resets = res.json()['rankreset']\n dat = time.ctime(resets)\n #print(name)\n embed = discord.Embed(title=f\"Rank Reset\", description=\"Date Info\", color=0x000000)\n embed.add_field(name='Resets at:', value=f\"{dat}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def dbdsos(ctx):\n \"\"\" Shows Shrine of Secrets in Dead by Daylight\"\"\"\n res = requests.get('https://dbd.tricky.lol/api/shrine')\n \n perk1 = res.json()['perks'][0]['id']\n perk12 = res.json()['perks'][1]['id']\n perk13 = res.json()['perks'][2]['id']\n perk14 = res.json()['perks'][3]['id']\n \n ress = requests.get('https://dbd.tricky.lol/api/perkinfo?perk=' + perk1 +'&pretty')\n ress1 = requests.get('https://dbd.tricky.lol/api/perkinfo?perk=' + perk12 +'&pretty')\n ress2 = requests.get('https://dbd.tricky.lol/api/perkinfo?perk=' + perk13 +'&pretty')\n ress3 = requests.get('https://dbd.tricky.lol/api/perkinfo?perk=' + perk14 +'&pretty')\n \n perks1 = ress.text.replace(' ', '')\n perks2 = ress1.text.replace(' ', '')\n perks3 = ress2.text.replace(' ', '')\n perks4 = ress3.text.replace(' ', '')\n resets = res.json()['end']\n dat = time.ctime(resets)\n 
\n \n \n \n embed = discord.Embed(title=f\"Shrine of Secrets\", description=f\"**Ends at** {dat}\", color=0x000000)\n embed.add_field(name=f'{perk1}', value=f\"{perks1}\", inline=True)\n embed.add_field(name=f'{perk12}', value=f\"{perks2}\", inline=True)\n embed.add_field(name=f'{perk13}', value=f\"{perks3}\", inline=True)\n embed.add_field(name=f'{perk14}', value=f\"{perks4}\", inline=True)\n embed.timestamp = datetime.datetime.utcnow()\n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url)\n await ctx.send(embed=embed)\n\n@bot.command()\nasync def serverinfo(ctx):\n embed = discord.Embed(title=f\"{ctx.guild.name}\", description=\"Server Information\", color=0x000000)\n embed.timestamp = datetime.datetime.utcnow()\n embed.add_field(name='Server ID', value=f\"{ctx.guild.id}\", inline=True)\n embed.add_field(name='Created On', value=ctx.guild.created_at.strftime(\"%b %d %Y\"), inline=True)\n embed.add_field(name='Owner', value=f\"{ctx.guild.owner.mention}\", inline=True)\n embed.add_field(name='Members', value=f'{ctx.guild.member_count} Members', inline=True)\n embed.add_field(name='Channels', value=f'{len(ctx.guild.text_channels)} Text | {len(ctx.guild.voice_channels)} Voice', inline=True)\n embed.add_field(name='Region', value=f'{ctx.guild.region}', inline=True)\n #embed.set_thumbnail(url=ctx.guild.icon_url) \n embed.set_footer(text=f'{ctx.author.name}', icon_url=ctx.message.author.avatar_url) \n await ctx.send(embed=embed)\n \nbot.run('')\n","repo_name":"culturally/discord-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11477,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23625203721","text":"#!/usr/bin/env python\n\n'''\n\ngoogle code jam 2011\nround 1b\nproblem a\nRPI\n\nnotes:\n\n\n'''\n\nimport sys\nimport time\n\nfin = sys.stdin\nfout = sys.stdout\n#fin = open('sample.in', 'r')\n#fout = open('sample.out', 'w')\n\ntimeit = 1\ndebugv = 0\n\ndef main():\n\tT = int(fin.readline())\n\tfor case in xrange(1,T+1):\n\t\tprocessCase(case)\n\ndef processCase(case):\n\tN = int(fin.readline())\n\tschedule = [fin.readline().strip() for row in xrange(N)]\n\t\n\tdebug(\"schedule: %s\\n\" % schedule)\n\t\n\tnWins = [float(len([None for x in row if x == '1'])) for row in schedule]\n\tnGames = [float(len([None for x in row if x != '.'])) for row in schedule]\n\t\n\tdebug(\"nWins: %s\\n\" % nWins)\n\tdebug(\"nGames: %s\\n\" % nGames)\n\t\n\tWP = [nW / nG for nW, nG in zip(nWins, nGames)]\n\tdebug(\"WP: %s\\n\" % WP)\n\t\n\tOWP = [\n\t\tsum([\n\t\t\t(nWins[j] - int(schedule[j][i])) / (nGames[j] - 1) \n\t\t\tfor j in xrange(N) \n\t\t\tif schedule[i][j] != '.'\n\t\t]) / nGames[i]\n\t\tfor i in xrange(N)\n\t]\n\tdebug(\"OWP: %s\\n\" % OWP)\n\t\n\tOOWP = [\n\t\tsum([\n\t\t\tOWP[j]\n\t\t\tfor j in xrange(N)\n\t\t\tif schedule[i][j] != '.'\n\t\t]) / nGames[i]\n\t\tfor i in xrange(N)\n\t]\n\tdebug(\"OOWP: %s\\n\" % OOWP)\n\t\n\tRPI = [\n\t\t0.25 * wp + 0.5 * owp + 0.25 * oowp\n\t\tfor wp, owp, oowp in zip(WP, OWP, OOWP)\n\t]\n\t\n\tfout.write(\"Case #%d:\\n\" % (case))\n\tfor rpi in RPI:\n\t\tfout.write(\"%.12f\\n\" % (rpi))\n\n\ndef debug(m):\n\tif debugv:\n\t\tsys.stderr.write(m)\n\nstartTime = time.clock()\nmain()\nif timeit: sys.stderr.write(\"completed in %f seconds\\n\" % (time.clock() - 
startTime))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_81/392.py","file_name":"392.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72034220033","text":"# Created by Winstijn Smit on 9 May 2023.\n\n# Following lines are to avoid cyclic dependencies in type checking.\nfrom __future__ import annotations\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from collector import Collector\n\nfrom PyQt5.QtWidgets import QMessageBox\n\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plotter\nimport matplotlib.widgets as widgets\nimport os\nimport time\n\nCOLLECTION_PATH = \"./dataset\"\n\n\nclass GestureData:\n\n data: list\n\n # Create static method that sets a gesture from a dictionary.\n @staticmethod\n def load_from_dict(obj: dict):\n gesture_data = GestureData(0, 0, 0)\n gesture_data.set(obj)\n return gesture_data\n \n # Sets values from a dictionary.\n def set(self, obj: dict):\n valid_keys = [\"resistance\", \"sample_rate\", \"duration\", \"samples\", \"data\", \"candidate\", \"hand\", \"gesture_type\", \"target_gesture\", \"timestamp\"]\n for key in obj:\n if key not in valid_keys:\n raise Exception(\"Invalid key '\" + key + \"'\")\n setattr(self, key, obj[key])\n\n\n def __init__(self, resistance: int, sample_rate: int, duration: float) -> None:\n self.resistance = resistance\n self.sample_rate = sample_rate\n self.duration = duration\n self.samples = int(duration * sample_rate)\n self.set_metadata() # Just initialize the metadata values.\n self.timestamp = time.time()\n self.data = [] # Initialize the data list to an empty array.\n\n def set_metadata(self, candidate: str = \"Unknown Canidate\", hand: str = \"unknown\",\n gesture_type=\"unknown\", target_gesture=\"unknown\") -> None:\n self.candidate = candidate\n self.hand = hand\n self.gesture_type = gesture_type\n self.target_gesture = target_gesture\n\n # Removes this GestureData from the file it is stored in.\n # This assumes it is saved at the default location.\n # Might need refactoring for when this is not the case.\n def remove_from_dataset(self) -> None:\n path = self.get_pickle_path()\n remove_entry_at(path, self.timestamp)\n\n # Add a sample to the data.\n def add_sample(self, r0, r1, r2) -> None:\n self.data.append([int(r0), int(r1), int(r2)])\n\n # Uses a collctor to read all the samples retrieved from the serial port.\n def collect(self, collector: Collector, log=False) -> None:\n # Get all measurements samples from the serial port.\n for i in range(self.samples):\n # Read a line from the serial port.\n # Get the binary result.\n r0 = collector.readuint16()\n r1 = collector.readuint16()\n r2 = collector.readuint16()\n self.add_sample(r0, r1, r2)\n\n if log:\n print(\"[Measurement \" + str(i) + \"] \" + str(r0) + \", \" + str(r1) + \", \" + str(r2))\n\n def get_directory_path(self, folder=COLLECTION_PATH) -> str:\n return os.path.join(folder, self.gesture_type, self.target_gesture, self.hand)\n\n # Getter for candidate name without spaces\n def get_formatted_candidate(self) -> str:\n return self.candidate.lower().replace(\" \", \"_\")\n\n def get_pickle_path(self, folder=COLLECTION_PATH):\n # Create the path to the file\n candidate = self.get_formatted_candidate()\n directory = self.get_directory_path(folder)\n return os.path.join(directory, \"candidate_\" + candidate + \".pickle\")\n\n def save_to_file(self, folder=COLLECTION_PATH, path=None) -> None:\n # 
Create the directory if it does not exist.\n data_dict = {\n \"timestamp\": self.timestamp,\n \"candidate\": self.candidate,\n \"hand\": self.hand,\n \"gesture_type\": self.gesture_type,\n \"target_gesture\": self.target_gesture,\n \"resistance\": self.resistance,\n \"sample_rate\": self.sample_rate,\n \"duration\": self.duration,\n \"samples\": self.samples,\n \"data\": np.array(self.data)\n }\n\n if path == None:\n path = self.get_pickle_path(folder)\n\n create_directories(path) # Create the directories if they do not exist.\n print(\"Saving gesture data to file at: \" + str(path))\n\n with open(path, \"ab+\") as file:\n pickle.dump(data_dict, file)\n\n # Plots the data contained in the GestureData on a graph.\n def plot(self, show=True, candidate = None, target_gesture = None) -> None:\n # Set metadata of the plot.\n if (candidate == None):\n candidate = self.candidate\n if (target_gesture == None):\n target_gesture = self.target_gesture\n\n # Create the plot, together with a section for the metadata.\n fig, plt = plotter.subplots(1)\n fig.subplots_adjust(bottom=0.3)\n\n # Plot the data.\n plt.plot(self.data)\n\n # Set the labels of the axes.\n plt.set_xlabel(\"Samples\")\n plt.set_ylabel(\"Photodiode reading\")\n\n # Set the metadata of the plot.\n fig.text(0.1,0.15,'Sampling Rate: ' + str(self.sample_rate) + 'Hz')\n fig.text(0.1,0.10,'Time: ' + str(self.duration) + 's')\n fig.text(0.1,0.05,'Resistance: ' + str(self.resistance / 1000) + 'kOhm')\n\n # Set the title of the plot.\n title = target_gesture + \" by \" + candidate\n plt.set_title(title) \n \n # If \"d\" is pressed, prompt the user to remove the gesture from the dataset.\n def on_key(event): \n if event.key == \"d\":\n # Prompt the user to confirm the removal using Qt \n result = prompt_remove_dataset()\n if result == QMessageBox.Ok:\n print(\"Removing gesture from dataset...\")\n remove_entry_at(self.get_pickle_path(), self.timestamp)\n plotter.close()\n else:\n print(\"Canceling gesture removal...\")\n\n fig.canvas.mpl_connect('key_press_event', on_key)\n\n\n # Save location of the image.\n path = \"plots/\" + title.lower().replace(\" \", \"_\") + \".png\"\n create_directories(path)\n plotter.savefig(path)\n\n if (show):\n plotter.show()\n\n\ndef prompt_remove_dataset():\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Warning)\n msg.setText(\"Are you sure you want to remove this gesture from the dataset?\")\n msg.setWindowTitle(\"Remove gesture from dataset\")\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n msg.setEscapeButton(QMessageBox.Cancel)\n return msg.exec_()\n\n# Remove a dataset entry at a certain path with a certain timestamp.\n# Note that this is not the best way to do this, but it works for now.\ndef remove_entry_at(path: str, timestamp: float):\n if os.path.exists(path):\n data = read_pickle(path)\n # Check if the data is in the list.\n # Map the data so we only have the timestamp\n data_time = list(map(lambda x: x.timestamp, data))\n\n # Use the timestamp to find the index.\n index = data_time.index(timestamp) # Use the timestamp to find the index.\n if (index != -1):\n print(\"=== Removed one entry from dataset at '\" + path + \"'\")\n # Remove the index from the list.\n data = data[:index] + data[index+1:]\n \n os.remove(path) # remove the original file first\n print(\"Resaving the left over data:\")\n write_pickle(path, data) # Rewrite the file again.\n\ndef create_directories(path: str) -> None:\n # Create directory structure if it doesn't exist yet\n path = os.path.dirname(path)\n if not 
os.path.exists(path):\n os.makedirs(path)\n\ndef read_pickle(path: str) -> list[GestureData]:\n # Read the pickle file.\n data = []\n with open(path, \"rb\") as file:\n # As many times as possible try to read a pickle object.\n try:\n while True: \n gd_dict = pickle.load(file)\n data.append(GestureData.load_from_dict(gd_dict))\n except EOFError:\n pass\n return data\n\ndef write_pickle(path: str, data: list[GestureData]) -> None:\n for gd in data:\n gd.save_to_file(path=path) # Save all the gesture data to a file.\n","repo_name":"arnedebeer/CSE3000-DataCollection","sub_path":"data_collection_interface/gesture_data.py","file_name":"gesture_data.py","file_ext":"py","file_size_in_byte":8226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33011103305","text":"import tele_util\nimport datetime\n\n\ndef getData(group_id, req):\n t = datetime.date.today().replace(day=1)\n end = t - datetime.timedelta(days=1)\n start = end.replace(day=1)\n data = {'chat_id': str(group_id), 'start': start.strftime('%Y-%m-%d'),'end': end.strftime('%Y-%m-%d'),}\n if req.args.get('scaleend'):\n data['end']=req.args.get('scaleend')\n if req.args.get('scalestart'):\n data['start']=req.args.get('scalestart')\n return data\n\ndef getUser(data):\n sql = '''\n select nme, count(*) as posts, sum(len)\n , sum(case type when 't' then 1 else 0 end) as t\n , sum(case type when 'a' then 1 else 0 end) as a\n , sum(case type when 's' then 1 else 0 end) as s\n , sum(case type when 'p' then 1 else 0 end) as p\n , sum(case type when 'v' then 1 else 0 end) as v\n , sum(case type when 'i' then 1 else 0 end) as i\n , sum(case type when 'p' then 1 else 0 end) as o\n , sum(case type when 'd' then 1 else 0 end) as d\n , sum(case type when 'c' then 1 else 0 end) as c\n , sum(case type when 'N' then 1 else 0 end) as n\n from msglog l\n left join user_name u on u.user_id=l.user_id\n where l.chat_id=%(chat_id)s and l.date between %(start)s and %(end)s\n group by nme, u.user_id order by posts desc;'''\n users = []\n for r in tele_util.readSQL(sql, data=data):\n tele_util.typ3s\n types = {}\n for idx,(_,name) in enumerate(tele_util.typ3s.items()):\n if r[idx+3]!=0:\n types[name] = int(r[idx+3])\n user = {'name': r[0], 'posts': r[1],'textlen': r[2],'type': types,}\n users.append(user)\n return users\n\ndef getChart1(data):\n sql = '''\n select nme,\n count(*)/(select count(*) from msglog where l.chat_id=%(chat_id)s and l.date between %(start)s and %(end)s) * 100\n from msglog l left join user_name u on u.user_id=l.user_id\n where l.chat_id=%(chat_id)s and l.date between %(start)s and %(end)s\n group by chat_id,nme;'''\n chart1=[]\n for r in tele_util.readSQL(sql, data=data):\n chart1.append([r[0], r[1]])\n return chart1\n\ndef getChart2(data):\n sql = '''\n select sum(case type when 't' then 1 else 0 end) as t\n , sum(case type when 'a' then 1 else 0 end) as a\n , sum(case type when 's' then 1 else 0 end) as s\n , sum(case type when 'p' then 1 else 0 end) as p\n , sum(case type when 'v' then 1 else 0 end) as v\n , sum(case type when 'i' then 1 else 0 end) as i\n , sum(case type when 'p' then 1 else 0 end) as o\n , sum(case type when 'd' then 1 else 0 end) as d\n , sum(case type when 'c' then 1 else 0 end) as c\n , sum(case type when 'N' then 1 else 0 end) as n\n from msglog\n where chat_id=%(chat_id)s and date between %(start)s and %(end)s\n group by chat_id;'''\n chart2=[]\n rows = tele_util.readSQL(sql, data=data)\n if len(rows) == 0:\n return []\n r = rows[0]\n for idx,(_,name) in 
enumerate(tele_util.typ3s.items()):\n if r[idx] > 0:\n chart2.append([name, r[idx]])\n return chart2\ndef getChart3(data):\n sql = '''\n select nme, sum(len)\n from msglog l left join user_name u on u.user_id=l.user_id\n where l.chat_id=%(chat_id)s and l.date between %(start)s and %(end)s\n group by chat_id,nme;'''\n chart3=[]\n for r in tele_util.readSQL(sql, data=data):\n chart3.append([r[0], r[1]])\n return chart3\n\ndef getLinedata(data, users):\n userlinedata={}\n for u in users:\n d={}\n for i in range(0,24):\n d['%02d' % i]=0\n userlinedata[u['name']]=d\n sql = '''\n select u.nme, SUBSTRING(time from 1 for 2) as h, count(*) as c\n from msglog l left join user_name u on u.user_id=l.user_id\n where l.chat_id=%(chat_id)s and l.date between %(start)s and %(end)s\n group by l.chat_id, l.user_id, SUBSTRING(time from 1 for 2) order by l.user_id;\n '''\n for row in tele_util.readSQL(sql, data=data):\n nme, h, cnt = row\n userlinedata.get(nme, {})[h] = cnt\n\n linedata = []\n for _,t in userlinedata.items():\n line = []\n for _, val in t.items():\n line.append(val)\n linedata.append(line)\n return linedata\n","repo_name":"swa9bot/paw","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39082637683","text":"import tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist=input_data.read_data_sets(\"/tmp/data/\",one_hot=True)\r\nhidden_nodes_1=1000\r\nhidden_nodes_2=1000\r\nhidden_nodes_3=1000\r\nnumber_classes=10\r\nbatch=100\r\nX=tf.placeholder('float',[None,784])\r\nY=tf.placeholder('float')\r\ndef network_model(data):\r\n layer_1 = {'weight':tf.Variable(tf.random_normal([784, hidden_nodes_1])),\r\n 'bias':tf.Variable(tf.random_normal([hidden_nodes_1]))}\r\n\r\n layer_2 = {'weight':tf.Variable(tf.random_normal([hidden_nodes_1,hidden_nodes_2])),\r\n 'bias':tf.Variable(tf.random_normal([hidden_nodes_2]))}\r\n\r\n layer_3 = {'weight':tf.Variable(tf.random_normal([hidden_nodes_2,hidden_nodes_3])),\r\n 'bias':tf.Variable(tf.random_normal([hidden_nodes_3]))}\r\n\r\n output_layer = {'weight':tf.Variable(tf.random_normal([hidden_nodes_3, number_classes])),\r\n 'bias':tf.Variable(tf.random_normal([number_classes]))}\r\n \r\n l1 = tf.add(tf.matmul(data,layer_1['weight']), layer_1['bias'])\r\n l1 = tf.nn.relu(l1)\r\n\r\n l2 = tf.add(tf.matmul(l1,layer_2['weight']), layer_2['bias'])\r\n l2 = tf.nn.relu(l2)\r\n\r\n l3 = tf.add(tf.matmul(l2,layer_3['weight']), layer_3['bias'])\r\n l3 = tf.nn.relu(l3)\r\n\r\n output = tf.matmul(l3,output_layer['weight']) + output_layer['bias']\r\n\r\n return output\r\n\r\ndef train(x):\r\n pred=network_model(x)\r\n cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=Y))\r\n optimizer=tf.train.AdamOptimizer().minimize(cost)\r\n n_epochs=50\r\n with tf.Session() as sess:\r\n sess.run(tf.global_variables_initializer())\r\n for epoch in range(n_epochs):\r\n loss=0\r\n for _ in range(int(mnist.train.num_examples/batch)):\r\n epoch_x,epoch_y=mnist.train.next_batch(batch)\r\n _,c=sess.run([optimizer,cost],feed_dict={X:epoch_x, Y:epoch_y})\r\n loss+=c\r\n print('Epoch',epoch,'loss',loss)\r\n correct = tf.equal(tf.argmax(pred, 1), tf.argmax(Y, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n print('Accuracy:',accuracy.eval({X:mnist.test.images, Y:mnist.test.labels}))\r\ntrain(X)","repo_name":"srinidhi151/Book","sub_path":"Part 3/Chapter 
14/Tensorflow_with_MNIST.py","file_name":"Tensorflow_with_MNIST.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"6721335118","text":"from unittest import mock\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nfrom wfa_cardinality_estimation_evaluation_framework.common.analysis import relative_error\nimport wfa_cardinality_estimation_evaluation_framework.common.random\nfrom wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import ExactMultiSet\nfrom wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import LosslessEstimator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations.frequency_set_generator import HomogeneousPmfMultiSetGenerator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations.frequency_set_generator import HomogeneousMultiSetGenerator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations.frequency_set_generator import HeterogeneousMultiSetGenerator\nfrom wfa_cardinality_estimation_evaluation_framework.simulations.frequency_set_generator import PublisherConstantFrequencySetGenerator\n\nclass FrequencySetGeneratorTest(parameterized.TestCase):\n\n def test_homogeneous_pmf_multiset_generator_single_set(self):\n pmfgen = HomogeneousPmfMultiSetGenerator(\n 100, [2], [[1]], np.random.RandomState(1))\n hists = []\n for s in pmfgen:\n e = ExactMultiSet()\n e.add_ids(s)\n hists.append(LosslessEstimator()([e]))\n self.assertLen(hists, 1)\n self.assertEqual(hists[0], [2])\n\n def test_homogeneous_pmf_multiset_generator_multiple_sets(self):\n pmfgen = HomogeneousPmfMultiSetGenerator(\n\n 100, [2,1,2], [[1], [0,1], [0,0,1]], np.random.RandomState(1))\n hists = []\n for s in pmfgen:\n e = ExactMultiSet()\n e.add_ids(s)\n hists.append(LosslessEstimator()([e]))\n self.assertLen(hists, 3)\n self.assertEqual(hists[0], [2])\n self.assertEqual(hists[1], [1,1])\n self.assertEqual(hists[2], [2,2,2])\n\n def test_truncated_poisson_pmf(self):\n h = HomogeneousMultiSetGenerator(10, [1], [1], np.random.RandomState(1))\n e = np.exp(1)\n self.assertEqual(h._truncated_poisson_pmf(1, 1), [1])\n pmf1 = h._truncated_poisson_pmf(1,4)\n self.assertLen(pmf1, 4)\n self.assertAlmostEqual(pmf1[0], 0.3678794)\n self.assertAlmostEqual(pmf1[1], 0.3678794)\n self.assertAlmostEqual(pmf1[2], 0.1839397)\n self.assertAlmostEqual(pmf1[3], 0.0803014)\n pmf2 = h._truncated_poisson_pmf(2,3)\n self.assertLen(pmf2, 3)\n self.assertAlmostEqual(pmf2[0], 0.1353353)\n self.assertAlmostEqual(pmf2[1], 0.2706706)\n self.assertAlmostEqual(pmf2[2], 0.5939942)\n \n @parameterized.parameters((100, [1, 2], (5,1)), (3, [1,], (1,)))\n def test_homogeneous_multiset_generator_freq_cap(\n self, freq_cap, set_sizes, freq_rates):\n gen = HomogeneousMultiSetGenerator(\n universe_size=4,\n set_sizes=set_sizes,\n freq_rates=freq_rates,\n freq_cap=freq_cap,\n random_state=np.random.RandomState(1))\n output_multiset_ids_list = [multiset_ids for multiset_ids in gen]\n output_multiset_sizes = [len(set(m)) for m in output_multiset_ids_list]\n self.assertEqual(output_multiset_sizes, set_sizes)\n\n def test_homogeneous_multiset_generator_raise_unequal_length_input(self):\n # Test if raise error when set_sizes and freq_rate_list do not have\n # equal length.\n with self.assertRaises(AssertionError):\n _ = HomogeneousMultiSetGenerator(\n universe_size=4,\n set_sizes=[1, 1],\n freq_rates=[1],\n freq_cap=3,\n 
random_state=np.random.RandomState(1))\n\n def test_homogeneous_multiset_generator_raise_invalid_freq_rate(self):\n # Test if raise error when freq_rate is invalid.\n with self.assertRaises(AssertionError):\n _ = HomogeneousMultiSetGenerator(\n universe_size=4,\n set_sizes=[1, 1],\n freq_rates=[-1, 1],\n freq_cap=3,\n random_state=np.random.RandomState(1))\n\n @parameterized.parameters(0, -1)\n def test_homogeneous_multiset_generator_raise_invalid_freq_cap(self,\n freq_cap):\n # Test if raise error when freq_cap is invalid.\n with self.assertRaises(AssertionError):\n _ = HomogeneousMultiSetGenerator(\n universe_size=4,\n set_sizes=[1, 1],\n freq_rates=[1, 1],\n freq_cap=freq_cap,\n random_state=np.random.RandomState(1))\n\n def test_homogeneous_multiset_generator_factory_with_num_and_size(self):\n f = HomogeneousMultiSetGenerator.get_generator_factory_with_num_and_size(\n 100, 3, 5, [1, 2, 3], 10)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n def test_homogeneous_multiset_generator_factory_with_set_size_list(self):\n f = HomogeneousMultiSetGenerator.get_generator_factory_with_set_size_list(\n 100, [1, 2, 3], [1, 2, 3], 10)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n def test_heterogeneous_multi_set_generator_with_frequency_cap(self):\n g = HeterogeneousMultiSetGenerator(\n 1000, [100], [(1,1)], np.random.RandomState(1), freq_cap=1)\n e = ExactMultiSet()\n for ids in g:\n e.add_ids(ids)\n h = LosslessEstimator()([e])\n self.assertEqual(h, [100])\n\n def test_heterogeneous_multi_set_generator_test_impression_count(self):\n g = HeterogeneousMultiSetGenerator(\n 1000, [10], [(1,1)], np.random.RandomState(1))\n e = ExactMultiSet()\n for ids in g:\n e.add_ids(ids)\n h = LosslessEstimator()([e])\n self.assertEqual(h[0], 10)\n self.assertGreater(len(h), 1)\n\n def test_heterogeneous_multiset_generator_factory_with_num_and_size(self):\n f = HeterogeneousMultiSetGenerator.get_generator_factory_with_num_and_size(\n 100, 3, 5, [(1,2), (3,4), (5,6)], 10)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n def test_heterogeneous_multiset_generator_factory_with_set_size_list(self):\n f = HeterogeneousMultiSetGenerator.get_generator_factory_with_set_size_list(\n 100, [1, 2, 3], [(1,2), (3,4), (5,6)], 10)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n def test_publisher_constant_frequency_set_generator(self):\n gen = PublisherConstantFrequencySetGenerator(\n 100, [1, 2, 3], 3, np.random.RandomState(1))\n hists = []\n for s in gen:\n e = ExactMultiSet()\n e.add_ids(s)\n hists.append(LosslessEstimator()([e]))\n self.assertLen(hists, 3)\n self.assertEqual(hists[0], [1,1,1])\n self.assertEqual(hists[1], [2,2,2])\n self.assertEqual(hists[2], [3,3,3])\n\n def test_publisher_constant_frequency_factory_with_num_and_size(self):\n f = PublisherConstantFrequencySetGenerator.get_generator_factory_with_num_and_size(\n 100, 3, 3, 3)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n def test_publisher_constant_frequency_factory_with_set_size_list(self):\n f = PublisherConstantFrequencySetGenerator.get_generator_factory_with_set_size_list(\n 100, [1, 2, 3], 3)\n gen = f(np.random.RandomState(1))\n self.assertLen(list(gen), 3)\n\n \nif __name__ == '__main__':\n 
absltest.main()\n","repo_name":"world-federation-of-advertisers/cardinality_estimation_evaluation_framework","sub_path":"src/simulations/tests/frequency_set_generator_test.py","file_name":"frequency_set_generator_test.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"61"} +{"seq_id":"20871953687","text":"from django.db.models.query import QuerySet\nfrom .models import Project, QuestionIterTwo, Questions, Answers, Question, FinalQuestion\nfrom django.forms import ModelForm, fields, TextInput, DateInput, widgets, Textarea, Select, ClearableFileInput, ModelMultipleChoiceField, CheckboxSelectMultiple\nfrom django.contrib.auth.models import User\n\nclass CustomMMCF(ModelMultipleChoiceField):\n def label_from_instance(self, user: User):\n return \"%s\" % user.get_full_name\n\nclass CreateProjectForm(ModelForm):\n # def __init__(self, *args, **kwargs):\n # \"\"\" Grants access to the request object so that only members of the current user\n # are given as options\"\"\"\n\n # self.request = kwargs.pop('request')\n # super(CreateProjectForm, self).__init__(*args, **kwargs)\n # self.fields['members'].queryset = User.objects.filter(\n # user=self.request.user.groups.all()[1])\n\n class Meta:\n model = Project\n members = CustomMMCF(queryset=User.objects.all(), widget=CheckboxSelectMultiple)\n fields = ['title', 'info', 'image' , 'members' , 'date']\n\n widgets = {\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n \"info\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Информация о проекте'\n }),\n \"image\": ClearableFileInput(),\n \"date\": DateInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Дата создания'\n })\n }\n\nclass CreateQuestionsForm(ModelForm):\n class Meta:\n model = Questions\n fields = ['project', 'expert', 'title', 'question1', 'question2', 'question3', 'question4', 'question5']\n\n widgets = {\n \"project\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Проект'\n }),\n \"expert\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Автор вопросов'\n }),\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n \"question1\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Вопрос 1'\n }),\n \"question2\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Вопрос 2'\n }),\n \"question3\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Вопрос 3'\n }),\n \"question4\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Вопрос 4'\n }),\n \"question5\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Вопрос 5'\n }),\n }\n\nclass CreateAnswersForm(ModelForm):\n class Meta:\n model = Answers\n fields = ['project', 'questions', 'expert', 'comment1', 'comment2', 'comment3',\n 'comment4', 'comment5']\n\n widgets = {\n \"project\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Проект'\n }),\n \"questions\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Список вопросов'\n }),\n \"expert\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Автор вопросов'\n }),\n \"comment1\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Комментарий'\n }),\n \"comment2\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Комментарий',\n 'value': ''\n }),\n \"comment3\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Комментарий',\n 'value': ''\n }),\n 
\"comment4\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Комментарий',\n 'value': ''\n }),\n \"comment5\": Textarea(attrs={\n 'class': 'form-control',\n 'placeholder': 'Комментарий',\n 'value': ''\n }),\n }\n\nclass CreteQuestionsIterTwo(ModelForm):\n class Meta:\n model = QuestionIterTwo\n questions = ModelMultipleChoiceField(queryset=Question.objects.all(), widget=CheckboxSelectMultiple)\n fields = ['title', 'questions']\n\n widgets = {\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n }\n\nclass CreteQuestionsIterThree(ModelForm):\n\n def __init__(self, *args, **kwargs):\n \"\"\" Grants access to the request object so that only members of the current user\n are given as options\"\"\"\n\n self.request = kwargs.pop('request')\n super(CreteQuestionsIterThree, self).__init__(*args, **kwargs)\n self.fields['questions'].queryset = Question.objects.filter(\n iter=2)\n\n class Meta:\n model = QuestionIterTwo\n questions = ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple)\n fields = ['title', 'questions']\n\n widgets = {\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n }\n\nclass CreteQuestionsIterFour(ModelForm):\n\n def __init__(self, *args, **kwargs):\n \"\"\" Grants access to the request object so that only members of the current user\n are given as options\"\"\"\n\n self.request = kwargs.pop('request')\n super(CreteQuestionsIterFour, self).__init__(*args, **kwargs)\n self.fields['questions'].queryset = Question.objects.filter(\n iter=3)\n\n class Meta:\n model = QuestionIterTwo\n questions = ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple)\n fields = ['title', 'questions']\n\n widgets = {\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n }\n\nclass CreateFinalQuestion(ModelForm):\n class Meta:\n model = FinalQuestion\n fields = ['project', 'title']\n\n widgets = {\n \"project\": Select(attrs={\n 'class': 'form-control',\n 'placeholder': 'Проект'\n }),\n \"title\": TextInput(attrs={\n 'class': 'form-control',\n 'placeholder': 'Название'\n }),\n }","repo_name":"MegakoMar/MakeDecisions","sub_path":"projects/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37123327821","text":"from pathlib import Path\n\nfrom rasa.shared.utils.cli import print_success\nfrom rasa.shared.nlu.training_data.formats import NLGMarkdownReader\nfrom rasa.shared.nlu.training_data.formats.rasa_yaml import RasaYAMLWriter\nfrom rasa.utils.converter import TrainingDataConverter\n\n\nclass NLGMarkdownToYamlConverter(TrainingDataConverter):\n @classmethod\n def filter(cls, source_path: Path) -> bool:\n \"\"\"Checks if the given training data file contains NLG data in `Markdown` format\n and can be converted to `YAML`.\n\n Args:\n source_path: Path to the training data file.\n\n Returns:\n `True` if the given file can be converted, `False` otherwise\n \"\"\"\n return NLGMarkdownReader.is_markdown_nlg_file(source_path)\n\n @classmethod\n async def convert_and_write(cls, source_path: Path, output_path: Path) -> None:\n \"\"\"Converts the given training data file and saves it to the output directory.\n\n Args:\n source_path: Path to the training data file.\n output_path: Path to the output directory.\n \"\"\"\n reader = NLGMarkdownReader()\n writer = RasaYAMLWriter()\n\n output_nlg_path = 
cls.generate_path_for_converted_training_data_file(\n source_path, output_path\n )\n\n yaml_training_data = reader.read(source_path)\n writer.dump(output_nlg_path, yaml_training_data)\n\n print_success(f\"Converted NLG file: '{source_path}' >> '{output_nlg_path}'.\")\n","repo_name":"jugnugs/rasa-jp-tokenizer","sub_path":"rasa/nlu/training_data/converters/nlg_markdown_to_yaml_converter.py","file_name":"nlg_markdown_to_yaml_converter.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"70857056513","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom jvcom.ml_ops.model import predict , make_trainer\nfrom jvcom.ml_ops.registry import load_local_model\nfrom google.cloud import storage\nimport os\nfrom zipfile import ZipFile\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"], # Allows all origins\n allow_credentials=True,\n allow_methods=[\"*\"], # Allows all methods\n allow_headers=[\"*\"], # Allows all headers\n)\n\n# Load model.zip from Google Storage\n\nclient = storage.Client()\nbucket = client.bucket(os.environ.get('BUCKET_NAME'))\nblob = bucket.blob(\"model/\" + f\"model.zip\")\nblob.download_to_filename(f\"model.zip\")\nprint(blob)\n\nwith ZipFile(\"model.zip\", 'r') as zObject:\n\n # Extracting all the members of the zip into a specific location.\n zObject.extractall(path=\"model/\")\n\nTRAINER = make_trainer(load_local_model())\n\n# http://0.0.0.0:8000/pred?text=\n@app.get(\"/pred\")\ndef pred(text: str):\n\n y_pred = predict(text,TRAINER).predictions[0]\n response = {'proba': y_pred.tolist(),\n 'type': y_pred.tolist().index(max(y_pred.tolist()))}\n print(response)\n return response\n\n\n@app.get(\"/\")\ndef root():\n return {'greeting': 'Hello'}\n","repo_name":"enzoBouflet/Sexism_jv_forum","sub_path":"jvcom/api/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39436154792","text":"class Solution:\r\n def isSubsequence(self, s: str, t: str) -> bool:\r\n j = 0\r\n n = len(s)\r\n for i in range(len(t)):\r\n if j < n and s[j] == t[i]:\r\n j += 1\r\n return j == n\r\n"} +{"text":"import numpy as np\n\ndef covariance(x: list, y: list) -> float:\n if len(x) != len(y):\n raise ValueError('x and y must be the same length')\n \n numerator = 0\n n = len(x) - 1\n x_hat = np.mean(x)\n y_hat = np.mean(y)\n for i in range(0, len(x)):\n numerator += (x[i] - x_hat) * (y[i] - y_hat)\n return round((numerator / n), 2) \n\ndef standard_deviation(data: list) -> float:\n numerator = 0\n n = len(data) - 1\n x_hat = np.mean(data)\n for i in range(0, len(data)):\n numerator += (data[i] - x_hat) ** 2\n variance = round((numerator / n), 2)\n print(np.sqrt(variance))\n return np.sqrt(variance)\n\ndef pearson_correlation(x: list, y: list) -> float:\n cov = covariance(x=x, y=y)\n std_x = standard_deviation(data=x)\n std_y = standard_deviation(data=y)\n corr = cov / (std_x * std_y)\n return round(corr, 2)\n\n\n# Question 1\nx = [1.7, 2.4, .1, .5, -2.5, 6.6, 1.5, .2, .1, 2.1, 3.1, -1.1]\ny = [-.1, -.3, 3.2, 2.5, 5.2, -1.3, .2, 1.8, 2.2, .3, .1, 1.9]\n\nanswer1a = covariance(x, y)\nprint(answer1a) # -3.67\nanswer1b = 'Monthly returns between the two stocks are negatively correlated'\n\n# Question 2\nanswer2 = pearson_correlation(x=x, 
y=y)\nprint(answer2)","repo_name":"atfb10/Math-for-Data-Science","sub_path":"problem_sets/JointDistributions/hw.py","file_name":"hw.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24689015332","text":"import os, sys\n\n\ndef conv_to_int(x):\n try:\n x = float(x)\n except ValueError:\n print(\"Error: Unrecognizable number. Exiting\")\n sys.exit()\n return x\n\n\nops_connues = {\n '+': lambda x, y: x + y,\n '-': lambda x, y: x - y,\n '*': lambda x, y: x * y,\n '/': lambda x, y: x / y,\n }\n\nopnd1 = input(\"Entrer l'operande 1:\")\nopnd1 = conv_to_int(opnd1)\n\noptn = input(\"Entrez l'operation '+', '-', '*', '/' :\")\nif optn not in ops_connues:\n print(\"Error: Unrecognizable operation. Exiting\")\n sys.exit()\n\nopnd2 = input(\"Entrez l'operande 2:\")\nopnd2 = conv_to_int(opnd2)\nif (optn == '/') and (opnd2==0):\n print(\"Error: Cannot divide by 0. Exiting\")\n sys.exit()\n\nresult = ops_connues[optn](opnd1, opnd2)\nprint()\nprint(\"Result: \", result)\n","repo_name":"iurantr/python_exercises","sub_path":"calculatrice.py","file_name":"calculatrice.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9415929576","text":"import errno\n\nimport pytest\nfrom pr2test.context_manager import make_test_matrix\nfrom pr2test.marks import require_root\nfrom pr2test.tools import qdisc_exists\n\nfrom pyroute2 import NetlinkError\n\npytestmark = [require_root()]\ntest_matrix = make_test_matrix(targets=['local', 'netns'])\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_pfifo(context):\n index, ifname = context.default_interface\n context.ipr.tc('add', 'pfifo', index=index, limit=700)\n assert qdisc_exists(context.netns, 'pfifo', ifname=ifname, limit=700)\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_pfifo_fast(context):\n index, ifname = context.default_interface\n context.ipr.tc('add', 'pfifo_fast', index=index, handle=0)\n ret = qdisc_exists(context.netns, 'pfifo_fast', ifname=ifname)[0]\n assert ret.get_attr('TCA_OPTIONS')['priomap']\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_plug(context):\n index, ifname = context.default_interface\n context.ipr.tc('add', 'plug', index=index, limit=13107)\n assert qdisc_exists(context.netns, 'plug', ifname=ifname)\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_blackhole(context):\n index, ifname = context.default_interface\n context.ipr.tc('add', 'blackhole', index=index)\n assert qdisc_exists(context.netns, 'blackhole', ifname=ifname)\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_codel(context):\n index, ifname = context.default_interface\n context.ipr.tc(\n 'add',\n 'codel',\n index=index,\n handle='1:0',\n cdl_interval='40ms',\n cdl_target='2ms',\n cdl_limit=5000,\n cdl_ecn=1,\n )\n assert qdisc_exists(\n context.netns, 'codel', ifname=ifname, codel_ecn=1, codel_limit=5000\n )\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_sfq(context):\n index, ifname = context.default_interface\n context.ipr.tc('add', 'sfq', index=index, handle=0, perturb=10)\n assert qdisc_exists(context.netns, 'sfq', ifname=ifname, perturb_period=10)\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_tbf(context):\n index, ifname = context.default_interface\n 
context.ipr.tc(\n 'add',\n 'tbf',\n index=index,\n handle=0,\n rate='220kbit',\n latency='50ms',\n burst=1540,\n )\n opts = qdisc_exists(context.netns, 'tbf', ifname=ifname)[0].get_nested(\n 'TCA_OPTIONS', 'TCA_TBF_PARMS'\n )\n assert opts['rate'] == 27500\n\n\n@pytest.mark.parametrize('context', test_matrix, indirect=True)\ndef test_choke(context):\n index, ifname = context.default_interface\n try:\n context.ipr.tc(\n 'add', 'choke', index=index, limit=5500, bandwith=3000, ecn=True\n )\n except NetlinkError as e:\n if e.code == errno.ENOENT:\n pytest.skip('qdisc not supported: choke')\n raise\n opts = qdisc_exists(context.netns, 'choke', ifname=ifname)[0].get_nested(\n 'TCA_OPTIONS', 'TCA_CHOKE_PARMS'\n )\n assert opts['limit'] == 5500\n assert opts['qth_max'] == 1375\n assert opts['qth_min'] == 458\n","repo_name":"svinota/pyroute2","sub_path":"tests/test_linux/test_tc/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"} +{"seq_id":"32351100917","text":"'''\r\n Napisz program, który pozwala na rekurencyjne obliczanie dowolnej potęgi (N) wprowadzonej przez użytkownika liczby (M). Wykorzystując funkcję ​time.sleep() zatrzymaj wykonywanie programu na \r\n M sekund, jeśli liczba N jest liczbą pierwszą – sprawdź wykorzystują rekurencję.\r\n'''\r\nimport time\r\n\r\ndef isPrime(number,divisor=2):\r\n if number <= 2:\r\n return number==2;\r\n if number % divisor == 0:\r\n return False\r\n if divisor * divisor > number:\r\n return True\r\n return isPrime(number, divisor + 1)\r\n\r\ndef recursivePower(number,power):\r\n if power==0:\r\n return 1\r\n if power==1:\r\n return number\r\n \r\n result=number\r\n result*=recursivePower(number,power-1)\r\n return result\r\n\r\na = int(input('number '))\r\nb = int(input('power '))\r\n\r\nif(isPrime(b)):\r\n print(f'waiting {a} seconds...')\r\n time.sleep(a)\r\n print('done')\r\n\r\nprint(f'{a}^{b} = {recursivePower(a,b)}')","repo_name":"Pawel-Kica/wdi","sub_path":"python/lab7/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15653585093","text":"\n#receive data as single str, use \"split\" to split the elements, use list comprehension to turn them into nums\n\nnumbers = [int(el) for el in input().split(\", \")]\n\n# run a for loop to locate the 0's and move them to back of list\n\nfor num in numbers:\n \n if num == 0:\n numbers.remove(num)\n numbers.append(num)\n \n# print result\nprint(numbers)\n","repo_name":"geodimitrov/Python-Fundamentals-SoftUni","sub_path":"Lists/Basics/More Exercises/01. zeros_to_back.py","file_name":"01. 
zeros_to_back.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11267478157","text":"import math\r\nimport random\r\nfrom numpy.random import randint\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\nlstx = randint(1,100,[12])\r\nlsty = randint(1,100,[12])\r\nprint(lstx,lsty)\r\nx = np.array(lstx)\r\nx = np.sort(x)\r\ny = np.array(lsty)\r\ny = np.sort(y)\r\nradian = list()\r\ndistance = list()\r\nangle = list()\r\nplotx = list()\r\nploty = list()\r\narx = np.array([-0.3,-0.2,-0.1,0,0.1,0.2,0.3])\r\nary = np.array([0,0,0,0,0,0,0])\r\n\r\nfor id in range(len(x)):\r\n r = math.sqrt(pow(x[id],2)+pow(y[id],2))\r\n distance.append(r+0.1)\r\n\r\nfor id in range(len(x)):\r\n r = math.atan2(y[id],x[id])\r\n radian.append(r+0.2)\r\n d = math.degrees(r)\r\n angle.append(d)\r\n\r\n\r\narz = np.array(distance)\r\narradian = np.array(radian)\r\nangl = np.array(angle)\r\nfor id in range(len(x)):\r\n x1 = arz[id]*math.cos(arradian[id])\r\n y1 = arz[id]*math.sin(arradian[id])\r\n plotx.append((x1))\r\n ploty.append((y1))\r\n\r\npx = np.array(plotx)\r\npy = np.array(ploty)\r\n\r\n\r\n\r\nprint(px,py)\r\nprint(angle)\r\nplt.subplot(311)\r\nplt.scatter(x,y,c = 'red')\r\nplt.ylabel('Y axis') \r\nplt.xlabel('X axis') \r\nplt.scatter(arx,ary,c = 'blue')\r\nplt.scatter(px,py,c = 'green')\r\nplt.show()\r\nplt.subplot(312)\r\n\r\nplt.title('Error Detection Graph')\r\nplt.plot(x,y)\r\nplt.plot(px,py)\r\nplt.scatter(px,py,marker = 'x',c = 'green')\r\nplt.ylabel('Y axis') \r\nplt.xlabel('X axis') \r\nplt.show()\r\n\r\nfor id in range(len(x)):\r\n np.random.seed(0)\r\n\r\n doa = np.array([angl[id]]) # Direction of arrival\r\n N = 200 # Snapshots\r\n w = np.array([np.pi / 4]) # Frequency\r\n M = 10 # Number of array elements\r\n P = len(w) # The number of signal\r\n lambd = 150 # Wavelength\r\n d = lambd / 2 # Element spacing\r\n snr = 20 # SNA\r\n\r\n D = np.zeros((M, P), dtype=np.complex128) # To create a matrix with P row and M column\r\n for k in range(P):\r\n D[:, k] = np.exp(-1j * 2 * np.pi * d * np.sin(doa[k]) / lambd * np.arange(M))\r\n\r\n xx = 2 * np.exp(1j * np.outer(w, np.arange(N))) # Simulate signal\r\n x = D @ xx\r\n x += np.random.randn(*x.shape) * np.sqrt(0.5 * 10 ** (-snr / 10)) # Insert Gaussian white noise\r\n\r\n R = x @ x.T.conj() / N # Data covariance matrix\r\n N_, V = np.linalg.eig(R) # Find the eigenvalues and eigenvectors of R\r\n NN = V[:, :M - P] # Estimate noise subspace\r\n\r\n theta = np.arange(0, 90.5, 0.2) # Peak search\r\n Pmusic = np.zeros_like(theta)\r\n for ii in range(len(theta)):\r\n SS = np.exp(-1j * 2 * np.pi * d * np.sin(theta[ii] / 180 * np.pi) / lambd * np.arange(M))\r\n PP = abs(SS @ NN @ NN.conj().T @ SS.conj())\r\n Pmusic[ii] = 1 / PP\r\n\r\n Pmusic = 10 * np.log10(Pmusic / Pmusic.max()) # Spatial spectrum function\r\n plt.subplot(313)\r\n plt.plot(theta, Pmusic, '-k')\r\n plt.xlabel('angle \\u03b8/degree')\r\n plt.ylabel('spectrum function P(\\u03b8) /dB')\r\n plt.title('DOA estimation based on MUSIC algorithm')\r\n plt.grid(True)\r\n plt.show()","repo_name":"Sahithkumar-31/Project-Location_of_mobile_nodes","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33511370548","text":"class Book:\n Information=None\n def __init__(self, ID, Name): #constructor for ID and Name\n self.ID=ID\n 
self.Name=Name\n\n def addBookDescription(self, Information): #method for addin information\n self.Information=Information\n\n def Description(self): #Method to print all information about the\n print('The ID is ',self.ID) #book\n print('The Name of the book is :',self.Name)\n if(self.Information != None):\n print('The information are:')\n for i in self.Information:\n print(i)\n\n\nb1=Book('32','The Book') #creating an object for the class\nb1.addBookDescription(['Great book on cultural Facts','Best release']) #adding information\nb1.Description() #calling method to print all the information \n","repo_name":"Neeraj-Palliyali/chegg_python","sub_path":"book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34693324326","text":"from django.db import models\nimport os\nfrom django.utils.text import slugify\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext as _\nfrom django.contrib.postgres.fields import JSONField\n\n\nclass UserProfile(models.Model):\n def image_path(self, filename):\n extension = os.path.splitext(filename)[1][1:]\n file_name = os.path.splitext(filename)[0]\n url = \"Users/id-%s/profile/%s.%s\" % (self.user.id, slugify(str(file_name)), extension)\n return url\n\n user = models.OneToOneField(User)\n image = models.ImageField(upload_to=image_path, null=True, blank=True, verbose_name=_(\"Foto de perfil\"))\n\n def __str__(self):\n return self.user.get_full_name()\n\n class Meta:\n verbose_name = _(\"Perfil de usuario\")\n verbose_name_plural = _(\"Perfil de usuarios\")\n\n\nclass FileTemp(models.Model):\n\n def file_path(self, filename):\n extension = os.path.splitext(filename)[1][1:]\n file_name = os.path.splitext(filename)[0]\n url = \"tmp/%s.%s\" % (slugify(str(file_name)), extension)\n return url\n\n file = models.FileField(upload_to=file_path)\n\n\nclass ExcelData(models.Model):\n username = models.ForeignKey(User)\n excel_name = models.CharField(max_length=500)\n # excel_sheet = models.CharField(max_length=500)\n data = JSONField()\n\n\nclass GraphExcelData(models.Model):\n username = models.ForeignKey(User)\n excel_name = models.CharField(max_length=500)\n graph_key = models.IntegerField(default=1)\n\n\nclass DataAnalytic(models.Model):\n\n def file_path(self, filename):\n extension = os.path.splitext(filename)[1][1:]\n file_name = os.path.splitext(filename)[0]\n url = \"tmp/%s.%s\" % (slugify(str(file_name)), extension)\n return url\n\n user = models.ForeignKey(User)\n file = models.FileField(upload_to=file_path)\n name = models.CharField(null=True, blank=True, max_length=10000)\n json = JSONField(null=True, blank=True)\n timestamp = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name if self.name else str(self.timestamp)\n\n def save(self, *args, **kwargs):\n if self.file and not self.id:\n self.name = self.file.name\n super(DataAnalytic, self).save(*args, **kwargs)\n\n\nclass Chart(models.Model):\n\n __charts = (\n (1, 'Line chart'),\n (2, 'Bar chart'),\n (3, 'Column chart'),\n (4, 'Pie chart'),\n (5, 'Spline chart'),\n (6, 'Basic Area Chart'),\n (7, 'Area Spline chart'),\n (8, 'Waterfall chart'),\n (9, 'Polygon chart'),\n (10, 'Scatter Chart')\n )\n\n type_key = models.PositiveIntegerField(_('Tipo'), choices=__charts)\n # name = models.CharField(verbose_name=_('Nombre'), max_length=50)\n position_x = models.CharField(verbose_name=_('Posición X'), max_length=100, null=True, 
blank=True)\n position_y = models.CharField(verbose_name=_('Posición Y'), max_length=100, null=True, blank=True)\n label = models.CharField(verbose_name=_('Posición'), max_length=100, null=True, blank=True)\n data_file = models.ForeignKey(DataAnalytic)\n #\n # def __str__(self):\n # return self.\n\n","repo_name":"CarlosUicab/grafistica","sub_path":"Grafistica/apps/viewer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1904231073","text":"import unittest\nfrom models import news\nNews = news.News\n\nclass NewsTest(unittest.TestCase):\n '''\n Tesk Class to test the behaviour\n '''\n def setUp(self):\n '''\n Set up\n '''\n self.new_news = News(1234, 'Citizen', 'Politics in kenya')\n\n def test_instance(self):\n self.assertTrue(isinstance(self.new_news, News))\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"John-Kimani/hello-flask","sub_path":"app/news__test.py","file_name":"news__test.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23989053124","text":"\n\ndef calc(op, line):\n line = [int(a) for a in line]\n\n if(op == 'S'): # soma\n return 'T ' + str(sum(line))\n\n elif(op == 'R'): # resto\n return 'T ' + str(2*line[0] - sum(line))\n\n else:\n return 'E'\n\n\n\nlines = int(input())\n\nfor i in range(lines):\n line = input().split()\n print(calc(line[0], line[1:]))\n\n","repo_name":"gmelodie/College","sub_path":"AdvAlg2/io/ioputs3.py","file_name":"ioputs3.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6941562799","text":"from flask_restplus import Resource\n\nfrom endpoints.Complaint import db\n\nclass Complaint(Resource):\n def get(self,complaint_id):\n \"\"\"\n Get the complaint from the database\n :param complaint_id: Id of the to get complaint\n :return:\n \"\"\"\n complaint = db.get(complaint_id)\n item = {\n \"_id\": str(complaint['_id']),\n \"type\": complaint['type'],\n \"description\": complaint['description'],\n \"location\": complaint['location'],\n \"resolved\": complaint['resolved'],\n \"timestamp\": complaint['timestamp'].strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"sender-info\" : complaint['sender']\n }\n return item\n","repo_name":"StefanEvanghelides/Net-Computing","sub_path":"Server/ComplaintsAPI/endpoints/Complaint/complaint.py","file_name":"complaint.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8864721809","text":"from sklearn.linear_model import Ridge\nfrom sklearn.model_selection import train_test_split\nfrom xgboost import XGBRegressor\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom joblib import dump\nimport pandas as pd\nimport numpy as np\nimport logging\n\nlogging.basicConfig(\n format=\"%(levelname)s: %(message)s\", \n level=logging.INFO\n)\n\n\nclass Model:\n model = None\n X_train = None\n X_test = None\n y_train = None\n y_test = None\n\n def train_model(self):\n df: pd.DataFrame = pd.DataFrame({\n \"a\": np.random.rand(500) * 10,\n \"b\": np.random.rand(500) * 5,\n \"c\": np.random.rand(500) * 2,\n \"y\": np.random.rand(500) * 50\n })\n\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(\n df[[col for col in df.columns if col != \"y\"]], df.y\n )\n\n 
self.model = XGBRegressor()\n\n self.model.fit(self.X_train, self.y_train)\n\n def score_model(self):\n predictions = self.model.predict(self.X_test)\n score = mean_squared_error(self.y_test, predictions)\n logging.info(f\"Model score: {score:.2}\")\n\n def save_model(self):\n dump(self.model, \"./model.joblib\")\n\n\ndef main():\n model = Model()\n model.train_model()\n model.score_model()\n model.save_model()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"jtbaker/cookiecutter_fullstack","sub_path":"{{cookiecutter.project_slug}}/task_queue/tests/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17560391466","text":"import matplotlib.pyplot as plt\nimport glob \nfrom scipy.fft import fft, fftfreq, ifft\nimport numpy as np\nfrom scipy.signal import savgol_filter\nfrom scipy.interpolate import interp1d\nimport math\nfrom statistics import mean\nimport copy\n\n# for reading data\ndef get_data(filename):\n with open(filename, 'r') as f:\n raw_data = f.read().splitlines()[1:]\n data = [float(item) for item in raw_data]\n return data\n\ndef get_freq_names(path, patient):\n name1 = path+'freq/'+patient+'_so.csv'\n name2 = path+'freq/'+patient+'_do.csv'\n name3 = path+'freq/'+patient+'_mi.csv'\n return name1, name2, name3\n\ndef get_pres_names(path, patient):\n name1 = path+'pres/'+patient+'_so.csv'\n name2 = path+'pres/'+patient+'_do.csv'\n name3 = path+'pres/'+patient+'_mi.csv'\n return name1, name2, name3\n\ndef get_all_data(path, patient):\n pname1, pname2, pname3 = get_pres_names(path, patient)\n fname1, fname2, fname3 = get_freq_names(path, patient)\n\n p1 = get_data(pname1)\n p2 = get_data(pname2)\n p3 = get_data(pname3)\n\n f1 = get_data(fname1)\n f2 = get_data(fname2)\n f3 = get_data(fname3)\n \n plen1 = len(p1)\n flen1 = len(f1)\n plen2 = len(p2)\n flen2 = len(f2)\n plen3 = len(p3)\n flen3 = len(f3)\n\n if plen1 None:\n self.nums = nums\n \n def removeDuplicate(self, nums) -> int:\n i = 0\n for j in range(1,len(nums)):\n if nums[i] != nums[j]:\n i += 1\n nums[i] = nums[j]\n \n return i+1 # always the index 1 less the length\n\n\nif __name__ == \"__main__\":\n nums = list(map(int, input(\"array : \").split()))\n\n result = solution(nums)\n k = result.removeDuplicate(nums)\n print(f\"result : \", result.nums[:k])\n","repo_name":"lonebots/python-programming","sub_path":"leet-code/arrays/4-remove-duplicates.py","file_name":"4-remove-duplicates.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28086469198","text":"from pathlib import Path\n\n\nclass RAW_PATH_CONSTANTS:\n\n S_PROJECT_PATH = str(Path(__file__).parent.parent.parent.parent.parent)\n S_LOCAL_FILE_PATH = str(Path(__file__).parent.parent.parent.parent.parent.parent) + \"/user_crawler_directory\"\n S_RAW_PATH = S_PROJECT_PATH + \"/\"\n S_DATASET_PATH = \"/parse_services/raw/crawled_classifier_websites.csv\"\n\nclass CRAWL_SETTINGS_CONSTANTS:\n\n # Allowed Extentions\n S_DOC_TYPES = [\".pdf\", \".msword\", \".document\", \".docx\", \".doc\"]\n\n # Local URL\n S_START_URL = \"https://drive.google.com/uc?export=download&id=1ZG7D2NsI-NrVyp3SDq9q4zcrgFi3jhaG\"\n\n # Total Thread Instances Allowed\n S_MAX_THREAD_COUNT_PER_INSTANCE = 30\n\n # Time Delay to Invoke New Url Requests\n S_ICRAWL_INVOKE_DELAY = 2\n S_CRAWLER_INVOKE_DELAY = 2\n S_ICRAWL_IMAGE_INVOKE_DELAY = 2\n 
S_TOR_NEW_CIRCUIT_INVOKE_DELAY = 300\n S_LOCAL_FILE_CRAWLER_INVOKE_DELAY = 1\n S_LOCAL_FILE_CRAWLER_INVOKE_DELAY_LONG = 5\n\n # Max Allowed Depth\n S_MAX_ALLOWED_DEPTH = 2\n S_DEFAULT_DEPTH = 0\n\n # Max URL Timeout\n S_URL_TIMEOUT = 11170\n S_HEADER_TIMEOUT = 30\n\n # Max Host Queue Size\n S_MAX_HOST_QUEUE_SIZE = 100\n S_MAX_SUBHOST_QUEUE_SIZE = 100\n\n # Max URL Size\n S_MAX_URL_SIZE = 480\n\n # Backup Time\n S_BACKUP_TIME_DELAY = 86400\n S_BACKUP_FETCH_LIMIT = 50\n\n # Min Image Content Size\n S_MIN_CONTENT_LENGTH = 50000\n\n # User Agent\n S_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; rv:68.0) Gecko/20100101 Firefox/68.0'\n\n # Crawl Catagory\n S_THREAD_CATEGORY_GENERAL = \"general\"\n S_THREAD_CATEGORY_UNKNOWN = \"unknown\"\n\n # Max Static Images\n S_STATIC_PARSER_LIST_MAX_SIZE = 10\n S_MIN_CONTENT_LENGTH = 50000\n\n","repo_name":"msmannan00/Genesis-Search","sub_path":"modules/user_data_parser/parse_instance/constants/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"18812086193","text":"import cv2\r\nimport numpy as np\r\nfrom pupil_apriltags import Detector\r\n\r\nLINE_LENGTH = 5\r\nCENTER_COLOR = (0, 255, 0)\r\nCORNER_COLOR = (255, 0, 255)\r\n\r\n### Some utility functions to simplify drawing on the camera feed\r\n# draw a crosshair\r\ndef plotPoint(image, center, color):\r\n center = (int(center[0]), int(center[1]))\r\n image = cv2.line(image,\r\n (center[0] - LINE_LENGTH, center[1]),\r\n (center[0] + LINE_LENGTH, center[1]),\r\n color,\r\n 3)\r\n image = cv2.line(image,\r\n (center[0], center[1] - LINE_LENGTH),\r\n (center[0], center[1] + LINE_LENGTH),\r\n color,\r\n 3)\r\n return image\r\n\r\n# plot a little text\r\ndef plotText(image, center, color, text):\r\n center = (int(center[0]) + 4, int(center[1]) - 4)\r\n return cv2.putText(image, str(text), center, cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.5, color, 3)\r\n\r\n\r\n\r\ncap=cv2.VideoCapture(1) #camera used\r\ndetector = Detector(families='tag36h11', \r\n nthreads=1,\r\n quad_decimate=1.0,\r\n quad_sigma=0.0,\r\n refine_edges=1,\r\n decode_sharpening=0.25,\r\n debug=0,\r\n ) #physical size of the apriltag\r\n\r\nintrisic = [500,500,960,540] # camera parameters, [fx, fy cx, cy]\r\ntagsize = 0.036 #physical size of printed tag, unit = meter\r\n\r\nlooping = True\r\n\r\nwhile (looping):\r\n ret, frame = cap.read()\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n tags = detector.detect(gray, estimate_tag_pose=True, camera_params=intrisic, tag_size=tagsize)\r\n \r\n if not tags:\r\n print(\"Nothing\")\r\n else:\r\n for tag in tags:\r\n print(tag.pose_R)\r\n frame = plotPoint(frame, tag.center, CENTER_COLOR)\r\n frame = plotText(frame, tag.center, CENTER_COLOR, tag.pose_R)\r\n for corner in tag.corners:\r\n frame = plotPoint(frame, corner, CORNER_COLOR)\r\n\r\n cv2.imshow('frame', frame)\r\n\r\n key = cv2.waitKey(1000) #ms\r\n\t# terminate the loop if the 'Return' key is hit\r\n if key == 13:\r\n looping = False\r\n\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"zhangyutong0318/2.120","sub_path":"apriltagdetection.py","file_name":"apriltagdetection.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7515008793","text":"import re\nfrom ..models import Area, Trabalho, Professor, EscolhaProf\nfrom ..forms import AddJobs\nfrom django.shortcuts import render, redirect\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\n\n@login_required(login_url='/login')\ndef showdataform(request):\n #print (request.user)\n if request.method =='GET':\n areas = Area.objects.all()\n context = {\n 'areas':areas,\n }\n return render(request,'index/index.html',context=context)\n else:\n\n print(request.POST)\n nome = request.user\n prof = Professor.objects.filter(nome=nome).first()\n #print(prof.id)\n trabarea = request.POST.get('trabarea')\n print(trabarea)\n \n if(EscolhaProf.objects.filter(trabalho_id=trabarea)):\n print(\"oi\")\n areas = Area.objects.all()\n context = {\n 'areas':areas,\n 'msg': 'O Trabalho anterior já foi adicionado!'\n }\n return render(request,'index/index.html',context=context)\n else:\n trabalho = Trabalho.objects.filter(id=int(trabarea)).first()\n print(prof.id)\n print(\"-------------\")\n escolhaprof = EscolhaProf(professor=prof,trabalho=trabalho)\n escolhaprof.save()\n return redirect('showdataform')\n \n\n\n","repo_name":"Evelyn-Oliveira/Projeto-Django","sub_path":"afa/tcc2022/views/showdataform.py","file_name":"showdataform.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40317269731","text":"from steganography.steganography import Steganography\nfrom select_friend import select_a_friend\nfrom datetime import datetime\nfrom spy_details import *\nimport csv\n\n\ndef send_message():\n #\n friend_choice = friends[select_a_friend()].name\n original_image = raw_input(\"Enter the name of the image?\")\n output_path = 'output.jpg'\n text = raw_input(\"Enter your secret message. \")\n Steganography.encode(original_image, output_path, text)\n # Successful message after encoding\n print(\"Encryption successfully!!\")\n # the message will be stored in chat message class\n new_chat = ChatMessage(spy_n= spy.name, friend_n= friend_choice, time= datetime.now().strftime(\"%d %B %Y\"), message=text)\n\n # name of the friend along which we add message.\n chats.append(new_chat)\n print(\"your secret message is ready.\")\n with open('chats.csv', 'a') as chats_data:\n writer = csv.writer(chats_data)\n writer.writerow([new_chat.spy_n, new_chat.friend_n, new_chat.time, new_chat.message])\n\n\n\n\n\n","repo_name":"Shivanisingh05/Spychat","sub_path":"send_message.py","file_name":"send_message.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3207979982","text":"class Solution:\n def smallestValue(self, n: int) -> int:\n\n while n > self.f(n):\n n = self.f(n)\n return n\n\n def f(self, n):\n for i in range(2, n):\n if n % i == 0:\n return i + int(n // i)\n return n\n\n\nif __name__ == '__main__':\n n = 12\n solution = Solution()\n res = solution.smallestValue(n)\n print(res)","repo_name":"foreverxujiahuan/algorithm","sub_path":"数学/lc6266.py","file_name":"lc6266.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2824784102","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nimport sqlite3\n\nprint('Files with .sqlite3 extension in current dir:')\nprint(*glob.glob('*.sqlite3'), sep='\\n')\nfile_names_db = [os.path.splitext(val)[0] for val in glob.glob('*.sqlite3')]\nchosen_db = input(f'Choose from: {file_names_db} -> type one: ')\n\nif chosen_db in file_names_db:\n chosen_db = 
chosen_db+'.sqlite3'\nelse:\n print('Choose valid option, please')\n exit()\n\ncon = sqlite3.connect(chosen_db)\ncur = con.cursor()\ncur.execute('''\nSELECT name\nFROM sqlite_schema\nWHERE type='table';\n''')\n\nprint(f'Available tables in {chosen_db} are: ')\nfor result in cur:\n print('-', *result)\n\n\nchosen_table = input('Select table to show: ')\nprint(f'In table \"{chosen_table}\" rows are:')\n\ncur.execute(f'''\nSELECT *\nFROM {chosen_table};\n''')\nfor result in cur:\n print(*result)\n\ncon.close()\n","repo_name":"sergey-samoylov/db_query","sub_path":"db_query.py","file_name":"db_query.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16216287239","text":"def common_child(a, b):\n m = len(a)\n n = len(b)\n cell = [[0]*(n+1) for j in range(m+1)]\n print(cell)\n for i in range(1, m+1):\n for j in range(1, n+1):\n if a[i-1] == b[j-1]:\n cell[i][j] = cell[i-1][j-1] + 1\n else:\n cell[i][j] = max(cell[i-1][j], cell[i][j-1])\n print(cell)\n return cell[m][n]\n\n\ndef common_string(a, b):\n m, n = len(a), len(b)\n prev, cur = [0] * (n + 1), [0] * (n + 1)\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if a[i - 1] == b[j - 1]:\n cur[j] = 1 + prev[j - 1]\n else:\n cur[j] = max(cur[j - 1], prev[j])\n cur, prev = prev, cur\n print(cur, prev)\n return prev[n]\n\n\nif __name__ == '__main__':\n s1 = 'ABCDEF'\n s2 = 'FBDAMNB'\n common_child(s1, s2)\n print(common_string(s1, s2))\n","repo_name":"xiaoxue11/hank_practice","sub_path":"String/05_commonChild.py","file_name":"05_commonChild.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43258343506","text":"import config\nimport psycopg2\nimport ClientController\nimport itertools\n\nclass SecTestController(ClientController.ClientController):\n \"\"\"\n Security test controller class used by admin.py\n \"\"\"\n\n STATUS_HAS_ACCESS = 0\n STATUS_NO_ACCESS = 1\n\n _config = {}\n\n def __init__(self):\n self._sUser = config.DB_PLAYER_USER\n self._sPass = config.DB_PLAYER_PASS\n self._sCrtFile = config.DB_PLAYER_CRT_FILE\n self._sKeyFile = config.DB_PLAYER_KEY_FILE\n super().__init__()\n try:\n self.initConfig()\n except psycopg2.Error as e:\n print('[-] There is a function missing')\n exit(0)\n\n def initConfig(self):\n self._config = \\\n { \\\n 'P_ACCESS': [ \\\n (self._oDB.proc('submitFlagFromIp(varchar,varchar)'),['10.0.0.1', 'b']), \\\n (self._oDB.proc('getScore(integer,varchar,varchar)'),[30,None,None]), \\\n (self._oDB.proc('getCatProgressFromIp(varchar)'),['10.0.0.1']), \\\n (self._oDB.proc('getFlagProgressFromIp(varchar)'),['10.0.0.1']), \\\n (self._oDB.proc('getNewsList()'),[]) \\\n ], \\\n 'P_NO_ACCESS': [ \\\n (self._oDB.proc('addTeam(varchar,varchar)'),['Team Name', '192.168.1.0/24']), \\\n (self._oDB.proc('addFlagStatus(integer,varchar,text)'),[4, 'Name', 'blabla']), \\\n (self._oDB.proc('addHost(varchar,varchar,text)'),['a', 'b', 'c']), \\\n (self._oDB.proc('addFlagCategory(varchar,varchar,text,boolean)'),['a', 'b', 'c', None]), \\\n (self._oDB.proc('addRandomFlag(varchar,integer,varchar,varchar,integer,varchar,varchar,varchar,boolean,text,varchar)'),['name', 100, 'host', 'cat', 1, None, 'Author', 'Standard', True, 'desc', 'updatecmd']), \\\n (self._oDB.proc('addKingFlagFromName(varchar,varchar,integer)'),['a', 'b', 1]), \\\n (self._oDB.proc('addNews(varchar,varchar)'),['a','2014-03-03']), \\\n 
(self._oDB.proc('getAllKingFlags()'),[]), \\\n (self._oDB.proc('getKingFlagsFromHost(varchar)'),['asdf']), \\\n (self._oDB.proc('getKingFlagsFromName(varchar)'),['asdf']), \\\n (self._oDB.proc('addRandomKingFlagFromId(integer,integer)'),[1,2]), \\\n (self._oDB.proc('getScoreProgress(integer)'),[20]), \\\n (self._oDB.proc('getGameStats()'),[]), \\\n (self._oDB.proc('getSettings()'),[]), \\\n (self._oDB.proc('startGame()'),[]), \\\n (self._oDB.proc('insertRandomData()'),[]) \\\n ] \\\n }\n\n \n def testSecurity(self):\n for (f,a) in self._config['P_ACCESS']:\n try:\n if len(a) > 0:\n ret = f(*a)\n else:\n ret = f()\n#except postgresql.exceptions.InsufficientPrivilegeError:\n# print(f.name+'(): Player does not have access: ERROR')\n except psycopg2.Error as e:\n #print('[-] ('+str(e.code)+') '+e.message)\n print(f.name+'(): Player have access: OK')\n else:\n print(f.name+'(): Player have access: OK')\n\n for (f,a) in self._config['P_NO_ACCESS']:\n try:\n if len(a) > 0:\n ret = f(*a)\n else:\n ret = f()\n # An operation must be done to trigger a privilege error on itertools.chain\n # Operation here is list(ret)\n if type(ret) is itertools.chain:\n ret2 = list(ret)\n except postgresql.exceptions.InsufficientPrivilegeError:\n print(f.name+'(): Player does not have access: OK')\n except Exception as e:\n print(e)\n else:\n print(f.name+'(): Player have access: ERROR')\n\n","repo_name":"hackfestca/hfscoreboard","sub_path":"lib/SecTestController.py","file_name":"SecTestController.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"71375785154","text":"from django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import parser_classes\nfrom REST.Interpreters import GDriveInterpreter\nfrom REST.Interpreters import GClassInterpreter\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser\nfrom django.http import HttpResponseRedirect\nfrom digipackDB import DatabaseManager\nfrom Auth import AuthenticationManager \nfrom django.http import FileResponse\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.http import HttpRequest\nfrom django.shortcuts import render\nfrom .forms import UploadFileForm\nimport mimetypes\nimport pathlib\nimport json\nimport os\n\n#@login_required(login_url='/login')\ndef index(request):\n return render(request, 'index.html')\n\ndef login(request):\n return render(request, 'login.html')\n\n@api_view(['POST'])\n@parser_classes([JSONParser])\n@ensure_csrf_cookie\ndef MobileAuth(request):\n #pass webclientsecret to auth manager\n AM = AuthenticationManager.AuthenticationManager('webClientSecret.json')\n\n #load the json data\n auth_json = request.data\n\n #pass access token to the auth manager\n credentials = AM.redeemAuthCode(auth_json[\"googleAccessToken\"])\n\n return JsonResponse({'Result' : 'ACK'})\n \n \n@api_view(['POST'])\n@parser_classes([JSONParser])\n@ensure_csrf_cookie\ndef WebAuth(request):\n #pass webclientsecret to auth manager\n AM = AuthenticationManager.AuthenticationManager('webClientSecret.json')\n\n #load the json data\n auth_json = request.data\n\n #pass access token to the auth manager\n credentials = AM.redeemAuthCodeWeb(auth_json[\"googleAccessToken\"])\n\n return JsonResponse({'Result' : 'ACK'}) \n \n\n#TODO: add a restriction that only 
allows a user to access their own url\n@api_view(['GET'])\ndef InitDrive(request, user):\n\n secretFile = open(\"webClientSecret.json\", \"r\")\n secretText = secretFile.read()\n\n # get file list // next try with drive list\n drivelist = GDriveInterpreter.get_file_list('webClientSecret.json', user, 'my-drive')\n\n filedata = []\n\n for d in drivelist:\n if d.get(\"name\") != None:\n\n val = {\n \"fileName\": d[\"name\"],\n \"fileid\": d[\"driveID\"]\n }\n filedata.append(val)\n\n # spit the file name list back\n return JsonResponse({\"Files\" : filedata})\n\n#TODO: add restriction on this one too\n@api_view(['GET'])\ndef DriveServerDownload(request, user, fileid):\n\n #pass webclientsecret to auth manager\n AM = AuthenticationManager.AuthenticationManager('webClientSecret.json')\n\n #get credentials\n credentials = AM.getCredentialsFromDatabase(user)\n\n #get file metadata and\n filemeta = GDriveInterpreter.get_file_metadata(credentials, user, fileid) \n GDriveInterpreter.get_file('webClientSecret.json', user, filemeta)\n \n #print (filemeta)\n\n return JsonResponse({'File Metadata' : filemeta})\n\n#@api_view(['GET'])\ndef DriveClientDownload(request, filename):\n\n #use mimetype to determine the file type\n file_path = './DriveStorage/' + filename\n file_to_download = open(file_path, 'rb')\n content_type = mimetypes.guess_type(filename)[0]\n response = FileResponse(file_to_download, content_type=content_type)\n response['Content-Disposition'] = \"attachment ; filename=%s\" % filename\n\n return response\n\n@api_view(['GET'])\ndef DrivePWADownload(request, user, fileid, filename):\n #pass webclientsecret to auth manager\n AM = AuthenticationManager.AuthenticationManager('webClientSecret.json')\n\n #get credentials\n credentials = AM.getCredentialsFromDatabase(user)\n\n #get file metadata and\n filemeta = GDriveInterpreter.get_file_metadata(credentials, user, fileid) \n GDriveInterpreter.get_file('webClientSecret.json', user, filemeta)\n\n #use mimetype to determine the file type\n file_path = './DriveStorage/' + filename\n file_to_download = open(file_path, 'rb')\n content_type = mimetypes.guess_type(filename)[0]\n response = FileResponse(file_to_download, content_type=content_type)\n response['Content-Disposition'] = \"attachment ; filename=%s\" % filename\n\n return response\n\n@api_view(['POST'])\ndef FileUpload(request):\n\n path = './media/' + request.FILES[fileName]\n print('This is the path: ' + path)\n with open(path, 'wb+') as destination:\n for chunk in request.FILES['uploaded_file'].chunks():\n destination.write(chunk)\n return HttpResponse('the file is saved')\n\n#TODO: add a restriction that only allows a user to access their own url\n@api_view(['GET'])\ndef InitGClass(request, user):\n\n # get class info\n courses = GClassInterpreter.get_course_list('webClientSecret.json', user)\n\n # spit the file name list back\n return JsonResponse({\"Courses\" : courses})\n","repo_name":"CANIS-NAU/digiLearn","sub_path":"com/DigiLearn/DigitalBackpack/Server/Django/commanager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41986153672","text":"from app import db\nfrom reader.seriediaria import SerieDiariaReader\nfrom datetime import date\nfrom model.seriesemanal import SerieSemanalModel\n\nclass SerieSemanalLoader(): \n def __init__(self):\n self.symbol = None\n self.anyo = None\n self.semana = None\n self.fch_semana_inicio = None \n\n def 
procesar(self,symbol, anyo = None, semana = None): \n self.symbol = symbol\n self.anyo = anyo\n self.semana = semana\n \n if anyo is not None and semana is not None:\n self.fch_semana_inicio = date.fromisocalendar(anyo, semana, 1)\n \n self.__eliminar_series_semanales()\n \n #(anyo, semana_ini) = self.eliminar_serie_semanal(symbol=symbol, profundidad=profundidad)\n base = SerieDiariaReader.get_preseries_semanal(self.symbol, self.fch_semana_inicio) \n \n fch_registro = date.today()\n for preserie_semana in base: \n self._procesar_serie(symbol=symbol, preserie_semana=preserie_semana, fch_registro=fch_registro)\n\n def __eliminar_series_semanales(self):\n \n stmt = (\n db.delete(SerieSemanalModel).\n where(SerieSemanalModel.symbol == self.symbol)\n )\n\n if self.fch_semana_inicio is not None:\n stmt = stmt.where(\n SerieSemanalModel.fch_semana >= self.fch_semana_inicio \n )\n\n db.session.execute(stmt)\n \n def _procesar_serie(self,symbol, preserie_semana, fch_registro):\n serie_apertura = SerieDiariaReader.get_serie(symbol, preserie_semana.open_date) \n serie_cierre = SerieDiariaReader.get_serie(symbol, preserie_semana.close_date) \n\n nueva_serie_semanal = SerieSemanalModel(\n symbol = preserie_semana.symbol,\n fch_semana = preserie_semana.fch_semana,\n anyo = preserie_semana.anyo,\n semana = preserie_semana.semana,\n imp_apertura = serie_apertura.imp_apertura,\n imp_maximo = preserie_semana.high,\n imp_minimo = preserie_semana.low,\n imp_cierre = serie_cierre.imp_cierre,\n imp_apertura_ajus = serie_apertura.imp_apertura_ajus,\n imp_maximo_ajus = preserie_semana.adj_high,\n imp_minimo_ajus = preserie_semana.adj_low,\n imp_cierre_ajus = serie_cierre.imp_cierre_ajus\n )\n\n db.session.add(nueva_serie_semanal)","repo_name":"ToxidSeed/bagholdercuy","sub_path":"processor/seriesemanal.py","file_name":"seriesemanal.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18085745838","text":"# Problem Set 3, problem 1\n\ndef radiationExposure(start, stop, step):\n '''\n Computes and returns the amount of radiation exposed\n to between the start and stop times. Calls the \n function f (defined for you in the grading script)\n to obtain the value of the function at any point.\n \n start: integer, the time at which exposure begins\n stop: integer, the time at which exposure ends\n step: float, the width of each rectangle. 
You can assume that\n the step size will always partition the space evenly.\n\n returns: float, the amount of radiation exposed to \n between start and stop times.\n '''\n # Variable for the total amount of radiation\n totalRadiation = 0\n\n # Variable for holding the current time\n time = start\n\n # Iterate over the time intervals and add the radiation to the total radiation\n while time < stop:\n totalRadiation += step * f(time)\n time += step\n\n # Return the computed value\n return totalRadiation\n","repo_name":"Sacusa/online-courses","sub_path":"6.00.1x - Introduction to Computer Science and Programming Using Python/Week 3/02 - Problem Set 3/pset3a.py","file_name":"pset3a.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"17477445381","text":"import datetime\nimport re\nimport requests\n\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport pytz\n\nimport vaxutils\n\n\ndef read(source: str) -> pd.Series:\n soup = BeautifulSoup(requests.get(source).content, \"html.parser\")\n return parse_data(soup)\n\n\ndef parse_data(soup: BeautifulSoup) -> pd.Series:\n source = soup.find(class_=\"sidebar\").find(class_=\"text-dark\")[\"href\"]\n\n soup = BeautifulSoup(requests.get(source).content, \"html.parser\")\n\n regex = r\"që nga fillimi i vaksinimit janë kryer ([\\d,]+) vaksinime\"\n total_vaccinations = re.search(regex, soup.text).group(1)\n total_vaccinations = vaxutils.clean_count(total_vaccinations)\n\n return pd.Series({\n \"total_vaccinations\": total_vaccinations,\n \"source_url\": source,\n })\n\n\ndef set_date(ds: pd.Series) -> pd.Series:\n date = str(datetime.datetime.now(pytz.timezone(\"Europe/Tirane\")).date() - datetime.timedelta(days=1))\n return vaxutils.enrich_data(ds, \"date\", date)\n\n\ndef enrich_location(ds: pd.Series) -> pd.Series:\n return vaxutils.enrich_data(ds, \"location\", \"Albania\")\n\n\ndef enrich_vaccine(ds: pd.Series) -> pd.Series:\n return vaxutils.enrich_data(ds, \"vaccine\", \"Pfizer/BioNTech\")\n\n\ndef pipeline(ds: pd.Series) -> pd.Series:\n return (\n ds\n .pipe(set_date)\n .pipe(enrich_location)\n .pipe(enrich_vaccine)\n )\n\n\ndef main():\n source = \"https://coronavirus.al/masa/\"\n data = read(source).pipe(pipeline)\n vaxutils.increment(\n location=data[\"location\"],\n total_vaccinations=data[\"total_vaccinations\"],\n date=data[\"date\"],\n source_url=data[\"source_url\"],\n vaccine=data[\"vaccine\"]\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NDMan-PM/TK_PTDL","sub_path":"covid-19-data-master/scripts/scripts/vaccinations/automations/incremental/albania.py","file_name":"albania.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23422167901","text":"#! 
/usr/bin/env python\n\nfrom __future__ import print_function\nfrom sys import stdin, stderr\nimport math\n\n\ndebug = True\n\ndef debug_print(s):\n\tif debug:\n\t\tprint(' DEBUG: {}'.format(s), file=stderr)\n\n\ndef solve(C, F, X):\n\tp = (X / C) - (2 / F)\n\tp_floor = math.trunc(p) if p > 0 else 0\n\tt = sum([(C / (2 + (F * i))) for i in range(int(p_floor))])\n\tn = X / (2 + (F * p_floor))\n\tz = t + n\n\tdebug_print(' p: {}, p_floor: {}, t: {}, n: {}'.format(p, p_floor, t, n))\n\treturn z\n\n\nif __name__ == '__main__':\n\tnum_cases = int(stdin.readline().strip())\n\n\tfor case in range(1, num_cases + 1):\n\t\tC, F, X = [float(x) for x in stdin.readline().strip().split()]\n\t\tdebug_print(' C: {}, F: {}, X: {}'.format(C, F, X))\n\n\t\tout = solve(C, F, X)\n\t\tprint('Case #{}: {}'.format(case, out))\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/1527.py","file_name":"1527.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13554769086","text":"#!/usr/bin/env python3\nimport itertools\nfrom game import ngame\n\ndef get_magic_numbers(sbox=ngame.sbox):\n cycles = get_cycles(sbox)\n\n for number_of_combined_cycles in range(len(cycles)):\n\n for combined_cycles in itertools.combinations(cycles, r=number_of_combined_cycles):\n\n 𝛹 = list(itertools.chain.from_iterable(combined_cycles))\n\n if is_cyclic_under_ternary_xor(𝛹) and has_full_coverage(𝛹):\n return sorted(𝛹)\n\ndef get_cycles(sbox=ngame.sbox):\n cycles = list()\n visited = set()\n\n for node in range(256):\n if node in visited:\n continue\n\n trail = list()\n\n while node not in visited:\n visited.add(node)\n trail.append(node)\n node = sbox[node]\n\n cycle_is_detected = node in trail\n\n if cycle_is_detected:\n start_index = trail.index(node)\n cycle = trail[start_index:]\n cycles.append(cycle)\n\n return cycles\n\ndef is_cyclic_under_ternary_xor(𝛹):\n 𝑋 = (𝑎 ^ 𝑏 ^ 𝑐 for 𝑎, 𝑏, 𝑐 in itertools.combinations_with_replacement(𝛹, r=3))\n return all(𝑥 in 𝛹 for 𝑥 in 𝑋)\n\ndef has_full_coverage(𝛹, sbox=ngame.sbox):\n coverage = set(sbox[a] ^ sbox[b+256] for a,b in itertools.product(𝛹, 𝛹))\n return all(byte_value in coverage for byte_value in range(256))\n\nif __name__ == \"__main__\":\n 𝛹 = get_magic_numbers()\n print(\"\".join(f\"{𝑥:02x}\" for 𝑥 in 𝛹))\n","repo_name":"tlk/nsg","sub_path":"docs/εὕρηκα.py","file_name":"εὕρηκα.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32202774317","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn import preprocessing\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\n\r\ndf = pd.read_csv(\"prices.csv\")\r\n\r\n#TCS Script data between 02 Mar 2021 and 04 Mar 2022\r\npredict_col = 'Close Price'\r\npredict_out = 7\r\ntest_size = 0.3\r\n\r\ndef prepare_data(df,predict_col,predict_out,test_size):\r\n label = df[predict_col].shift(-predict_out) #creating new column called label with the last 5 rows are nan\r\n X = np.array(df[[predict_col]]) #creating the feature array\r\n X = preprocessing.scale(X) #processing the feature array\r\n X_predict = X[-predict_out:] #creating the column i want to use later in the predicting method\r\n X = X[:-predict_out] # X that will contain the training and testing\r\n label.dropna(inplace=True) #dropping na values\r\n y = np.array(label) # assigning Y\r\n 
X_train, X_test, Y_train, Y_test = train_test_split(X, y, test_size=test_size, random_state=0) #cross validation\r\n\r\n response = [X_train,X_test , Y_train, Y_test , X_predict]\r\n return response\r\n\r\nX_train, X_test, Y_train, Y_test , X_predict =prepare_data(df,predict_col,predict_out,test_size); #calling the method were the cross validation and data preperation is in\r\nlearner = LinearRegression() #initializing linear regression model\r\n\r\nlearner.fit(X_train,Y_train) #training the linear regression model\r\n\r\nscore=learner.score(X_test,Y_test)#testing the linear regression model\r\npredict= learner.predict(X_predict) #set that will contain the predicted data\r\nresponse={}#creting json object\r\nresponse['predicted_value']=predict\r\n\r\nprint(response)","repo_name":"sivnth07/Predict_Stock_Price","sub_path":"YT - Copy.py","file_name":"YT - Copy.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31149531784","text":"from sys import stdin,stdout\ncin = stdin.readline;cout = stdout.write\nfor _ in xrange(int(cin())):\n n,m = map(int,cin().split())\n loc = list()\n for i in xrange(n):\n array = cin()\n for j in xrange(m):\n if array[j]=='1':loc.append((i,j))\n ans = [0]*(n+m-1)\n for i in xrange(len(loc)):\n for j in xrange(i+1,len(loc)):\n dist = abs(loc[i][0]-loc[j][0]) + abs(loc[i][1]-loc[j][1])\n if dist<(n+m-1):ans[dist]+=1\n for i in xrange(1,n+m-1):\n cout(\"%d \"%ans[i])\n cout(\"\\n\")\n","repo_name":"phantomhieve/CP","sub_path":"2018/snack(18)/a/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13405309998","text":"\nfrom anaconda_rust.anaconda_lib.sublime import run_linter\nfrom anaconda_rust.anaconda_lib.anaconda_plugin import linting\nfrom anaconda_rust.anaconda_lib.helpers import check_linting, get_settings\n\n\nclass BackgroundLinter(linting.BackgroundLinter):\n \"\"\"Background linter, can be turned off via plugin settings\n \"\"\"\n\n def __init__(self):\n kwargs = {'lang': 'Rust', 'linter': run_linter}\n super(BackgroundLinter, self).__init__(**kwargs)\n self.check_auto_lint = True\n\n def on_modified(self, view):\n \"\"\"Rustc can only work in files not in buffers\n \"\"\"\n\n if check_linting(view, 0, code=self.lang.lower()):\n # remove prvious linting marks if configured to do so\n if not get_settings(view, 'anaconda_linter_persistent', False):\n linting.erase_lint_marks(view)\n else:\n self._erase_marks_if_no_linting(view)\n","repo_name":"DamnWidget/anaconda_rust","sub_path":"listeners/linting.py","file_name":"linting.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"23634078031","text":"'''\r\nCreated on Apr 14, 2012\r\n\r\n@author: Nick\r\n'''\r\n\r\ninputFile = open('in.txt')\r\noutputFile = open('out.txt','w')\r\n\r\nenglish = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\ngooglish = ['y','h','e','s','o','c','v','x','d','u','i','g','l','b','k','r','z','t','n','w','j','p','f','m','a','q']\r\n\r\ninputTxt = inputFile.readlines()\r\noutputLine = ''\r\n\r\nfor i in range(1,int(inputTxt[0])+1):\r\n words = inputTxt[i].split()\r\n for word in words:\r\n for letter in word:\r\n index = english.index(letter)\r\n outputLine += googlish[index]\r\n outputLine += ' '\r\n 
outputLine += '\\n'\r\n outputFile.write(\"Case #\" + str(i) + \": \")\r\n outputFile.write(outputLine)\r\n outputLine = ''\r\n \r\n \r\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/2572.py","file_name":"2572.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"730678223","text":"#! /usr/bin/env python3\n\nimport sys\nimport glob\nimport os\nimport re\n\n\nfasta_dir = sys.argv[1]\nresult_dir = \"/\".join(fasta_dir.split(\"/\")[:-1])\nfasta_files = glob.glob(\"{}/*.fasta\".format(fasta_dir))\nfasta_files = [fasta_file for fasta_file in fasta_files if \"all\" not in os.path.basename(fasta_file)]\n\ntranscript_ids = {}\ntranscript_ids_cooksCutoff_FALSE = {}\n\ndef add_annotation(transcript_ids, transcript_id, annotation):\n if transcript_id not in transcript_ids:\n transcript_ids[transcript_id] = [annotation]\n else:\n transcript_ids[transcript_id].append(annotation)\n return transcript_ids\n\ndef get_expressions(transcript_id):\n with open(\"{}/30_count_matrix/RSEM.isoform.TMM.EXPR.matrix\".format(result_dir)) as f:\n for line in f:\n if transcript_id in line:\n line = line.rstrip(\"\\n\")\n cols = line.split(\"\\t\")\n return cols[1:]\n\ndef get_pvalue(transcript_id):\n with open(\"{}/40_DEGseq2/DEGseq2_isoform_result_cooksCutoff_FALSE/RSEM.isoform.counts.matrix.N_vs_V.DESeq2.DE_results.cooksCutoff_FALSE\".format(result_dir)) as f:\n for line in f:\n if transcript_id in line:\n line = line.rstrip(\"\\n\")\n cols = line.split(\"\\t\")\n pvalue = cols[9]\n return pvalue\n\nfor fasta_file in fasta_files:\n annotation = os.path.basename(fasta_file).split(\".\")[0]\n with open(fasta_file) as f:\n for line in f:\n if re.match(\">\", line):\n transcript_id = line.lstrip(\">\")\n transcript_id = transcript_id.rstrip(\"\\n\")\n if re.match(\">\", line):\n if \"cooksCutoff_FALSE\" in fasta_file:\n transcript_ids_cooksCutoff_FALSE = add_annotation(transcript_ids_cooksCutoff_FALSE, \\\n transcript_id, \\\n annotation)\n else:\n transcript_ids = add_annotation(transcript_ids, \\\n transcript_id, \\\n annotation)\n\nfor k, v in transcript_ids_cooksCutoff_FALSE.items():\n annotations = \",\".join(set(v))\n exps = \"\\t\".join(get_expressions(k))\n pvalue = get_pvalue(k)\n if k in transcript_ids:\n print(k, \"-\", annotations, \",\".join(transcript_ids[k]), pvalue, exps, sep=\"\\t\")\n else:\n print(k, \"+\", annotations, \"\", pvalue, exps, sep=\"\\t\")\n\n","repo_name":"YuSugihara/ViiR","sub_path":"utils/generate_summary.py","file_name":"generate_summary.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40619563798","text":"from .base import commen, data, model, train, test\nimport numpy as np\n\ndata.scale = np.array([896, 384])\ndata.input_w, data.input_h = (896, 384)\ndata.scale_range = np.arange(0.4, 1.0, 0.1)\n\nmodel.heads['ct_hm'] = 7\n\ntrain.dataset = 'kitti_train'\ntrain.optimizer['gamma'] = 0.25\ntrain.batch_size = 64\ntrain.num_workers = 64\n\ntest.test_rescale = 0.5\ntest.dataset = 'kitti_val'\ntest.with_nms = False\n\nclass config(object):\n commen = commen\n data = data\n model = model\n train = train\n test = test\n","repo_name":"zhang-tao-whu/e2ec","sub_path":"configs/kitti.py","file_name":"kitti.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":187,"dataset":"github-code","pt":"61"} 
+{"seq_id":"2865152868","text":"from django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.dispatch import receiver\nfrom rest_framework.authtoken.models import Token\n\nfrom api.models import Profile, Task\nfrom api.utils import flatten_query_set\n\n\n@receiver(post_save, sender=User)\ndef init_new_user(sender, instance, signal, created, **kwargs):\n if kwargs['raw']:\n print(\"Skipping signal for {}\".format(kwargs))\n return\n if created:\n Token.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if kwargs['raw']:\n print(\"Skipping signal for {}\".format(kwargs))\n return\n if created:\n Profile.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n if kwargs['raw']:\n print(\"Skipping signal for {}\".format(kwargs))\n return\n instance.profile.save()\n\n\n@receiver(post_save, sender=Task)\ndef task_save_check(sender, instance, **kwargs):\n if instance.amount == len(flatten_query_set(instance.completions)) and instance.active:\n instance.active = False\n instance.save()\n","repo_name":"feclist/utask","sub_path":"utask/api/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43392639132","text":"#!/usr/bin/env python3\n\nimport subprocess\ncmd=\"ls -lrt\"\n#sp=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\t# this produces output as byte code\nsp=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,universal_newlines=True)\nrc=sp.wait()\nout,err=sp.communicate()\nprint(f\"The return code is : {rc}\")\n#print(f\"Output is as below : \\n{out}\")\t\t# prints output as string \n#print(f\"Error is : \\n{err}\")\t\t\t# prints error as string\n# Suppose we don't want to print output as string, but instead as a list\nprint(f\"Output is as below : \\n{out.splitlines()}\")\t\t# splitlines() by default takes separator as \\n\nprint(f\"Error is as below : \\n{err.splitlines()}\")\t\t# splitlines(\"\\t\") will consider \\t as separator\n","repo_name":"inderpal2406/python","sub_path":"udemy/01_walkthrough/execute_ls-lrt_using_subprocess.py","file_name":"execute_ls-lrt_using_subprocess.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72512359555","text":"'''\nGiven two binary strings a and b, return their sum as a binary string\n\nConstraints:\n- 1 <= a.length, b.length <= 104\n- a and b consist only of '0' or '1' characters.\n- Each string does not contain leading zeros except for the zero itself.\n'''\n\n# ATTEMPT 1\nclass Solution:\n def addBinary(self, a: str, b: str) -> str:\n # https://www.educative.io/answers/how-to-add-binary-numbers-in-python\n print(int(a,2))\n print(int(b,2))\n\n c = str(bin(int(a, 2) + int(b, 2)))\n\n # bin() returns a string with prefix '0b', so slice that out\n return c[2:]","repo_name":"newns92/leetcode","sub_path":"python3/medium/67_AddBinary.py","file_name":"67_AddBinary.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23936646544","text":"\ndef operandErrorMessage(this_type, operation, other_type):\n return f\"Unsupported operand type(s) for {operation}: '{this_type}' and 
'{other_type}'\"\n\nclass Point:\n def __init__(self, value):\n self.value = value\n\n def __eq__(self, other):\n if isinstance(other, Point):\n return self.value == other.value\n else:\n error_msg=operandErrorMessage('Point', '==', type(other).__name__)\n raise TypeError(error_msg)\n\n def __ne__(self, other):\n if isinstance(other, Point):\n return self.value != other.value\n else:\n error_msg=operandErrorMessage('Point', '!=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __lt__(self, other):\n if isinstance(other, Point):\n return self.value < other.value\n else:\n error_msg=operandErrorMessage('Point', '<', type(other).__name__)\n raise TypeError(error_msg)\n\n def __le__(self, other):\n if isinstance(other, Point):\n return self.value <= other.value\n else:\n error_msg=operandErrorMessage('Point', '<=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __gt__(self, other):\n if isinstance(other, Point):\n return self.value > other.value\n else:\n error_msg=operandErrorMessage('Point', '>', type(other).__name__)\n raise TypeError(error_msg)\n\n def __ge__(self, other):\n if isinstance(other, Point):\n return self.value >= other.value\n else:\n error_msg=operandErrorMessage('Point', '>=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __add__(self, other):\n if isinstance(other, Point):\n return Point(self.value + other.value)\n else:\n error_msg=operandErrorMessage('Point', '+', type(other).__name__)\n raise TypeError(error_msg)\n\n def __sub__(self, other):\n if isinstance(other, Point):\n return Point(self.value - other.value)\n else:\n error_msg=operandErrorMessage('Point', '-', type(other).__name__)\n raise TypeError(error_msg)\n\n def __repr__(self):\n return f\"Point({self.value})\"\n\nclass ContinuousInterval:\n def __init__(self, start, end, is_start_open=False, is_end_open=False):\n if start > end:\n raise ValueError(\"Invalid interval: start must be less or equal than end\")\n\n if start == end and start != 0:\n empty_msg = f\"Only start and end equal 0 is allowed!\"\n error_msg = f\"Invalid interval: open interval with zero length. 
{empty_msg}\"\n raise ValueError(error_msg)\n\n self.start = start\n self.end = end\n self.is_start_open = is_start_open\n self.is_end_open = is_end_open\n \n @staticmethod\n def empty():\n return ContinuousInterval(0, 0, True, True)\n \n def is_empty(self):\n are_open=self.is_start_open and self.is_end_open\n are_zero=self.start == self.end and self.start == 0\n \n return are_zero and are_open\n\n def overlaps(self, other):\n if self.start < other.end and self.end > other.start:\n if self.start == other.end:\n if self.is_start_open or other.is_end_open:\n return False\n return True\n if self.end == other.start:\n if self.is_end_open or other.is_start_open:\n return False\n return True\n if self.start < other.start < self.end or other.start < self.start < other.end:\n return True\n return False\n\n def __eq__(self, other):\n if isinstance(other, ContinuousInterval):\n return (self.start, self.end, self.is_start_open, self.is_end_open) == \\\n (other.start, other.end, other.is_start_open, other.is_end_open)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '==', type(other).__name__)\n raise TypeError(error_msg)\n\n def __ne__(self, other):\n if isinstance(other, ContinuousInterval):\n return (self.start, self.end, self.is_start_open, self.is_end_open) != \\\n (other.start, other.end, other.is_start_open, other.is_end_open)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '!=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __lt__(self, other):\n if isinstance(other, ContinuousInterval):\n print(self.end)\n print(other.start)\n print((self.end == other.start and (self.is_end_open or other.is_start_open)))\n return self.end < other.start or \\\n (self.end == other.start and (self.is_end_open or other.is_start_open))\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '<', type(other).__name__)\n raise TypeError(error_msg)\n\n def __le__(self, other):\n if isinstance(other, ContinuousInterval):\n return self.end < other.end or (self.end == other.end and\n (self.is_end_open or not other.is_end_open))\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '<=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __gt__(self, other):\n if isinstance(other, ContinuousInterval):\n return other.__lt__(self)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '>', type(other).__name__)\n raise TypeError(error_msg)\n\n def __ge__(self, other):\n if isinstance(other, ContinuousInterval):\n return other.__le__(self)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '>=', type(other).__name__)\n raise TypeError(error_msg)\n\n def __add__(self, other):\n if isinstance(other, ContinuousInterval):\n if self.is_empty():\n return other\n elif other.is_empty():\n return self\n elif self.end == other.start and not (self.is_end_open or other.is_start_open):\n return ContinuousInterval(self.start, other.end, self.is_start_open, other.is_end_open)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '+', type(other).__name__)\n raise TypeError(error_msg)\n\n def __sub__(self, other):\n if isinstance(other, ContinuousInterval):\n if self.is_empty() or other.is_empty() or self == other:\n return ContinuousInterval.empty()\n elif other.end <= self.start or other.start >= self.end:\n return self\n elif self.start < other.start:\n if self.end > other.end:\n return ContinuousInterval(self.start, other.start, self.is_start_open, not other.is_start_open) + \\\n ContinuousInterval(other.end, self.end, not other.is_end_open, 
self.is_end_open)\n else:\n return ContinuousInterval(self.start, other.start, self.is_start_open, not other.is_start_open)\n else:\n return ContinuousInterval(other.end, self.end, not other.is_end_open, self.is_end_open)\n else:\n error_msg=operandErrorMessage('ContinuousInterval', '-', type(other).__name__)\n raise TypeError(error_msg)\n \n def length(self):\n return self.end - self.start\n\n def contains(self, item):\n if isinstance(item, ContinuousInterval):\n return self.contains_interval(item)\n elif isinstance(item, Point):\n return self.contains_point(item)\n else:\n raise TypeError(\"Invalid type. Expected ContinuousInterval or Point.\")\n\n def contains_interval(self, interval):\n if interval.start < self.start or interval.end > self.end:\n return False\n\n if interval.start == self.start and interval.is_start_open and not self.is_start_open:\n return False\n\n if interval.end == self.end and interval.is_end_open and not self.is_end_open:\n return False\n\n if interval.start == self.start and interval.end == self.end:\n return interval.is_start_open == self.is_start_open and interval.is_end_open == self.is_end_open\n\n return True\n\n def contains_point(self, point):\n return (point.value == self.start and not self.is_start_open) or \\\n (point.value == self.end and not self.is_end_open) or \\\n (self.start < point.value < self.end)\n\n def is_overlapping(self, interval):\n are_not_disjoint=not (self.end < interval.start or self.start > interval.end)\n endpoints_overlap=(self.end == interval.start and \\\n not self.is_end_open and not interval.is_start_open) or \\\n (self.start == interval.end and \\\n not self.is_start_open and not interval.is_end_open)\n has_intersection=(interval.start < self.start < interval.end) or \\\n (interval.start < self.end < interval.end) or \\\n (self.start < interval.start < self.end) or \\\n (self.start < interval.end < self.end)\n \n is_item_inside=self.contains(interval)\n \n return are_not_disjoint and (endpoints_overlap or has_intersection or is_item_inside)\n \n def intersection(self, interval):\n if not self.is_overlapping(interval) or interval.is_empty():\n return ContinuousInterval.empty()\n\n equal_endpoints = self.start == interval.start and self.end == interval.end\n equal_boundaries = self.is_start_open == interval.is_start_open and self.is_end_open == interval.is_end_open\n\n if equal_endpoints and equal_boundaries:\n # The intervals are coincidental\n return self\n\n if self.contains(interval):\n return interval\n\n if interval.contains(self):\n return self\n\n if self.start == interval.end and not self.is_start_open and not interval.is_end_open:\n return Point(self.start)\n\n if self.end == interval.start and not self.is_end_open and not interval.is_start_open:\n return Point(self.end)\n\n start = max(self.start, interval.start)\n end = min(self.end, interval.end)\n\n if start > end:\n return ContinuousInterval.empty()\n\n is_start_open = (start == self.start and self.is_start_open) or (start == interval.start and interval.is_start_open)\n is_end_open = (end == self.end and self.is_end_open) or (end == interval.end and interval.is_end_open)\n\n return ContinuousInterval(start, end, is_start_open, is_end_open)\n\n\n def union(self, interval):\n if not self.is_overlapping(interval):\n # Return the two disjoint intervals as a list\n return [self, interval]\n\n # Determine the start value\n if self.start < interval.start:\n start = self.start\n is_start_open = self.is_start_open\n elif self.start > interval.start:\n start = 
interval.start\n is_start_open = interval.is_start_open\n else:\n start = self.start\n is_start_open = self.is_start_open and interval.is_start_open\n\n # Determine the end value\n if self.end > interval.end:\n end = self.end\n is_end_open = self.is_end_open\n elif self.end < interval.end:\n end = interval.end\n is_end_open = interval.is_end_open\n else:\n end = self.end\n is_end_open = self.is_end_open and interval.is_end_open\n\n return ContinuousInterval(start, end, is_start_open, is_end_open)\n\n def difference(self, interval):\n if not self.is_overlapping(interval):\n return [self]\n\n if self.start >= interval.start and self.end <= interval.end:\n return []\n\n result = []\n\n if self.start < interval.start:\n start = self.start\n end = interval.start\n is_start_open = self.is_start_open\n is_end_open = interval.is_start_open\n result.append(ContinuousInterval(start, end, is_start_open, is_end_open))\n\n if self.end > interval.end:\n start = interval.end\n end = self.end\n is_start_open = interval.is_end_open\n is_end_open = self.is_end_open\n result.append(ContinuousInterval(start, end, is_start_open, is_end_open))\n\n return result\n \n def __repr__(self):\n input_msg=f\"{self.start}, {self.end}, is_start_open={self.is_start_open}, is_end_open={self.is_end_open}\"\n msg=f\"ContinuousInterval({input_msg})\"\n return msg\n\n \nclass DisjointInterval:\n def __init__(self, intervals):\n self.intervals = intervals\n\n def add_interval(self, interval):\n # Add a new continuous interval to the collection\n self.intervals.append(interval)\n\n def merge_overlapping_intervals(self):\n # Merge overlapping intervals within the collection\n merged_intervals = []\n sorted_intervals = sorted(self.intervals, key=lambda interval: interval.start)\n \n for interval in sorted_intervals:\n if not merged_intervals or merged_intervals[-1].end < interval.start:\n merged_intervals.append(interval)\n else:\n merged_intervals[-1].end = max(merged_intervals[-1].end, interval.end)\n \n self.intervals = merged_intervals\n\n def get_non_overlapping_intervals(self):\n # Retrieve a list of non-overlapping intervals\n self.merge_overlapping_intervals()\n return self.intervals\n\n def get_interval_containing_point(self, point):\n # Find the interval (if any) that contains the given point\n for interval in self.intervals:\n if interval.start <= point.value <= interval.end:\n return interval\n \n return None\n\nclass IntervalSet:\n def __init__(self, points, intervals, disjoint_intervals):\n self.points = points\n self.intervals = intervals\n self.disjoint_intervals = disjoint_intervals\n\n def find_intervals_containing_points(self, points):\n # Perform operations involving points and intervals together\n pass\n\n def merge_overlapping_intervals_within_disjoint_intervals(self):\n # Perform operations involving intervals and disjoint intervals together\n pass","repo_name":"alloyha/experiments","sub_path":"data/continuousIntervals/intervals/src/intervals.py","file_name":"intervals.py","file_ext":"py","file_size_in_byte":14600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1198813478","text":"#!/usr/bin/env python3\n\"\"\"\nThe challenge primarily focuses on dependency trees. 
I don't feel like\nPython native data structures provide the optimal building blocks for\nsolving this.\n\nHowever, I made it work by creating two trees:\n  - one where the values are required to complete before the key can be\n    processed (a tree indicating pre-requisites)\n  - one where the values are the next item that can be matched when the\n    key has been processed (forward mapping)\n\nThese two trees are needed in my approach because of the interesting\ntwist of the puzzle - namely, as units are available to be processed\n(i.e., all their pre-reqs are completed), the next unit to be\nprocessed is based on an alphabetical list of available units.\n\nAs units become available, I add the unit to a work queue and, when it\nis time to start a new unit, I sort the list.  Then, before you can pull\nthe item from the queue, all its pre-reqs (not just the one that\nfinished and added it to the queue) must be met.  Hence, the two trees.\n\n\"\"\"\n\nimport re\nimport operator\nimport sys\n\n\ndef regex():\n    s = r'Step (?P<before>\\w) must be finished'\n    s = r'{} before step (?P<after>\\w) can begin.'.format(s)\n    c = re.compile(s)\n\n    return c\n\n\nif __name__ == '__main__':\n\n    parser = regex()\n\n    dependencies = {}\n    following = {}\n\n    with open('input.txt', 'r') as f:\n        for l in f:\n            result = parser.match(l)\n            before = result.group('before')\n            after = result.group('after')\n\n            # Add the dependencies\n            if after not in dependencies:\n                dependencies[after] = [before]\n            else:\n                dependencies[after].append(before)\n\n            # Populate the pre-reqs too\n            if before not in dependencies:\n                dependencies[before] = []\n\n            # Map the other direction, for each item, what is next\n            if before not in following:\n                following[before] = [after]\n            else:\n                following[before].append(after)\n\n            # Populate the followers too\n            if after not in following:\n                following[after] = []\n\n    # The 'dependencies' tree key with 0 length value is the beginning\n    # So, generate a list of tuples with length of pre-reqs and key\n    counts = []\n    for a in dependencies.keys():\n        counts.append((len(dependencies[a]), a))\n\n    # Sort the counts\n    counts.sort(key=operator.itemgetter(0))\n\n    # Find the various entry points into the workflow\n    work = []\n    for c in counts:\n        if c[0] == 0:\n            work.append(c[1])\n        else:\n            break\n\n    # Prime the final result\n    final = []\n\n    while(len(work)):\n        # This list must be alphabetically sorted\n        work.sort()\n\n        # Loop through the available work\n        for i in range(0, len(work)):\n            w = work[i]\n\n            # Are the pre-reqs satisfied for this?\n            requirements = dependencies[w]\n            for r in requirements:\n                # No, pre-req not met\n                if r not in final:\n                    break\n            else:\n                # Pre-req met, add to final list\n                final.append(w)\n\n                # Remove from work list\n                work.pop(i)\n\n                # Add the followers to the work list\n                work = list(set(work + following[w]))\n                break\n\n        # We looped through all the work and none of them met pre-reqs\n        else:\n            print('Infinite loop prevention')\n            sys.exit(1)\n\n    print('Puzzle 1: {}'.format(''.join(final)))\n","repo_name":"broadcaststorm/advent-of-code","sub_path":"2018/07/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6323653530","text":"# 1. Error: usually refers to a syntax error; it stops the program from executing even a single line of code\n# print(\"hello world\")\n# if True:\n#     print(\"Really?\")\n# else:\n#     print(\"Fake\")\n\n# 2. Exception: an error that occurs while the program is running and interrupts it; code before the exception still runs normally, but code after it will no longer execute\nb = True\nif b:\n    print(\"Really\")\n    a = 10\nelse:\n    c = 20\nprint(\"a\", a)\nprint(\"c\", c) # NameError exception\n\n\n\n\n\n","repo_name":"huiba7i/Mycode","sub_path":"python/class code/code4.24/异常/异常和错误.py","file_name":"异常和错误.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"13326046326","text":"import numpy as np\r\nimport cv2\r\n\r\nimg1 = cv2.imread(\"D:\\F - DISK\\Downloads\\coins.jpg\", 0)\r\ncv2.imshow(\"image\", img1)\r\nret, thresh1 = cv2.threshold(img1, 200, 255, cv2.THRESH_BINARY_INV)\r\ncv2.imshow(\"thresholding\", thresh1)\r\nk = np.ones((5, 5), np.uint8)\r\nimg_erosion = cv2.erode(thresh1, k, iterations=7)\r\ncv2.imshow(\"erosion\", img_erosion)\r\n(n, hierarchy) = cv2.findContours(img_erosion.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\nprint(\"#coins\", len(n))\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"Marwan951/Coins-Counter","sub_path":"main3.py","file_name":"main3.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23463416361","text":"# Usage : run the python interpreter. Then type :\n# import problemC\n# problemC.main(\"path1\", \"path2\")\n#\n# path1 is the path to the input file, path2 is the path to the output file\n# open the output file and enjoy the result\n\ni=0\nj=2\nk=3\nmi=4\nmj=6\nmk=7\n\ndef mult(a,b):\n    if a==1:\n        return b\n    if b==1:\n        return a\n    if a == b:\n        return 5\n    if a==i:\n        if b==j:\n            return k\n        else: #b==k\n            return mj\n    if a==j:\n        if b==i:\n            return mk\n        else: # b==k\n            return i\n    # a==k\n    if b==i:\n        return j\n    else: #b==j\n        return mi\n    \n\ndef stoval(s):\n    if s=='i':\n        return i\n    if s=='j':\n        return j\n    return k\n\n\ndef main(infilename,outfilename):\n    infile = open(infilename,'r')\n    outfile = open(outfilename,'w')\n\n    nbr_cases = int(infile.readline().split(' ')[0])\n    \n    for ctr_cases in range(1,nbr_cases+1):\n        line = infile.readline().split(' ')\n        pattern_size = int(line[0])\n        times = int(line[1])\n        pattern = infile.readline()[0:pattern_size];\n        res = \"NO\\n\"\n        target = i\n        last = 1\n        for counter in range(times):\n            for index in range(pattern_size):\n                last = (last&4)^mult(last&3,stoval(pattern[index]))\n                if target==last:\n                    last = 1\n                    if target == j:\n                        target = 8\n                    if target == i:\n                        target = j\n\n        if last == k and target == 8:\n            res = \"YES\\n\"\n\n        outline = \"Case #\" + str(ctr_cases) + \": \" + res\n        outfile.write(outline)\n    infile.close()\n    outfile.close()\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_157/967.py","file_name":"967.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36377909998","text":"from collections import defaultdict\nN = int(input())\n\nfile_system = defaultdict(int)\n\nfor _ in range(N):\n    filename, filetype = input().split('.')\n    file_system[filetype] += 1\n\n\nfor name, type in sorted(file_system.items(), key=lambda x: x[0]):\n    print(name, type)\n","repo_name":"studying-ice-bear/pparkkkimeom","sub_path":"GimYujin/String/20291_파일_정리.py","file_name":"20291_파일_정리.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"30917355128","text":"import cv2\r\nimport numpy as np \r\nfrom datetime import 
datetime\r\n\r\nimg = cv2.imread('thAreasCross.png', cv2.IMREAD_GRAYSCALE)\r\nheight, width = img.shape\r\noutput = np.zeros((height,width), np.uint8)\r\n\r\nlineLength = 100\r\njumpSize = 1\r\n\r\ndef multiCompare(in1, in2):\r\n return in1[0] == in2[0] and in1[1] == in2[1]\r\n\r\ncontours, heirarchy = cv2.findContours(img,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n#cv2.drawContours(output, contours, -1, (255), -1)\r\n\r\nfor i in contours:\r\n pointIndex = 0\r\n \r\n \r\n lenI = len(i)\r\n eyes = [list(i[x][0]) for x in range(lenI)]\r\n start = datetime.now()\r\n for point in eyes:\r\n S = next(((point[0], y) for y in range(point[1] + lineLength, point[1], -1) if (point[0], y) in eyes), None)\r\n print(point[0] in eyes)\r\n # cv2.line(output, tuple(point), (point[0], point[0] + lineLength), (255), 1)\r\n # cv2.imshow('output',output)\r\n # cv2.waitKey(0)\r\n # if S != None:\r\n # cv2.line(output, tuple(point), S, (255), 1)\r\n # cv2.imshow('output',output)\r\n # cv2.waitKey(0)\r\n\r\n # while pointIndex < lenI:\r\n # # point = i[pointIndex][0]\r\n # # N = next(((point[0], y) for y in range(point[1] + lineLength, point[1] + 1, -1) if (point[0], y) in i), None)\r\n # # if N != None:\r\n # # cv2.line(img, tuple(point), tuple(N), (0), 1)\r\n\r\n # # E = next(((x, point[1]) for x in range(point[0] + lineLength, point[0] + 1, -1) if (x, point[1]) in i), None)\r\n # # if E != None:\r\n # # cv2.line(img, tuple(point), tuple(E), (0), 1)\r\n\r\n # # S = next(((point[0], y) for y in range(point[1] - 1, point[1] - lineLength, -1) if (point[0], y) in i), None)\r\n # # if S != None:\r\n # # cv2.line(img, tuple(point), tuple(S), (0), 1)\r\n \r\n # # W = next(((x, point[1]) for x in range(point[0] - 1, point[0] - lineLength, -1) if (x, point[1]) in i), None)\r\n # # if W != None:\r\n # # cv2.line(img, tuple(point), tuple(W), (0), 1)\r\n\r\n # point = i[pointIndex][0]\r\n # N = next(((point[0], y) for y in range(point[1] - lineLength, point[1] - 1, 1) if (point[0], y) in eyes), None)\r\n # #N = next(((point[0], y) for y in range(point[1] - 1, point[1] - lineLength, -1) if (point[1], y) in i), None)\r\n\r\n # print(point[1] - lineLength, point[1] - 1)\r\n # #print(point[1] - 1, point[1] - lineLength)\r\n # print(point, N)\r\n # if N != None:\r\n # cv2.line(output, tuple(point), tuple(N), (255), 1)\r\n # cv2.imshow('output',output)\r\n # cv2.waitKey(0)\r\n # # else:\r\n\r\n # # E = next(((x, point[1]) for x in range(point[0] + lineLength, point[0] + 1, -1) if (x, point[1]) in i), None)\r\n # # if E != None:\r\n # # cv2.line(output, tuple(point), tuple(E), (255), 1)\r\n # # else:\r\n # # S = next(((point[0], y) for y in range(point[1] + lineLength, point[1] + 1, -1) if (point[0], y) in i), None)\r\n # # if S != None:\r\n # # cv2.line(output, tuple(point), tuple(S), (255), 1)\r\n \r\n # # W = next(((x, point[1]) for x in range(point[0] - 1, point[0] - lineLength, -1) if (x, point[1]) in i), None)\r\n # # if W != None:\r\n # # cv2.line(output, tuple(point), tuple(W), (255), 1)\r\n \r\n # pointIndex += jumpSize\r\n print(datetime.now() - start)\r\n# (439, 161)\r\n'''[439 161]]\r\n\r\n [[439 165]]\r\n\r\n [[438 166]]\r\n\r\n [[438 167]]\r\n\r\n [[437 168]]'''\r\n\r\n\r\ncv2.imshow('output',output)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"JessedotTaylor/Machine_Vision","sub_path":"Development Files/attemptSlice.py","file_name":"attemptSlice.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
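A note on the attemptSlice record above: it probes membership like "(point[0], y) in eyes" against a Python list, so every probe is a linear scan over the whole contour. Below is a minimal sketch of the same vertical-chord search using a set for O(1) membership tests; the helper name vertical_chords and its line_length default are illustrative assumptions, not part of the original record.

import cv2

def vertical_chords(contour, line_length=100):
    # Hash the contour points once; each membership probe is then O(1)
    # instead of a linear scan over the point list.
    points = {tuple(pt[0]) for pt in contour}
    chords = []
    for x, y in points:
        # Farthest match first, mirroring the range(..., -1) scans above.
        for dy in range(line_length, 0, -1):
            if (x, y + dy) in points:
                chords.append(((x, y), (x, y + dy)))
                break
    return chords

# Assumed usage (OpenCV 4.x returns a 2-tuple; CHAIN_APPROX_NONE keeps all boundary points):
# contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# chords = vertical_chords(contours[0])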
+{"seq_id":"40241691366","text":"# Write a Python function that accepts a string and calculate the number of\n# upper case letters and lower case letters.\n\ndef count_case(word):\n '''\n\n :param word: string\n :return: dictionary\n '''\n d = {\n 'lower_case': 0,\n 'upper_case': 0\n }\n for _ in word:\n if ord(_) in range(97, 123):\n d['lower_case'] += 1\n elif ord(_) in range(65, 91):\n d['upper_case'] += 1\n return d\n\nif __name__ == \"__main__\":\n print(count_case('The quick Brow Fox'))","repo_name":"Gaurav-Zaiswal/IW","sub_path":"python_assignment/Qf7.py","file_name":"Qf7.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22435309124","text":"import math\nimport numpy as np\nimport operator\nfrom paymentmethod import (\n Payment_Network, sum_future_payments_to_counterparty, MULTIPLIER_CHANNEL_BALANCE,\n DUMMY_PAYMENT_VALUE\n)\nfrom network import Network\n\n\n# LN fees from https://www.reddit.com/r/lightningnetwork/comments/tmn1kc/bmonthly_ln_fee_report/\n\nclass LN(Payment_Network):\n def __init__(\n self, nr_players, bitcoin_fee = 1000, bitcoin_delay = 3600, ln_fee = 0.00002,\n opening_transaction_size = 121.5, base_fee = 1000, coins_for_parties = \"max_value\"\n ):\n super().__init__(nr_players, bitcoin_fee, bitcoin_delay, coins_for_parties)\n self.method_name = \"ln\"\n self.ln_fee = ln_fee\n self.ln_delay = self.base_delay\n self.opening_transaction_size = opening_transaction_size\n self.network = Network(nr_players)\n self.base_fee = base_fee\n\n def get_payment_time(self, path):\n return self.ln_delay * (len(path)-1)\n\n def get_payment_fee(self, payment, num_hops):\n sender, receiver, value = payment\n return (self.base_fee + value * self.ln_fee) * (num_hops-1)\n\n def get_distances_and_paths_from_source(self, source, future_payments):\n \"\"\"\n Returns weighted distances to the future parties and to parties not occuring in future payments.\n Muiltiple payments to same party give multiple distances.\n \"\"\"\n distances = []\n # weight if we are endpoint\n weight_endpoint = 100\n # weight if we are possible intermediary\n weight_intermediary = 10\n # weight for other parties\n weight_other = 1\n encountered_parties = set({source})\n fee_intermediary = self.ln_fee * DUMMY_PAYMENT_VALUE + self.base_fee\n cheapest_paths_from_sender = self.network.find_cheapest_paths_from_sender(\n source, DUMMY_PAYMENT_VALUE, fee_intermediary\n )\n path_data = []\n for future_sender, future_receiver, value in future_payments:\n fee_intermediary = self.ln_fee * value + self.base_fee\n encountered_parties.add(future_sender)\n encountered_parties.add(future_receiver)\n if future_sender != source:\n path_data.append((\n weight_endpoint if future_receiver == source else weight_intermediary,\n self.network.find_cheapest_path(future_sender, source, value, fee_intermediary)\n ))\n if future_receiver != source:\n path_data.append((\n weight_endpoint if future_sender == source else weight_intermediary,\n cheapest_paths_from_sender.get(future_receiver)\n ))\n \n fee_intermediary = self.ln_fee * DUMMY_PAYMENT_VALUE + self.base_fee\n for party in (set(self.network.graph.nodes()).difference(encountered_parties)):\n path_data.append((\n weight_other,\n cheapest_paths_from_sender.get(party)\n ))\n \n for weight, cheapest_path in path_data:\n if cheapest_path is None:\n distances.append((weight, math.inf))\n else:\n distances.append((weight, len(cheapest_path)-1))\n\n return distances, 
cheapest_paths_from_sender\n\n    def update_balances(self, value, ln_fee, base_fee, path, pay=False):\n        # The pay argument tells whether this corresponds to making a payment\n        # or undoing it. The descriptive names (\"op_take\", \"received\", etc.)\n        # refer to the payment case; when undoing, the operations are reversed.\n        op_take, op_give = (operator.add, operator.sub) if pay else (operator.sub, operator.add)\n        num_intermediaries = len(path) - 2\n        sender = path[0]\n        receiver = path[-1]\n        fee_intermediary = ln_fee * value + base_fee\n        cost_sender = value + num_intermediaries * fee_intermediary\n        if pay and cost_sender > self.network.graph[sender][path[1]]['balance']:\n            raise ValueError\n        # update the balances of the intermediaries.\n        for i in range(1, num_intermediaries + 1):\n            received = value + (num_intermediaries - (i-1)) * fee_intermediary\n            transfered = received - fee_intermediary\n            new_taker_balance = op_take(self.network.graph[path[i]][path[i-1]]['balance'], received)\n            new_giver_balance = op_give(self.network.graph[path[i]][path[i+1]]['balance'], transfered)\n            # We test only new_giver_balance < 0, since during a payment only the giver balance shrinks;\n            # when undoing, a payment was made before, so no balance should drop below 0.\n            if new_giver_balance < 0:\n                # Roll back the hops already updated before signalling failure.\n                for j in range(1, i):\n                    received = value + (num_intermediaries - (j-1)) * fee_intermediary\n                    transfered = received - fee_intermediary\n                    self.network.graph[path[j]][path[j-1]]['balance'] = op_give(self.network.graph[path[j]][path[j-1]]['balance'], received)\n                    self.network.graph[path[j]][path[j+1]]['balance'] = op_take(self.network.graph[path[j]][path[j+1]]['balance'], transfered)\n                raise ValueError\n            self.network.graph[path[i]][path[i-1]]['balance'] = new_taker_balance\n            self.network.graph[path[i]][path[i+1]]['balance'] = new_giver_balance\n        self.network.graph[sender][path[1]]['balance'] = op_give(self.network.graph[sender][path[1]]['balance'], cost_sender)\n        self.network.graph[receiver][path[-2]]['balance'] = op_take(self.network.graph[receiver][path[-2]]['balance'], value)\n\n    def get_new_channel_option(self, sender, receiver, value, knowledge_sender, counterparty):\n        # case channel already exists.\n        if self.network.graph.get_edge_data(sender, counterparty) is not None:\n            return None\n        future_payments, num_payments_sender, num_total_payments = knowledge_sender\n        new_channel_time = self.plain_bitcoin.get_delay() + self.ln_delay\n        new_channel_fee = self.plain_bitcoin.get_fee(self.opening_transaction_size)\n        sum_future_payments = sum_future_payments_to_counterparty(sender, counterparty, future_payments)\n        sender_coins = min(\n            self.plain_bitcoin.coins[sender] - value - new_channel_fee,\n            sum_future_payments + MULTIPLIER_CHANNEL_BALANCE * value\n        )\n        if sender_coins < 0:\n            return None\n        if counterparty != receiver:\n            self.network.add_channel(sender, sender_coins, counterparty, 0)\n            new_channel_offchain_option = self.get_offchain_option(\n                sender, receiver, value, knowledge_sender\n            )\n            if new_channel_offchain_option is None:\n                # No off-chain route after opening; clean up the trial channel.\n                self.network.remove_channel(sender, counterparty)\n                return None\n            new_channel_centrality = new_channel_offchain_option['centrality']\n            new_channel_distance = new_channel_offchain_option['distance']\n            new_channel_time = new_channel_time + new_channel_offchain_option['delay']\n            new_channel_fee = new_channel_fee + new_channel_offchain_option['fee']\n        else:\n            self.network.add_channel(sender, sender_coins, counterparty, value)\n            new_channel_offchain_option = None\n            new_channel_distance, cheapest_paths_from_sender = (\n                self.get_distances_and_paths_from_source(sender, future_payments)\n            )\n            new_channel_centrality = self.network.get_centrality(\n                sender, cheapest_paths_from_sender\n            )\n        self.network.remove_channel(sender, counterparty)\n\n        return {\n            'delay': new_channel_time,\n            'fee': new_channel_fee,\n            'centrality': new_channel_centrality,\n            'distance': new_channel_distance,\n            'payment_information': {\n                'kind': 'ln-open',\n                'data': (\n                    sender, receiver, value, counterparty,\n                    sender_coins, new_channel_offchain_option\n                )\n            }\n        }\n\n    def get_offchain_option(self, sender, receiver, value, knowledge_sender):\n        fee_intermediary = self.ln_fee * value + self.base_fee\n        offchain_cost_and_path = self.network.find_cheapest_path(sender, receiver, value, fee_intermediary)\n        if offchain_cost_and_path is None:\n            return None\n        future_payments, num_payments_sender, num_total_payments = knowledge_sender\n        offchain_hops, offchain_path = offchain_cost_and_path\n        offchain_time = self.get_payment_time(offchain_path)\n        payment = (sender, receiver, value)\n        payment_information = {'kind': 'ln-pay', 'data': (offchain_path, value)}\n        try:\n            self.do(payment_information)\n        except ValueError:\n            return None\n        offchain_fee = self.get_payment_fee(payment, offchain_hops)\n        offchain_distance, cheapest_paths_from_sender = (\n            self.get_distances_and_paths_from_source(sender, future_payments)\n        )\n        offchain_centrality = self.network.get_centrality(sender, cheapest_paths_from_sender)\n        self.undo(payment_information)\n        return {\n            'delay': offchain_time,\n            'fee': offchain_fee,\n            'centrality': offchain_centrality,\n            'distance': offchain_distance,\n            'payment_information': payment_information\n        }\n\n    def get_payment_options(self, sender, receiver, value, future_payments):\n        onchain_option = self.get_onchain_option(sender, receiver, value, future_payments)\n        counterparty = receiver\n        new_channel_option = self.get_new_channel_option(\n            sender, receiver, value, future_payments, counterparty\n        )\n        offchain_option = self.get_offchain_option(sender, receiver, value, future_payments)\n        options = [onchain_option, new_channel_option, offchain_option]\n        return [option for option in options if option is not None]\n\n    def do(self, payment_information):\n        if payment_information['kind'] == 'onchain':\n            self.plain_bitcoin.pay(payment_information['data'])\n        elif payment_information['kind'] == 'ln-open':\n            (\n                sender, receiver, value, counterparty, sender_coins,\n                new_channel_offchain_option\n            ) = payment_information['data']\n            counterparty_coins = value if counterparty == receiver else 0\n            self.network.add_channel(sender, sender_coins, counterparty, counterparty_coins)\n            # next update the coins of sender\n            amount_sender = - (\n                sender_coins + counterparty_coins +\n                self.plain_bitcoin.get_fee(self.opening_transaction_size)\n            )\n            self.plain_bitcoin.update_coins(sender, amount_sender)\n            # use ln-pay here to make the off-chain payment after opening a new channel.\n            if counterparty != receiver:\n                self.do(new_channel_offchain_option['payment_information'])\n        elif payment_information['kind'] == 'ln-pay':\n            offchain_path, value = payment_information['data']\n            self.update_balances(value, self.ln_fee, self.base_fee, offchain_path, pay=True)\n        else:\n            raise ValueError\n\n    def undo(self, payment_information):\n        if payment_information['kind'] == 'ln-pay':\n            offchain_path, value = payment_information['data']\n            self.update_balances(value, self.ln_fee, self.base_fee, offchain_path, pay=False)\n        else:\n            raise ValueError\n\n    def equal_channels(self, other):\n        if self.network.graph.nodes() != other.network.graph.nodes():\n            return False\n        # assuming that 
other network is also built as expected,\n        # i.e. every edge's data dict has a 'balance' key.\n        for channel in self.network.graph.edges.data(\"balance\"):\n            sender, receiver, balance = channel\n            if other.network.graph.get_edge_data(sender, receiver) is None:\n                return False\n            elif not np.isclose(\n                balance,\n                other.network.graph[sender][receiver]['balance']\n            ):\n                return False\n\n        for channel in other.network.graph.edges.data(\"balance\"):\n            sender, receiver, balance = channel\n            if self.network.graph.get_edge_data(sender, receiver) is None:\n                return False\n            elif not np.isclose(\n                balance,\n                self.network.graph[sender][receiver]['balance']\n            ):\n                return False\n\n        return True\n\n    def __eq__(self, other):\n        return (\n            self.ln_fee == other.ln_fee and\n            self.ln_delay == other.ln_delay and\n            self.opening_transaction_size == other.opening_transaction_size and\n            self.equal_channels(other) and\n            self.base_fee == other.base_fee and\n            self.plain_bitcoin == other.plain_bitcoin\n        )\n","repo_name":"OrfeasLitos/virtual-payment-channels","sub_path":"simulation/ln.py","file_name":"ln.py","file_ext":"py","file_size_in_byte":12864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23558879081","text":"def main():\n    T = int(input())\n    outputs = []\n    for t in range(1, T+1):\n        n = input()\n        yes = False\n        for a, b in zip(n, n[1:]):\n            if a > b:\n                yes = True\n        if yes:\n            end = 0\n            for i in range(len(n)-1):\n                if n[i] < n[i+1]:\n                    end = i+1\n                elif n[i] > n[i+1]:\n                    break\n            trail = len(n) - end - 1\n            out = int(n[:end] + str(int(n[end])-1) + \"9\"*(trail) if trail else n)\n        else:\n            out = n\n\n        outputs.append(\"Case #%d: %s\" % (t, out))\n    print(\"\\n\".join(outputs))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/3534.py","file_name":"3534.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"10080741559","text":"import time\n\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\n\ndefault_timeout = 10\ndefault_poll = 0.5\n\n\ndef until(method, message, timeout=default_timeout):\n    \"\"\"Calls the method until the return value is not False.\"\"\"\n    end_time = time.time() + timeout\n    while True:\n        value = method()\n        if value: return\n        time.sleep(default_poll)\n        if time.time() > end_time: break\n    raise TimeoutException(message)\n\n\ndef until_not(method, message, timeout=default_timeout):\n    \"\"\"Calls the method until the return value is False.\"\"\"\n    end_time = time.time() + timeout\n    while True:\n        value = method()\n        if not value: return\n        time.sleep(default_poll)\n        if time.time() > end_time: break\n    raise TimeoutException(message)\n\n\ndef until_ec(browser: WebDriver, ec: EC, message: str, timeout=default_timeout, poll=default_poll):\n    WebDriverWait(browser, timeout=timeout, poll_frequency=poll).until(ec, message=message)\n\n\ndef until_not_ec(browser: WebDriver, ec: EC, message: str, timeout=default_timeout):\n    WebDriverWait(browser, timeout=timeout, poll_frequency=default_poll).until_not(ec, 
message=message)\n","repo_name":"borisoff1994/IBS_aqa_python_test_task","sub_path":"web/utils/wait_utils.py","file_name":"wait_utils.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44272591811","text":"# -*- coding: utf-8 -*-\n# Reza (User:reza1615)\n# Distributed under the terms of the CC-BY-SA 3.0 .\n# -*- coding: utf-8 -*-\nimport codecs\ndict={\nu\"تام\":u\"الاختیار\",\n}\nvari=u'''\n\n \n <%s>%s\n <%s>%s\n \nآیا منظور شما \\1‌\\2است؟\n %s\n %s\n %s\n'''\ncount=0\ntext=u'\\n'\nfor i in dict:\n count+=1\n if u'|' in i:\n item1=u'('+i+u')'\n case1=\"token regexp='yes'\"\n else:\n item1=i\n case1=\"token\"\n\n if u'|' in dict[i]:\n item2=u'('+dict[i]+u')'\n case2=\"token regexp='yes'\"\n else:\n item2=dict[i]\n case2=\"token\"\n\n our_vari=vari %(case1,item1,case2,item2,u'اصلاح فاصلهٔ مجازی میان کلمهٔ مرکب',item1.split(u'|')[0].replace(u'(',u'').replace(u')',u'').strip()+u' '+item2.split(u'|')[0].replace(u'(',u'').replace(u')',u'').strip(),item1.split(u'|')[0].replace(u'(',u'').replace(u')',u'').strip()+u'‌'+item2.split(u'|')[0].replace(u'(',u'').replace(u')',u'').strip())\n text+=our_vari\nwith codecs.open( u'my_patch.txt',mode = 'w',encoding = 'utf8' ) as f:\n f.write( text )\n","repo_name":"reza1615/Persian-Spell-checker","sub_path":"LT/rule_maker.py","file_name":"rule_maker.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"fa","doc_type":"code","stars":79,"dataset":"github-code","pt":"61"} +{"seq_id":"37244025438","text":"# Uses python3\ndef edit_distance(s, t):\n score = [[0 for j in range(len(t)+1)] for i in range(len(s)+1)]\n # intialize first row and column\n for i in range(len(s) + 1):\n score[i][0] = i\n for j in range(len(t) + 1):\n score[0][j] = j\n\n for j in range(1, len(t)+1):\n for i in range(1, len(s)+1):\n insert = score[i][j-1] + 1\n delete = score[i-1][j] + 1\n match = score[i-1][j-1]\n mismatch = score[i-1][j-1] + 1\n if s[i-1] == t[j-1]:\n score[i][j] = min(insert, delete, match)\n else:\n score[i][j] = min(insert, delete, mismatch)\n return score[len(s)][len(t)]\n\nif __name__ == \"__main__\":\n print(edit_distance(input(), input()))\n","repo_name":"SherMM/coursera-ucsd-algorithms-datastructures","sub_path":"AlgorithmicToolBox/Week5/edit_distance/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21600709661","text":"# -------------------------------------------------------------------------------------\n# AutoLoc: Weakly-supervised Temporal Action Localization in Untrimmed Videos. ECCV'18.\n# Authors: Zheng Shou, Hang Gao, Lei Zhang, Kazuyuki Miyazawa, Shih-Fu Chang.\n# -------------------------------------------------------------------------------------\n\n\"\"\"\nScript entry for training & testing on TH14/AN dataset in parallel mode.\nUsage:\n 1. $ mkdir exp//\n 2. Setup configuration in `config.yml`\n 3. Setup proto template in `solver.tpl` and `train.tpl`\n 4. 
$ python tools/train_prll_net.py --dataset <dataset> \\\n        --expname <expname> --num_workers <num_workers>\n\"\"\"\n\nfrom _init import workenv, setup\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport caffe\nimport logging\nimport argparse\n\nimport numpy as np\nimport os.path as osp\n\nfrom multiprocessing import Pool, cpu_count\n\nfrom config import cfg, cfg_from_file\nfrom utils.ops import fetch_videoids, prll_lock, may_change_infix\nfrom proc_net import train, test, fetch_rslt, dump_rslt #noqa\n\n\ndef proc_new_video(phase_key, videoid, logger=None):\n    prll_lock.acquire()\n\n    caffe.set_device(cfg.GPU_ID)\n    caffe.set_mode_gpu()\n\n    # Change <_data_file_> in memory\n    lst = cfg[phase_key].DATA_PATH.split('.')\n    lst[-2] = videoid\n    cfg[phase_key].DATA_FILE = '.'.join(lst)\n\n    cfg[phase_key].DATA_PATH = osp.join(cfg.DATA_DIR, cfg.DATASET,\n                                        cfg[phase_key].DATA_FILE)\n\n    # Would only modify  and \n    cfg.INFIX = '.{}'.format(videoid)\n\n    # Change path to save model in training phase by changing infix.\n    # Example: snapshot/rslt_trainnms0.7_by0000981_iter1.caffemodel\n    if phase_key == 'TRAIN':\n        cfg.SNAPSHOT_PATH = \\\n            may_change_infix(cfg.SNAPSHOT_PATH, '_', 'by', videoid)\n\n    runner = eval(phase_key.lower())\n    net = runner(logger)\n\n    return net\n\n\ndef rslt_wrapper(phase_key, videoid, logger=None):\n    '''Wrapper for net object, since multiprocessing.apply_async would\n    fail silently if job returns unserializable.'''\n    net = proc_new_video(phase_key, videoid, logger)\n    rslt = fetch_rslt(phase_key, net)\n\n    return rslt\n\n\ndef proc_prll(phase_key, num_workers):\n    pred_rslts = []\n    def _append_rslt(rslt):\n        pred_rslts.append(rslt)\n\n    num_cpu = cpu_count()\n    pool = Pool(min(num_cpu, num_workers))\n\n    jobs = []\n    videoids = fetch_videoids(phase_key)\n\n    for videoid in videoids:\n        # net = rslt_wrapper(phase_key, videoid,\n        #                    logging.getLogger(str(videoid)))\n        # _append_rslt(net)\n        jobs.append(\n            pool.apply_async(\n                rslt_wrapper,\n                args=(phase_key, videoid, logging.getLogger(str(videoid))),\n                callback=_append_rslt\n            )\n        )\n\n    pool.close()\n    pool.join()\n\n    return pred_rslts\n\n\nif __name__ == \"__main__\":\n    with workenv():\n        old_settings = np.seterr(all='raise', under='ignore')\n\n        parser = argparse.ArgumentParser()\n\n        parser.add_argument('--phase', type=str, required=True,\n                            choices=['train', 'test'])\n        parser.add_argument('--dataset', type=str, required=True,\n                            choices=['TH14', 'AN'])\n        parser.add_argument('--expname', type=str, required=True)\n        parser.add_argument('--rsltname', type=str, default='rslt')\n        # This argument would only be used when testing.\n        parser.add_argument('--pretrained', type=str,\n                            default='default.caffemodel')\n        parser.add_argument('--num_workers', type=int, default=16)\n\n        args = parser.parse_args()\n\n        config_path = osp.join(cfg.EXP_DIR, args.dataset,\n                               args.expname, 'config.yml')\n\n        # Init\n        cfg_from_file(config_path)\n        args.phase = args.phase.upper()\n        setup(args.phase, args.dataset, args.expname, args.rsltname)\n\n        # Check whether <pretrained> is specified when testing:\n        # a work-around for conditional required argument.\n        if args.phase == 'TEST':\n            assert args.pretrained != 'default.caffemodel', \\\n                'Specify pretrained model when testing.'\n            cfg.SNAPSHOT_PATH = osp.join(cfg.LOCAL_SNAPSHOT_PATH,\n                                         args.pretrained)\n\n        caffe.init_glog(osp.join(cfg.LOG_PATH, '{}{}.'.format(args.phase,\n                                                              cfg.INFIX)))\n\n        pred_rslts = proc_prll(args.phase, args.num_workers)\n        dump_rslt(args.phase, 
pred_rslts)\n","repo_name":"zhengshou/AutoLoc","sub_path":"tools/proc_prll_net.py","file_name":"proc_prll_net.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"61"} +{"seq_id":"36044635004","text":"__metaclass__ = type\n\n__all__ = [\n 'launchpadlib_credentials_for',\n 'launchpadlib_for',\n 'oauth_access_token_for',\n ]\n\n\nimport shutil\nimport tempfile\n\nfrom launchpadlib.credentials import (\n AccessToken,\n AnonymousAccessToken,\n Credentials,\n )\nfrom launchpadlib.launchpad import Launchpad\nimport transaction\nfrom zope.app.testing import ztapi\nfrom zope.component import getUtility\nfrom zope.publisher.interfaces import IEndRequestEvent\nimport zope.testing.cleanup\n\nfrom lp.registry.interfaces.person import IPersonSet\nfrom lp.services.oauth.interfaces import IOAuthConsumerSet\nfrom lp.services.webapp.adapter import get_request_statements\nfrom lp.services.webapp.interaction import ANONYMOUS\nfrom lp.services.webapp.interfaces import OAuthPermission\nfrom lp.services.webapp.publisher import canonical_url\nfrom lp.testing._login import (\n login,\n logout,\n )\n\n\ndef api_url(obj):\n \"\"\"Find the web service URL of a data model object.\n\n This makes it easy to load up the factory object you just created\n in launchpadlib.\n\n :param: Which web service version to use.\n\n :return: A relative URL suitable for passing into Launchpad.load().\n \"\"\"\n return canonical_url(obj, force_local_path=True)\n\n\ndef oauth_access_token_for(consumer_name, person, permission, context=None):\n \"\"\"Find or create an OAuth access token for the given person.\n :param consumer_name: An OAuth consumer name.\n :param person: A person (or the name of a person) for whom to create\n or find credentials.\n :param permission: An OAuthPermission (or its token) designating\n the level of permission the credentials should have.\n :param context: The OAuth context for the credentials (or a string\n designating same).\n\n :return: An OAuthAccessToken object.\n \"\"\"\n if isinstance(person, basestring):\n # Look up a person by name.\n person = getUtility(IPersonSet).getByName(person)\n if isinstance(context, basestring):\n # Turn an OAuth context string into the corresponding object.\n # Avoid an import loop by importing from launchpad.browser here.\n from lp.services.oauth.browser import lookup_oauth_context\n context = lookup_oauth_context(context)\n if isinstance(permission, basestring):\n # Look up a permission by its token string.\n permission = OAuthPermission.items[permission]\n\n # Find or create the consumer object.\n consumer_set = getUtility(IOAuthConsumerSet)\n consumer = consumer_set.getByKey(consumer_name)\n if consumer is None:\n consumer = consumer_set.new(consumer_name)\n else:\n # We didn't have to create the consumer. Maybe this user\n # already has an access token for this\n # consumer+person+permission?\n existing_token = [token for token in person.oauth_access_tokens\n if (token.consumer == consumer\n and token.permission == permission\n and token.context == context)]\n if len(existing_token) >= 1:\n return existing_token[0]\n\n # There is no existing access token for this\n # consumer+person+permission+context. 
Create one and review it.\n request_token = consumer.newRequestToken()\n request_token.review(person, permission, context)\n access_token = request_token.createAccessToken()\n return access_token\n\n\ndef launchpadlib_credentials_for(\n consumer_name, person, permission=OAuthPermission.WRITE_PRIVATE,\n context=None):\n \"\"\"Create launchpadlib credentials for the given person.\n\n :param consumer_name: An OAuth consumer name.\n :param person: A person (or the name of a person) for whom to create\n or find credentials.\n :param permission: An OAuthPermission (or its token) designating\n the level of permission the credentials should have.\n :param context: The OAuth context for the credentials.\n :return: A launchpadlib Credentials object.\n \"\"\"\n # Start an interaction so that oauth_access_token_for will\n # succeed. oauth_access_token_for may be called in any layer, but\n # launchpadlib_credentials_for is only called in the\n # PageTestLayer, when a Launchpad instance is running for\n # launchpadlib to use.\n login(ANONYMOUS)\n access_token = oauth_access_token_for(\n consumer_name, person, permission, context)\n logout()\n launchpadlib_token = AccessToken(\n access_token.key, access_token.secret)\n return Credentials(consumer_name=consumer_name,\n access_token=launchpadlib_token)\n\n\ndef _clean_up_cache(cache):\n \"\"\"Clean up a temporary launchpadlib cache directory.\"\"\"\n shutil.rmtree(cache, ignore_errors=True)\n\n\ndef launchpadlib_for(\n consumer_name, person=None, permission=OAuthPermission.WRITE_PRIVATE,\n context=None, version=\"devel\", service_root=\"http://api.launchpad.dev/\"):\n \"\"\"Create a Launchpad object for the given person.\n\n :param consumer_name: An OAuth consumer name.\n :param person: A person (or the name of a person) for whom to create\n or find credentials.\n :param permission: An OAuthPermission (or its token) designating\n the level of permission the credentials should have.\n :param context: The OAuth context for the credentials.\n :param version: The version of the web service to access.\n :param service_root: The root URL of the web service to access.\n\n :return: A launchpadlib Launchpad object.\n \"\"\"\n if person is None:\n token = AnonymousAccessToken()\n credentials = Credentials(consumer_name, access_token=token)\n else:\n credentials = launchpadlib_credentials_for(\n consumer_name, person, permission, context)\n transaction.commit()\n cache = tempfile.mkdtemp(prefix='launchpadlib-cache-')\n zope.testing.cleanup.addCleanUp(_clean_up_cache, (cache,))\n return Launchpad(credentials, None, None, service_root=service_root,\n version=version, cache=cache)\n\n\nclass QueryCollector:\n \"\"\"Collect database calls made in web requests.\n\n These are only retrievable at the end of a request, and for tests it is\n useful to be able to make assertions about the calls made during a\n request: this class provides a tool to gather them in a simple fashion.\n\n :ivar count: The count of db queries the last web request made.\n :ivar queries: The list of queries made. 
See\n lp.services.webapp.adapter.get_request_statements for more\n information.\n \"\"\"\n\n def __init__(self):\n self._active = False\n self.count = None\n self.queries = None\n\n def register(self):\n \"\"\"Start counting queries.\n\n Be sure to call unregister when finished with the collector.\n\n After each web request the count and queries attributes are updated.\n \"\"\"\n ztapi.subscribe((IEndRequestEvent, ), None, self)\n self._active = True\n\n def __enter__(self):\n self.register()\n return self\n\n def __call__(self, event):\n if self._active:\n self.queries = get_request_statements()\n self.count = len(self.queries)\n\n def unregister(self):\n self._active = False\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.unregister()\n","repo_name":"abramhindle/UnnaturalCodeFork","sub_path":"python/testdata/launchpad/lib/lp/testing/_webservice.py","file_name":"_webservice.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40142416982","text":"import os\nimport random\n\nprint(\"-- hangman startup --\")\n\n#staging word\nword = \"octopus\"\n\n#setup\nincorrect_guess_count = 0\nguessed_word = \"\"\ncorrectly_guessed_letters = \"\"\nincorrectly_guessed_letters = \"\"\n\nWORD_LIST = os.path.join(\"hangman/Scrabble-master/scrabble/sowpods.txt\")\nwordlist = open(WORD_LIST).readlines()\n# Get rid of newlines\nwordlist = [word.lower().strip() for word in wordlist]\n\nhangman_ascii = ['''\n +---+\n | |\n |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\n\n# FUNCTIONS\n\n# startup game w/ instructions\ndef printIntro():\n print(\"\\nThis is a game of hangman. Guess which letters are used to makeup hidden word. 
Make six wrong guesses and the game is over\")\n\n\n# get a new word\ndef getNewWord():\n global word\n global guessed_word\n word = random.choice(wordlist)\n \n for c in word:\n guessed_word = guessed_word + \"_\"\n\n # print(\"DEBUG The new word is \" + word)\n\n\n# feedback after a guess\ndef displayGameState():\n # print the hangman\n print(hangman_ascii[incorrect_guess_count])\n\n # if we've gone over the allowable guesses do not print game state\n if incorrect_guess_count < 6:\n print(\"\\nWORD: \", end = '')\n for g in guessed_word:\n print(g + \" \", end = '')\n\n print(\"\")\n print(\"Correctly guessed letters: \" + correctly_guessed_letters)\n print(\"Incorrectly guessed letters: \" + incorrectly_guessed_letters)\n # print(\"DEBUG incorrect guesses: \" + str(incorrect_guess_count) + \"\\n\")\n\n\n# check an inputted guess\ndef checkGuess(guess):\n global correctly_guessed_letters\n global guessed_word\n global incorrectly_guessed_letters\n global incorrect_guess_count\n \n if guess in word:\n # print(\"A CORRECT LETTER HAS BEEN FOUND\")\n positions = [pos for pos, char in enumerate(word) if char == guess]\n # print(positions)\n correctly_guessed_letters += guess + \" \"\n \n strlist = list(guessed_word)\n for p in positions:\n strlist[p] = guess\n \n guessed_word = ''.join(strlist)\n else:\n incorrect_guess_count += 1\n incorrectly_guessed_letters = incorrectly_guessed_letters + \" \" + guess\n\ndef mainLoop():\n global incorrect_guess_count\n \n # keep guessing until maxGuesses\n if incorrect_guess_count < 6:\n if guessed_word != word:\n \n input_text = input (\"Guess a letter: \")\n\n checkGuess(input_text)\n displayGameState()\n\n mainLoop()\n else:\n print(\"\\nYOU WON! Feast on the bones of your enemy. \\n\")\n else:\n print(\"\\nThe word was \" + word + \", IDOIT! \\n\")\n\n\n\n# setup\nprintIntro()\n\n# set first word\ngetNewWord()\n\n# display initial game state\ndisplayGameState()\n\n# main loop\nmainLoop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"damonseeley/PythonPit","sub_path":"hangman/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28238126011","text":"# Leia idade e sexo, mostre quantos homens, quantas pessoas > 18 e quantas mulheres < 20\n\nhomem = maior = menor = 0\nwhile True:\n print('=-' * 50)\n print('CADASTRO DE PESSOAS')\n print('=-' * 50)\n idade = int(input('Idade: '))\n sexo = ' '\n while sexo not in 'MF':\n sexo = input('Sexo [M/F]: ').strip().upper()[0]\n if idade > 18:\n maior += 1\n if sexo == 'M':\n homem += 1\n if sexo == 'F' and idade < 20:\n menor += 1\n continuar = ' '\n while continuar not in 'SN':\n continuar = input('Você deseja continuar? 
[S/N]: ').strip().upper()[0]\n if continuar == 'N':\n break\nprint(f'Maior de 18 {maior}\\nHomens {homem}\\nMulheres menores de 20{menor}')\n","repo_name":"bonilha-rogante/Python","sub_path":"python_curso_em_video/desafio069.py","file_name":"desafio069.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25780157346","text":"# -*- coding: utf-8 -*\n\"\"\"\n:py:class:`ScalarFieldReader`\n\n\"\"\"\nimport numpy as np\n\nfrom senta.common.register import RegisterSet\nfrom senta.common.rule import DataShape, FieldLength, InstanceName\nfrom senta.data.field_reader.base_field_reader import BaseFieldReader\nfrom senta.data.tokenizer.custom_tokenizer import CustomTokenizer\n\n\n@RegisterSet.field_reader.register\nclass ScalarFieldReader(BaseFieldReader):\n \"\"\"单个标量的field_reader,直接返回数据本身(数据可以是单个数字,也可以是单个的明文字符,\n 明文通过json文件中配置的vocab_path去进行转换),shape= [batch_size,1]\n \"\"\"\n def __init__(self, field_config):\n \"\"\"\n :param field_config:\n \"\"\"\n # 换成2.7的语法试试\n BaseFieldReader.__init__(self, field_config=field_config)\n self.paddle_version_code = 1.6\n if field_config.vocab_path and field_config.need_convert:\n self.tokenizer = CustomTokenizer(vocab_file=self.field_config.vocab_path)\n\n def init_reader(self):\n \"\"\" 初始化reader格式\n :return: reader的shape[]、type[]、level[]\n \"\"\"\n shape = [[-1, 1]]\n types = []\n levels = [0]\n if self.field_config.data_type == DataShape.INT:\n types.append('int64')\n elif self.field_config.data_type == DataShape.FLOAT:\n types.append('float32')\n else:\n raise TypeError(\"ScalarFieldReader's data_type must be int or float\")\n\n return shape, types, levels\n\n def convert_texts_to_ids(self, batch_text):\n \"\"\"将一个batch的明文text转成id\n :param batch_text:\n :return:\n \"\"\"\n src_ids = []\n for text in batch_text:\n src_id = text.split(\" \")\n ## 因为是单个标量数据,所以直接取第0个就行\n if self.tokenizer and self.field_config.need_convert:\n scalar = self.tokenizer.covert_token_to_id(src_id[0])\n else:\n scalar = src_id[0]\n src_ids.append(scalar)\n\n return_list = []\n if self.field_config.data_type == DataShape.FLOAT:\n return_list.append(np.array(src_ids).astype(\"float32\").reshape([-1, 1]))\n\n elif self.field_config.data_type == DataShape.INT:\n return_list.append(np.array(src_ids).astype(\"int64\").reshape([-1, 1]))\n\n return return_list\n\n def structure_fields_dict(self, fields_id, start_index, need_emb=True):\n \"\"\"静态图调用的方法,生成一个dict, dict有两个key:id , emb. 
id对应的是pyreader读出来的各个field产出的id,emb对应的是各个\n field对应的embedding\n :param fields_id: pyreader输出的完整的id序列\n :param start_index:当前需要处理的field在field_id_list中的起始位置\n :param need_emb:是否需要embedding(预测过程中是不需要embedding的)\n :return:\n \"\"\"\n record_id_dict = {}\n record_id_dict[InstanceName.SRC_IDS] = fields_id[start_index]\n record_dict = {}\n record_dict[InstanceName.RECORD_ID] = record_id_dict\n record_dict[InstanceName.RECORD_EMB] = None\n\n return record_dict\n\n def get_field_length(self):\n \"\"\"获取当前这个field在进行了序列化之后,在field_id_list中占多少长度\n :return:\n \"\"\"\n return FieldLength.SINGLE_SCALAR_FIELD\n","repo_name":"baidu/Senta","sub_path":"senta/data/field_reader/scalar_field_reader.py","file_name":"scalar_field_reader.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":1780,"dataset":"github-code","pt":"61"} +{"seq_id":"11282367374","text":"import discord\nfrom discord.ext import commands\nimport re\n\nfrom core import config\nfrom apps.convert import ConversionConn\nclass ConvertEvents(commands.Cog, name='conversion_events'):\n\n def __init__(self, bot):\n self.bot = bot\n self.keys = [val[0] for val in ConversionConn().keys()]\n \n @commands.Cog.listener()\n async def on_message(self, message):\n if message.author != self.bot.user:\n values = self.values_to_convert(message)\n if values:\n for value in values:\n conversions = self.convert_to_cousin(value)\n await message.channel.send(conversions)\n\n def values_to_convert(self, message):\n if any(val in message.content for val in self.keys):\n message_list = message.content.split()\n dataset = re.findall(r'[0-9]+', message.content)\n extracted = list(zip(\n [val for val in message_list for data in dataset if data in val], \n dataset))\n values = []\n for val in extracted:\n m_index = message_list.index(val[0])\n if len(val[1]) == len(message_list[m_index]):\n values.append((val[1], message_list[m_index+1]))\n\n if len(val[1]) < len(message_list[m_index]):\n values.append(tuple(re.findall('(\\d+)(\\w+)', message_list[m_index])[0]))\n return values\n\n def convert_to_cousin(self, value):\n convert = ConversionConn().conversion(value)\n new_val = convert[0]\n if 'x' in new_val:\n new_val = round(eval(new_val.replace('x', value[0])), 2)\n else:\n new_val = round(float(new_val) * float(value[0]), 2)\n return f'{value[0]}{value[1]} = {new_val}{convert[1]}'\n","repo_name":"Poseidon-Dev/janus","sub_path":"apps/convert/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23379956421","text":"'''\nCreated on Apr 1, 2013\n\n@author: pawel\n'''\nimport sys\n \ndef read_case_info(file):\n data = {}\n data['board'] = {}\n data['amount_of_empty'] = 0\n for i in range(4):\n raw_line = file.readline().strip('\\n')\n for j in range(4):\n data['board'][(i,j)] = raw_line[j]\n if raw_line[j] == '.':\n data['amount_of_empty'] += 1\n file.readline()\n return data\n\ndef check(symbol, board, fields):\n for field in fields:\n if board[field] not in [symbol, 'T']:\n return False\n return True\n\ndef check_rows(symbol, board):\n for i in range(4):\n if check(symbol, board, [(i, j) for j in range(4)]):\n return True\n return False\ndef check_cols(symbol, board):\n for i in range(4):\n if check(symbol, board, [(j, i) for j in range(4)]):\n return True\n return False\ndef check_diagonals(symbol, board):\n if check(symbol, board, [(i, i) for i in range(4)]):\n return True\n \n if 
check(symbol, board, [(3 - i, i) for i in (range(4))]):\n return True\n return False\n\ndef has_won(symbol, board):\n if check_rows(symbol, board):\n return True\n if check_cols(symbol, board):\n return True\n if check_diagonals(symbol, board):\n return True\n return False\n\ndef solve_case(data):\n if has_won('X', data['board']):\n return 'X won'\n elif has_won('O', data['board']):\n return 'O won'\n elif data['amount_of_empty'] > 0:\n return 'Game has not completed'\n else:\n return 'Draw'\n\nfile = open(sys.argv[1], 'r')\nnumber_of_cases = int(file.readline().strip())\ncounter = 0\nresults = []\nwhile number_of_cases > counter:\n case_info = read_case_info(file)\n results.append(solve_case(case_info))\n counter += 1\n \nfile_output = open(sys.argv[2], 'w')\nfor index, result in enumerate(results):\n file_output.write('Case #' + str(index + 1) + ': ' + result + '\\n') \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1675.py","file_name":"1675.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13517192474","text":"\"\"\"\nhttps://openpyxl.readthedocs.io/en/stable/\npip install openpyxl\npipenv install openpyxl\n\"\"\"\nimport openpyxl\nfrom random import uniform\n\n\"\"\"\npedidos = openpyxl.load_workbook('pedidos.xlsx') # coleção de abas.\nnome_planilhas = pedidos.sheetnames\nplanilha1 = pedidos['Página1']\n\nprint(planilha1['b4'].value) # acessando o valor diretamente\n\nfor campo in planilha1['b']: # obtendo os valores da coluna\n print(campo.value)\n\nfor linha in planilha1[a1:c2]: # iterando em um range por linhas\n for coluna in linha:\n print(coluna.value) \n\nfor linha in planilha1: # iterando diretamente na planilha(nao fica muito bom)\n for coluna in linha:\n print(coluna.value) \n\nplanilha1[b3].value = 2200 # alterando dados no objeto que criamos\npedidos.save(\"nova_planilha.xlsx\") # criando uma planilha com esse objeto\n\nfor linha in range(5, 16): # alterando valores de um range de linha usando \"cell\".\n numero_pedido = linha - 1\n planilha1.cell(linha, 1).value = numero_pedido # o segundo argumento é o numero da coluna\n planilha1.cell(linha, 2).value = 1200 + linha\n\n preco = round(uniform(10, 100), 2)\n planilha1.cell(linha, 3).value = preco\n\npedidos.save('nova_planilha.xlsx')\n\"\"\"\n\nplanilha = openpyxl.Workbook() # cria um workbook\nplanilha.create_sheet('Planilha1', 0) # cria uma planilha na aba 0\nplanilha.create_sheet('Planilha2', 1)\n\nplanilha1 = planilha['Planilha1']\nplanilha2 = planilha['Planilha2']\n\nfor linha in range(1, 11): # adicionando os dados\n numero_pedido = linha - 1\n planilha1.cell(linha, 1).value = numero_pedido\n planilha1.cell(linha, 2).value = 1200 + linha\n\n preco = round(uniform(10, 100), 2)\n planilha1.cell(linha, 3).value = preco\n\nfor linha in range(1, 11):\n planilha2.cell(linha, 1).value = f'Luiz {linha} {round(uniform(10, 100), 2)}' # cria nº aliatorio\n planilha2.cell(linha, 2).value = f'Otávio {linha} {round(uniform(10, 100), 2)}'\n planilha2.cell(linha, 3).value = f'Joãozinho {linha} {round(uniform(10, 100), 2)}'\n\nplanilha.save('nova_planilha.xlsx')\n\n","repo_name":"pedromadureira000/python","sub_path":"8-modulos/planilhas/excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40412697359","text":"from django.http import JsonResponse\nfrom 
rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom .models import Post\nfrom users.models import User\nimport json\nimport uuid\nfrom django.core.cache import cache\n\nCACHE_TTL = 3600  # Cache TTL in seconds\n\n\ndef get_unique_int():\n    max_limit = 2147483646\n    unique_uuid = uuid.uuid4()\n    unique_int = int(unique_uuid.int) % max_limit\n    return unique_int\n\n\nclass PostView(APIView):\n\n    @staticmethod\n    def get(request, post_id):\n        try:\n            cache_key = f'post:{post_id}'\n            cached_data = cache.get(cache_key)\n            if cached_data is not None:\n                return JsonResponse({\n                    'user_id': cached_data['user_id'],\n                    'post_id': cached_data['post_id'],\n                    'content': cached_data['content'],\n                }, status=200)\n            post = get_object_or_404(Post, post_id=post_id)\n            # populate the cache before returning; the original set it after an\n            # unconditional return (dead code) and keyed it on an undefined user_id\n            post_data_json = {\n                'user_id': post.user_id,\n                'post_id': post.post_id,\n                'content': post.content,\n                'post_date': post.post_date\n            }\n            cache.set(cache_key, post_data_json, CACHE_TTL)\n            return JsonResponse(\n                {\"post_id\": post.post_id, \"user_id\": post.user_id, \"content\": post.content,\n                 \"post_date\": post.post_date},\n                status=200\n            )\n\n        except Post.DoesNotExist:\n            return JsonResponse({'error': 'post not found'}, status=404)\n\n    @staticmethod\n    def put(request, post_id):\n        try:\n            post = get_object_or_404(Post, post_id=post_id)\n            body = json.loads(request.body)\n            content = body.get(\"content\") or post.content\n\n            Post.objects.filter(post_id=post_id).update(content=content)\n            cache_key = f'post:{post_id}'\n            cache.delete(cache_key)\n            post_data_json = {\n                'user_id': post.user_id,\n                'post_id': post.post_id,\n                'content': content,\n                'post_date': post.post_date\n            }\n            cache.set(cache_key, post_data_json, CACHE_TTL)\n\n            return JsonResponse({\"post_id\": post_id, \"content\": content}, status=200)\n        except Post.DoesNotExist:\n            return JsonResponse({'error': 'post not found'}, status=404)\n\n    @staticmethod\n    def delete(request, post_id):\n        try:\n            post = get_object_or_404(Post, post_id=post_id)\n            cache_key = f'post:{post_id}'\n            cache.delete(cache_key)\n            post.delete()\n            return JsonResponse({\"message\": \"Post deleted.\"}, status=200)\n        except Post.DoesNotExist:\n            return JsonResponse({'error': 'Post not found'}, status=404)\n\n\nclass PostsView(APIView):\n\n    @staticmethod\n    def post(request):\n        body = json.loads(request.body)\n        user_id = body.get(\"user_id\")\n        content = body.get(\"content\")\n        post_id = get_unique_int()\n\n        user = User.objects.filter(user_id=user_id).first()  # .get() raises DoesNotExist instead of returning None\n        if user is None:\n            return JsonResponse({\"error\": \"User is not authorized to add post.\"}, status=401)\n\n        if not user_id or not content:\n            return JsonResponse({\"error\": \"User ID and content are required.\"}, status=400)\n\n        cache_key = f'post:{post_id}'\n        post_data_json = {\n            'user_id': user_id,\n            'post_id': post_id,\n            'content': content\n        }\n        cache.set(cache_key, post_data_json, CACHE_TTL)\n\n        post = Post.objects.create(user_id=user_id, post_id=post_id, content=content)\n        return JsonResponse({\"post_id\": post.post_id}, status=200)\n\n    @staticmethod\n    def get(request):\n        posts = Post.objects.all()\n        data = [{\"post_id\": post.post_id, \"user_id\": post.user_id, \"content\": post.content} for post in posts]\n        return JsonResponse(data, safe=False)\n\n","repo_name":"manishkhandar/microblogging-platform","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18806867593","text":"import 
math\nimport time\n\nimport requests\nimport tushare as ts\nimport pandas as pd\nfrom pandas import DataFrame\nfrom stockstats import StockDataFrame\n\n# set the tushare token\nts.set_token(\"8e3c6429eaa36d0e2b6346489f769d0d459d1f03588c3cc77fd79f58\")\n# main tushare API handle\npro = ts.pro_api()\n\ncsv_encoding = 'GBK'\nfilepath: str = \"../data/\"\nstock_file_path: str = filepath + \"stock/{}.csv\"\nstock_factor_file_path: str = filepath + \"factor/{}.csv\"\nstock_list_path: str = filepath + \"stocklist.csv\"\n# NetEase URL for fetching stock history data\n# http://quotes.money.163.com/service/chddata.html?code=1000001&start=19910403&end=20210823&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP\nwangyi_stock_url: str = \"http://quotes.money.163.com/service/chddata.html?\" \\\n                        \"code={}&start={}&end={}\" \\\n                        \"&fields=TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP\"\n\n\n# fetch all stocks\ndef get_stock_all():\n    data: DataFrame = pro.stock_basic(exchange='', list_status='L',\n                                      fields='ts_code, symbol, name, area, industry, fullname,'\n                                             ' enname, cnspell, market, exchange, curr_type,'\n                                             ' list_status, list_date, delist_date, is_hs')\n    data.to_csv(stock_list_path, encoding=csv_encoding, index=False)\n\n\n# convert a tushare stock code to a NetEase stock code\ndef convert_tushare_code_to_wangyi_code(tushare_code: str) -> str:\n    exchange = tushare_code[7:9]\n    if exchange == 'SH':\n        return '0' + tushare_code[0:6]\n    elif exchange == 'SZ':\n        return '1' + tushare_code[0:6]\n\n\n# fetch history data for all stocks; tushare-to-NetEase exchange mapping SZ:1 SH:0\ndef get_all_stocks_history():\n    stocks: DataFrame = pd.read_csv(stock_list_path, encoding='GBK')\n    for code in stocks['ts_code']:\n        get_stock_history(convert_tushare_code_to_wangyi_code(code), '19890101', '20210824')\n\n\n# fetch history data for a single stock; stock_code is in NetEase format, e.g. 1000001\ndef get_stock_history(stock_code: str, start_date: str, end_date: str):\n    url = wangyi_stock_url.format(stock_code, start_date, end_date)\n    data = requests.get(url)\n    with open(stock_file_path.format(stock_code), \"wb\") as stream:\n        stream.write(data.content)\n\n\n# extract the historical adjustment factors for a stock; stock_code is a tushare code, e.g. 000001.SZ\ndef get_stock_factor(stock_code: str) -> DataFrame:\n    all_data: DataFrame\n    wangyi_stock_code = convert_tushare_code_to_wangyi_code(stock_code)\n    # check whether the stock exceeds the row limit; tushare returns at most 5000 rows per call\n    stock_datas = pd.read_csv(stock_file_path.format(wangyi_stock_code), encoding=\"GBK\")\n    if len(stock_datas) <= 3000:\n        df = pro.adj_factor(ts_code=stock_code, trade_date='')\n        all_data = df\n    else:\n        for n in range(0, math.ceil(len(stock_datas) / 3000)):\n            end_date: str = stock_datas.iloc[n * 3000][\"日期\"]\n            start_date: str = (\n                stock_datas.iloc[len(stock_datas) - 1][\"日期\"] if (((n + 1) * 3000 - 1) >= len(stock_datas)) else\n                stock_datas.iloc[(n + 1) * 3000 - 1][\"日期\"])\n            df = pro.adj_factor(ts_code=stock_code, start_date=start_date.replace(\"-\", \"\"),\n                                end_date=end_date.replace(\"-\", \"\"))\n            if n == 0:\n                all_data = df\n            else:\n                all_data = pd.concat([all_data, df], axis=0)\n\n    all_data.to_csv(stock_factor_file_path.format(wangyi_stock_code), encoding=\"GBK\")\n\n\n# extract the historical adjustment factors for all stocks\ndef get_all_stock_factor():\n    i = 1\n    stocks: DataFrame = pd.read_csv(stock_list_path, encoding='GBK')\n    for code in stocks['ts_code']:\n        get_stock_factor(code)\n        i = i + 1\n        print(i)\n        if 0 == (i % 100):\n            print(\"pausing\")\n            # pause one minute every 100 stocks, because the API only allows 500 calls per minute\n            time.sleep(60)\n\n\n# compute forward-adjusted (qfq) prices\ndef calculate_fqq(wangyi_stock_code: str):\n    stock_datas = pd.read_csv(stock_file_path.format(wangyi_stock_code), encoding=\"GBK\", index_col='日期')\n    stock_factors = 
pd.read_csv(stock_factor_file_path.format(wangyi_stock_code), encoding=\"GBK\",\n                                index_col='trade_date')\n    qfq = []\n    hfq = []\n\n    new_factor = stock_factors.iloc[0]['adj_factor']\n    for row in stock_datas.itertuples():\n        factor_index = int(str.replace(row.Index, '-', ''))\n        factor = stock_factors.loc[factor_index]['adj_factor']\n        factor = 1 if factor is None else factor\n        # print(float(row.收盘价) * factor)\n        hfq.append(float(row.收盘价) * factor)\n        qfq.append(float(row.收盘价) * factor / new_factor)\n    stock_datas['后复权'] = hfq\n    stock_datas['前复权'] = qfq\n    print(stock_datas)\n\n\n# compute technical indicators\ndef calculate_zhibiao(wangyi_stock_code: str, fq_flag: int):\n    # stock data\n    stock_datas: pd.DataFrame = pd.read_csv(stock_file_path.format(wangyi_stock_code)\n                                            , encoding=\"GBK\")\n    if fq_flag != 0:\n        # price adjustment handling\n        # stock adjustment factor data\n        stock_factors: pd.DataFrame = pd.read_csv(stock_factor_file_path.format(wangyi_stock_code)\n                                                  , encoding=\"GBK\", index_col='trade_date')\n        if fq_flag == 1:\n            # forward adjustment (qfq)\n            # latest adjustment factor\n            new_factor = stock_factors.iloc[0]['adj_factor']\n            stock_datas[\"收盘价\"] = list(\n                map(lambda x, y: y * (stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor']) / new_factor,\n                    stock_datas[\"日期\"], stock_datas[\"收盘价\"]))\n            stock_datas[\"最高价\"] = list(\n                map(lambda x, y: y * (stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor']) / new_factor,\n                    stock_datas[\"日期\"], stock_datas[\"最高价\"]))\n            stock_datas[\"最低价\"] = list(\n                map(lambda x, y: y * (stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor']) / new_factor,\n                    stock_datas[\"日期\"], stock_datas[\"最低价\"]))\n            stock_datas[\"开盘价\"] = list(\n                map(lambda x, y: y * (stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor']) / new_factor,\n                    stock_datas[\"日期\"], stock_datas[\"开盘价\"]))\n            stock_datas[\"前收盘\"] = list(\n                map(lambda x, y: y * (stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor']) / new_factor,\n                    stock_datas[\"日期\"], stock_datas[\"前收盘\"]))\n        elif fq_flag == 2:\n            # backward adjustment (hfq)\n            stock_datas[\"收盘价\"] = list(\n                map(lambda x, y: y * stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor'],\n                    stock_datas[\"日期\"], stock_datas[\"收盘价\"]))\n            stock_datas[\"最高价\"] = list(\n                map(lambda x, y: y * stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor'],\n                    stock_datas[\"日期\"], stock_datas[\"最高价\"]))\n            stock_datas[\"最低价\"] = list(\n                map(lambda x, y: y * stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor'],\n                    stock_datas[\"日期\"], stock_datas[\"最低价\"]))\n            stock_datas[\"开盘价\"] = list(\n                map(lambda x, y: y * stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor'],\n                    stock_datas[\"日期\"], stock_datas[\"开盘价\"]))\n            stock_datas[\"前收盘\"] = list(\n                map(lambda x, y: y * stock_factors.loc[int(str.replace(x, '-', ''))]['adj_factor'],\n                    stock_datas[\"日期\"], stock_datas[\"前收盘\"]))\n\n    # filter out suspended trading days, i.e. rows where close, high, low, open and volume are all 0\n    stock_datas = stock_datas[(stock_datas[\"收盘价\"] != 0) & (stock_datas[\"最高价\"] != 0)\n                              & (stock_datas[\"最低价\"] != 0) & (stock_datas[\"开盘价\"] != 0)\n                              & (stock_datas[\"成交量\"] != 0)]\n    # rename columns\n    stock_datas.rename(\n        columns={'日期': 'date', '收盘价': 'close', '最高价': 'high', '最低价': 'low', '开盘价': 'open', '成交量': 'volume',\n                 '换手率': 'tor', '涨跌额': 'chgamt', '涨跌幅': 'chg'}, inplace=True)\n    # reverse the row order\n    stock_datas = stock_datas.reindex(index=stock_datas.index[::-1])\n    stock = StockDataFrame.retype(stock_datas)\n    # sma\n    stock['close_5_sma'], stock['close_10_sma'], stock['close_30_sma'], stock['close_60_sma']\n    # ema\n    # boll: middle band  boll_ub: upper band  boll_lb: lower band\n    stock['boll'], stock['boll_ub'], stock['boll_lb']\n    # macd:DIF 
macds:DEA macdh*2:MACD\n    stock['macd'], stock['macds'], stock['macdh']\n    # kdj\n    stock['kdjk'], stock['kdjd'], stock['kdjj']\n    # rsi\n    stock['rsi_6'], stock['rsi_12'], stock['rsi_24']\n\n    stock.reset_index(level=None, drop=False, inplace=True, col_level=0, col_fill='')\n\n    return stock\n","repo_name":"zhangyunqi/pystock","sub_path":"pachong/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"42230359950","text":"from collections import deque\r\n\r\nn, m = map(int, input().split())\r\n\r\ngraph = [] # maze information as a 2D list\r\nfor _ in range(n):\r\n    graph.append(list(map(int, input())))\r\n\r\ndef bfs(x, y):\r\n    queue = deque()\r\n    queue.append((x,y))\r\n\r\n    # movement directions (up, down, left, right)\r\n    dx = [-1, 1, 0, 0]\r\n    dy = [0, 0, -1, 1]\r\n\r\n    while queue:\r\n        x, y = queue.popleft() # take the current coordinates\r\n        for i in range(4): # check the positions reached by moving up/down/left/right\r\n            nx = x + dx[i]\r\n            ny = y + dy[i]\r\n            if nx < 0 or nx >= n or ny < 0 or ny >= m: # ignore positions outside the grid\r\n                continue\r\n            if graph[nx][ny] == 0: # ignore cells that cannot be moved into \r\n                continue\r\n            if graph[nx][ny] == 1: # first visit: set the cell to current distance +1 (marks it as visited)\r\n                graph[nx][ny] = graph[x][y] + 1\r\n                queue.append((nx, ny)) # add the new coordinates to the queue\r\n    \r\n    return graph[n-1][m-1] # return the shortest distance to (n, m)\r\n\r\nprint(bfs(0, 0))","repo_name":"dduniverse/Algorithm","sub_path":"백준/Silver/2178. 미로 탐색/미로 탐색.py","file_name":"미로 탐색.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23450445181","text":"#!/usr/bin/env python3\n#coding: utf-8\nimport sys\nimport unittest\nfrom io import StringIO\n\ndef solve_case(line):\n    shymax, shyness_str = line.strip().split()\n    shymax = int(shymax)\n    if shymax == 0:\n        return 0\n    people = []\n    stand_so_far = 0\n    required = 0\n    for index, digit in enumerate(shyness_str):\n        digit = int(digit)\n        if index == 0:\n            stand_so_far += digit\n            continue\n        if digit > 0:\n            people.append((index, digit))\n    for shyness, number in people:\n        if stand_so_far < shyness:\n            required += shyness-stand_so_far\n            stand_so_far = shyness\n        stand_so_far += number\n\n    return required\n\n\ndef solve(inp, outp):\n    inp.readline()\n    for index, line in enumerate(inp):\n        result = solve_case(line)\n        outp.write('Case #%d: %d\\n' % (index+1, result))\n    outp.flush()\n\n\ntest_data = [\n'4 11111',\n'1 09',\n'5 110011',\n'0 1',\n'5 000009'\n]\n\ntest_data = '%d\\n' % len(test_data) + ''.join(x+'\\n' for x in test_data)\n\ntest_result = \\\n'''Case #1: 0\nCase #2: 1\nCase #3: 2\nCase #4: 0\nCase #5: 5\n'''\n\nclass MagicTest(unittest.TestCase):\n    def test_magic_usual(self):\n        outp = StringIO()\n        #solve(StringIO(test_data), outp)\n        solve(open('A-small0.in'), outp)\n        self.assertEqual(outp.getvalue(), test_result)\n\n\nif __name__ == '__main__':\n    if len(sys.argv) > 1 and sys.argv[1] == '--test':\n        sys.argv.pop()\n        unittest.main()\n    else:\n        solve(sys.stdin, sys.stdout)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1901.py","file_name":"1901.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23406920981","text":"#! 
c:\\python33\\python.exe\r\n\r\nimport sys\r\nimport re\r\n\r\nCV_MAP = {}\r\n\r\ndef genMap():\r\n\tglobal CV_MAP\r\n\tvowels = re.compile( r\"[aeiou]\" )\r\n\tcord = ord('c')\r\n\tvord = ord('v')\r\n\t\r\n\tfor c in range( ord('a'), ord('z') + 1 ):\r\n\t\tchar_ord = chr(c)\r\n\t\tif vowels.match( char_ord ):\r\n\t\t\tCV_MAP[c] = vord\r\n\t\telse:\r\n\t\t\tCV_MAP[c] = cord\r\n\t\r\ndef solve( input ):\r\n\tglobal CV_MAP\r\n\t\r\n\tname, n = input.readline().rstrip().split()\r\n\t\r\n\tname = name.translate( CV_MAP )\r\n\tn = int(n)\r\n\tnstring = \"c\" * n\r\n\tnstring_ct = 0\r\n\t\r\n\t#print( \"name {} nstring {}\".format( name, nstring ) )\r\n\t\r\n\tcount = 0\r\n\t\r\n\tfor i in range( len( name ) ):\r\n\t\tfor j in range( i + 1, len(name) + 1 ):\r\n\t\t\tif nstring in name[i:j]:\r\n\t\t\t\tcount += 1\r\n\t\t\t\t#print( \"match: {}\".format( name[i:j] ) )\r\n\t\t\r\n\treturn count\r\n\r\nif __name__ == '__main__':\r\n\tinput = open( sys.argv[1], \"r\" )\r\n\toutput = open( sys.argv[2], \"w\" )\r\n\t\r\n\ttc_count = int( input.readline().rstrip() )\r\n\tgenMap()\r\n\t\r\n\tfor tc in range( 1, tc_count + 1):\r\n\t\tprint( \"Solving case: {}\".format( tc ) )\r\n\t\t\r\n\t\toutput.write( \"Case #{}: {}\\n\".format( tc, solve( input ) ) )\r\n\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_126/546.py","file_name":"546.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13672472662","text":"# Author: cym\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef draw_scatter(name, cost, value):\n \"\"\"\n :param n: 点的数量,整数\n :param s:点的大小,整数\n :return: None\n \"\"\"\n # 加载数据\n name = \"\".join(list(name)[:-4])\n x = []\n y = []\n for k, v in cost.items():\n x.append(v)\n y.append(value[k])\n # 通过切片获取横坐标x1\n # x1 = data[:, 0]\n # 通过切片获取纵坐标R\n # y1 = data[:, 3]\n # 横坐标x2\n # x2 = np.random.uniform(0, 5, n)\n # 纵坐标y2\n # y2 = np.array([3] * n)\n # 创建画图窗口\n fig = plt.figure()\n # 将画图窗口分成1行1列,选择第一块区域作子图\n ax1 = fig.add_subplot(1, 1, 1)\n # 设置标题\n # ax1.set_title('Result Analysis')\n # 设置横坐标名称\n font1 = {'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 18,\n }\n ax1.set_xlabel('node cost', font1)\n # 设置纵坐标名称\n ax1.set_ylabel('node value', font1)\n # 画散点图\n color = '#00CED1' # 点的颜色\n ax1.scatter(x, y, c=color, marker='o', alpha=0.4)\n # 画直线图\n # ax1.plot(x2, y2, c='b', ls='--')\n # 调整横坐标的上下界\n plt.xlim(xmax=1, xmin=0)\n # 显示\n\n plt.savefig('./out/' + 'CV_' + name + '.png', dpi=300)\n plt.show()\n\n","repo_name":"supercym/BoundedCost","sub_path":"drawscatter.py","file_name":"drawscatter.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23567823331","text":"def find2(n):\n sm = 0\n pow2 = 1\n while n > sm:\n sm = pow2 + sm\n pow2 *= 2\n return (sm, pow2//2)\n \ndef sol(n, k):\n sm, pow2 = find2(k)\n val = (n-(sm-pow2))\n q = int(val/((sm-pow2)+1))\n a = val % pow2\n b = pow2 - a\n left = k - (pow2 - 1)\n if left <= a:\n #print(q+1)\n return(unpack(q+1))\n else:\n #print(q)\n return(unpack(q))\n \ndef unpack(i):\n if i % 2 == 0:\n return (i//2, i//2-1)\n else:\n return ((i-1)//2, (i-1)//2)\n\n\nt = int(input())\nfor i in range(1, t + 1):\n n, k = [int(s) for s in input().split(\" \")]\n a, b = sol(n, k)\n print(\"Case #{}: {} {}\".format(i, a, b))\n 
\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1599.py","file_name":"1599.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18918801185","text":"# !/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n'极宇舟天笔试题(2019.3.28)'\n\n\"\"\"\n给出一个区间的集合,请合并所有重叠的区间。(leetcode056)\n\"\"\"\n# Definition for an interval.\nclass Interval:\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\nclass Solution:\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: List[Interval]\n \"\"\"\n if len(intervals) < 2:\n return intervals\n intervals.sort(key=lambda x: x.start)\n res = []\n for interval in intervals:\n if not res or res[-1].end < interval.start:\n res.append(interval)\n else:\n res[-1].end = max(res[-1].end, interval.end)\n temp = []\n for i in res:\n temp.append([i.start, i.end])\n return temp\n# 测试:\n# [[2, 3], [1, 2], [4, 7], [4, 5], [7, 9]]\n# a = Interval(2, 3)\n# b = Interval(1, 2)\n# c = Interval(4, 7)\n# d = Interval(4, 5)\n# e = Interval(7, 9)\n\na = Interval(1, 3)\nb = Interval(2, 6)\nc = Interval(8, 10)\nd = Interval(15, 18)\n\nS = Solution()\nprint(S.merge([a,b,c,d,]))\n","repo_name":"lxh1997zj/-offer_and_LeetCode","sub_path":"面试笔试/极宇舟天(2019.3.28)/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11272298577","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 27 18:19:54 2021\r\n\r\n@author: Kodeskolen\r\n\"\"\"\r\n\r\nfrom pylab import arange, plot, show\r\n\r\n\r\nx = arange(0, 11)\r\ny = x**2 - 1\r\n\r\nprint(x)\r\nprint(y)\r\n\r\nplot(x, y)\r\nshow()","repo_name":"kodeskolen/tekna_h21_intro","sub_path":"dag2/Kode/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30692855927","text":"from __future__ import annotations\n\nimport logging\nfrom typing import Optional, Final\n\nfrom redis.client import Redis\n\nfrom base import Worker\nfrom constants import IN, COUNT, FNAME\n\n\nclass MyRedis:\n def __init__(self):\n self.rds: Final = Redis(host='localhost', port=6379, password='pass',\n db=0, decode_responses=False)\n self.rds.flushall()\n self.rds.xgroup_create(IN, Worker.GROUP, id=\"0\", mkstream=True)\n self.file_map = {}\n\n def get_timestamp(self) -> float:\n timestamp = self.rds.time()\n return float(f'{timestamp[0]}.{timestamp[1]}')\n\n def add_file(self, fname: str) -> None:\n id = self.rds.fcall('add_file', 3, fname, IN, FNAME)\n self.file_map[id.decode()] = fname\n # id = self.rds.xadd(IN, {FNAME: fname})\n # self.rds.hset('latency', id, self.get_timestamp())\n\n def top(self, n: int) -> list[tuple[bytes, float]]:\n return self.rds.zrevrangebyscore(COUNT, '+inf', '-inf', 0, n,withscores=True)\n\n def get_latency(self) -> list[float]:\n lat = []\n lat_data = self.rds.hgetall(\"latency\")\n for k in sorted(lat_data.keys()):\n v = lat_data[k]\n lat.append(float(v.decode()))\n return lat\n \n def write_to_file(self):\n lat = []\n f = open(\"c0.txt\", \"a\")\n lat_data = self.rds.hgetall(\"latency\")\n for k in sorted(lat_data.keys()):\n v = lat_data[k]\n f.write(self.file_map[k.decode()][24:-4] + ', ' + lat_data[k].decode() + '\\n')\n f.close()\n\n def read(self, worker: Worker) -> Optional[tuple[bytes, dict[bytes, 
bytes]]]:\n claimed_messages = self.rds.xautoclaim(name = IN, \n groupname=Worker.GROUP, \n consumername=worker.name, \n min_idle_time=500,\n count=1)\n if claimed_messages[1]:\n return claimed_messages[1][0]\n else:\n path_ds = self.rds.xreadgroup(groupname=Worker.GROUP, \n consumername=worker.name, \n streams = {IN:'>'}, \n count = 1)\n if len(path_ds)==0:\n return None, None\n else:\n return path_ds[0][1][0]\n\n def write(self, id: bytes, wc: dict[str, int]) -> None:\n keys = list(wc.keys())\n vals = list(wc.values())\n num = len(keys)\n\n # Run the lua script atomically\n in_time = float(self.rds.hget('latency', id).decode())\n self.rds.fcall('add_wc', 5+2*num, id, IN, Worker.GROUP ,COUNT, num, *keys, *vals)\n out_time = float(self.rds.hget('latency', id).decode())\n # out_time = self.get_timestamp()\n # in_time = float(self.rds.hget('latency', id).decode())\n # latency = out_time-in_time\n # print(latency, out_time, in_time)\n # self.rds.hset('latency', id, latency)\n\n def is_pending(self):\n pending_info = self.rds.xpending(IN, Worker.GROUP)\n return pending_info['pending']!=0\n","repo_name":"tan90cot0/COL733-Cloud-Computing","sub_path":"Lab3-Stream-Processing/mrds.py","file_name":"mrds.py","file_ext":"py","file_size_in_byte":2933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6500015436","text":"# rollback.py\nimport pymysql as py\n\ndb = py.connect(\"localhost\",\"root\",\"a123456\",\n\t\t\t\t\"db3\",port=3306,charset=\"utf8\")\n\ncur = db.cursor()\ntry:\n\tcur.execute(\"update CCB set money=95000 where name='轉錢';\")\n\tcur.execute(\"update ICBC set money=7000 where name='借錢';\")\n\tdb.commit()\n\tprint(\"ok\")\nexcept Exception as e:\n\tprint(e)\n\tdb.rollback()\nfinally:\n\tcur.close()\n\tdb.close()","repo_name":"Sapphire0912/Programming","sub_path":"MySQL/MySQL程式碼/rollback.py","file_name":"rollback.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73702060354","text":"import random\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nfrom bs4 import BeautifulSoup\nimport requests\nimport csv\n\nHEADERS = {\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,'\n 'application/signed-exchange;v=b3;q=0.9',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/94.0.4606.81 Safari/537.36 '\n}\n\n\ndef get_source_html(url):\n driver = webdriver.Chrome(executable_path=\"/usr/local/bin/chromedriver\")\n driver.maximize_window()\n\n try:\n driver.get(url=url)\n driver.implicitly_wait(10)\n time.sleep(3)\n\n while True:\n find_last_element = driver.find_element_by_class_name('shop-clear')\n if driver.find_elements_by_tag_name(\"li\"):\n with open('html-pages/hair-care.html', 'w') as file:\n file.write(driver.page_source)\n break\n else:\n actions = ActionChains(driver)\n actions.move_to_element(find_last_element).perform()\n time.sleep(3)\n\n except Exception as e:\n print(e)\n finally:\n driver.close()\n driver.quit()\n\n\ndef get_items_url(file_path):\n with open(file_path) as file:\n src = file.read()\n\n soup = BeautifulSoup(src, 'lxml')\n item_divs = soup.find_all('div', class_='product-image')\n urls = []\n for item in item_divs:\n item_url = item.find('a').get('href')\n urls.append(item_url)\n\n with open('hair-care-urls.txt', 'w') as 
file:\n for url in urls:\n file.write(f'{str(url)}\\n')\n print(\"SUCCESSFULLY COLLECTED\")\n\n\ndef get_data(file_path):\n with open(file_path) as file:\n url_list = [url.strip() for url in file.readlines()]\n\n for url in url_list:\n response = requests.get(url=url, headers=HEADERS)\n soup = BeautifulSoup(response.text, 'lxml')\n\n try:\n title = soup.find('h1').find('a').text.strip()\n except Exception as e:\n title = \"Test\"\n try:\n price = soup.find('span', class_='css-12nhxov-paragraph-sansSerif-currentPriceLabel').text\n except Exception as e:\n price = \"Test price\"\n try:\n img = soup.find_all('img')[31]['alt'].replace(' ', '-').replace('/', '') + '.jpg'\n except Exception as e:\n img = \"Test Image\"\n try:\n description = soup.find_all(\"div\",\n class_=\"css-g0vocb-paragraph-sansSerif-accordionInnerContent \"\n \"accordion-inner-content\")[1].text\n except Exception as e:\n description = \"Test Description\"\n random_id = random.randrange(145, 182)\n\n data = {\n 'ID': random_id,\n 'Type': 'simple',\n 'Name': title,\n 'Published': 1,\n 'Is featured?': 1,\n 'Visibility in catalog': 'visible',\n 'Short description': description[15:],\n 'Tax status': 'taxable',\n 'In stock?': 1,\n 'Backorders allowed?': 0,\n 'Sold individually?': 0,\n 'Allow customer reviews?': 1,\n 'Regular price': price,\n 'Categories': 'body',\n 'Tags': 'body',\n 'Position': 0,\n 'Images': 'http://localhost:8888/wp-content/uploads/2021/10/' + img.replace('+', '').replace('&', '').replace('---', '-').replace('--', '-').replace('(', '').replace(')', '').replace('é', 'e').replace(\"'\", ''),\n }\n write_csv(data)\n\n\ndef write_csv(data):\n with open('all-new-products.csv', 'a') as csv_file:\n writer = csv.writer(csv_file, delimiter=';')\n writer.writerow((data['ID'], data['Type'], data['Name'], data['Published'], data['Is featured?'],\n data['Visibility in catalog'], data['Short description'], data['Tax status'],\n data['In stock?'], data['Backorders allowed?'], data['Sold individually?'],\n data['Allow customer reviews?'], data['Regular price'], data['Categories'],\n data['Tags'], data['Images'], data['Position'],\n ))\n\n\ndef main():\n # get_source_html(url=\"https://www.mecca.com.au/hair-care/\")\n # get_items_url(file_path='/Users/azamatjumma/Desktop/Selenium/html-pages/body-personal-care.html')\n get_data(file_path='/Users/azamatjumma/Desktop/Selenium/body-personal-care-urls.txt')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jumabekovs/Selenium-mecca.au.com","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73506697474","text":"def soma_pares ():\n global x\n soma=0\n if x%2==0:\n primeiro=x\n ultimo = x+10\n if x%2==1:\n primeiro=x+1\n ultimo= x+11\n for i in range (primeiro,ultimo,2 ):\n soma+=i\n print(soma)\n\nx = int(input())\nwhile x!=0:\n soma_pares()\n x= int(input())","repo_name":"Jumaruba/URI","sub_path":"PYTHON/1159(accepted).py","file_name":"1159(accepted).py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"14958852183","text":"\"\"\"You are climbing a staircase. It takes n steps to reach the top.\n\nEach time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\n\n\n\nExample 1:\n\nInput: n = 2\nOutput: 2\nExplanation: There are two ways to climb to the top.\n1. 1 step + 1 step\n2. 
2 steps\nExample 2:\n\nInput: n = 3\nOutput: 3\nExplanation: There are three ways to climb to the top.\n1. 1 step + 1 step + 1 step\n2. 1 step + 2 steps\n3. 2 steps + 1 step\n\n\nConstraints:\n\n1 <= n <= 45\"\"\"\n\n\ndef climbstairs(n):\n import math\n\n if 0 < n <= 2:\n return n\n elif 3 <= n <= 45:\n check = n // 2\n result = 1\n for poss in range(1, check + 1):\n result += math.comb(n - poss, poss)\n return result\n else:\n print('Its more than 45 steps!, so given number of steps is exceeds limit')\n\n return\n\n\nprint(climbstairs(5))\n","repo_name":"ImSakunthala/leetcode","sub_path":"Beginner_level/dynamic_programming/climbing_Stairs.py","file_name":"climbing_Stairs.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4988832614","text":"from distutils.command.build_scripts import first_line_re\nimport random\nimport sys\nfrom typing import List\n\nsys.setrecursionlimit(20000) # Python recursion is limited to 993 calls.\n\nclass Node:\n\n def __init__(self, value:int, next_node = None):\n self.value = value\n self.next_node = next_node\n\ndef build_nodes():\n \"\"\"Builds a linked list of nodes.\n Returns the 1 node in the linked list.\n \"\"\"\n values_of_nodes = [random.randint(500,2000) for _ in range(19000)]\n prev_node = None\n first_node = None\n for value in values_of_nodes:\n node = Node(value)\n if prev_node is not None:\n prev_node.next_node = node\n else:\n first_node = node\n prev_node = node\n return first_node\n\ndef print_linked_list(node: Node):\n \"\"\"\n Prints the linked list nodes.\n \"\"\"\n if node.next_node is not None:\n print(f\"Node: {node.value} -- Next node: {node.next_node.value}\")\n print_linked_list(node.next_node)\n\n\ndef invert_linked_list(node: Node, prev_node: Node = None):\n if node == None:\n return prev_node\n temp_node = node.next_node\n if prev_node is None:\n node.next_node = None\n else:\n node.next_node = prev_node\n prev_node = node\n return invert_linked_list(node=temp_node, prev_node=prev_node)\n\ndef invert_linked_list_exercise_solution(node: Node):\n \"\"\"Invert a linked list youtube solution.\n https://www.youtube.com/watch?v=XDO6I8jxHtA\n\n Args:\n node (Node): The starting node.\n \"\"\"\n prev_node = None\n while node:\n temp = node\n node = node.next_node\n temp.next_node = prev_node\n prev_node = temp\n return prev_node\n\nnode = build_nodes()\nprint(f\"Linked list of nodes:\")\nprint_linked_list(node)\n\nprint(\"Inverted linked list:\")\nprint_linked_list(invert_linked_list(node))\n\nnode = build_nodes()\nprint(\"\\n2nd linked list of nodes\")\nprint_linked_list(node)\nprint(\"Inverted linked list 2nd version\")\nprint_linked_list(invert_linked_list_exercise_solution(node))\n\n","repo_name":"OblackatO/ITSecurity_CS_Topics","sub_path":"algorithms/invert_linked_list.py","file_name":"invert_linked_list.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8645892927","text":"from django import forms\nfrom django.forms import TextInput, Textarea\n\nfrom category.models import Category\n\n\nclass CategoryForm(forms.ModelForm):\n class Meta:\n model = Category\n fields = ['name', 'description', 'active']\n widgets = {\n 'name': TextInput(attrs={'placeholder': 'Please enter category name', 'class': 'form-control'}),\n 'description': Textarea(attrs={'placeholder': 'Please enter category description', 'class': 'form-control'})\n }\n\n 
def __init__(self, *args, **kwargs):\n        super(CategoryForm, self).__init__(*args, **kwargs)\n        self.fields['name'].label = 'Your name'\n        # self.fields['name'].widget = HiddenInput()\n\n    def clean(self):\n        cleaned_data = self.cleaned_data\n        name_input = self.cleaned_data.get('name')\n        all_categories = Category.objects.all()\n        for category in all_categories:\n            if name_input == category.name:\n                msg = 'This name already exists in db!'\n                self._errors['name'] = self.error_class([msg])\n\n        return cleaned_data\n\n\nclass CategoryFormUpdate(forms.ModelForm):\n    class Meta:\n        model = Category\n        # fields = '__all__'\n        fields = ['name', 'description', 'active']\n        widgets = {\n            'name': TextInput(attrs={'placeholder': 'Please enter category name', 'class': 'form-control'}),\n            'description': Textarea(\n                attrs={'placeholder': 'Please enter category description', 'class': 'form-control'})\n        }\n\n    def __init__(self, *args, **kwargs):\n        super(CategoryFormUpdate, self).__init__(*args, **kwargs)\n        self.fields['name'].label = 'Your name'\n","repo_name":"alexnd22/eShopProject","sub_path":"category/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8050141323","text":"from rest_framework.renderers import JSONRenderer\nfrom common.response_code import SUCCESS\n\n\nclass StandardJSONRender(JSONRenderer):\n\n    def render(self, data, accepted_media_type=None, renderer_context=None):\n        data = {} if data is None else data\n        if isinstance(data, dict) and data.get('success') is not None:\n            standard_data = data\n        else:\n            standard_data = {'success': SUCCESS[0], 'code': SUCCESS[1], 'message': SUCCESS[2], 'data': data}\n\n        return super().render(standard_data, accepted_media_type=accepted_media_type, renderer_context=renderer_context)\n","repo_name":"zem12345678/study_online_backend","sub_path":"common/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27046775993","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nclass Perceptron:\r\n    def __init__(self, lr_w=0.00001, lr_b=0.01, epochs=3):\r\n        self.w = np.random.rand(1, 1)\r\n        self.b = np.random.rand(1, 1)\r\n        self.lr_w = lr_w\r\n        self.lr_b = lr_b\r\n        self.epochs = epochs\r\n        self.Errors = []\r\n        self.Errors_test = []\r\n        self.W = []\r\n        self.B = []\r\n    \r\n    def fit(self, X_train, X_test, Y_train, Y_test):\r\n        fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 6))\r\n        \r\n        for epoch in range(self.epochs):\r\n            for i in range(X_train.shape[0]): \r\n                x = X_train[i]\r\n                y = Y_train[i]\r\n                \r\n                y_pred = x * self.w + self.b\r\n                e = y - y_pred\r\n                \r\n                self.w += self.lr_w * e * x\r\n                self.b += self.lr_b * e * 1\r\n                self.W.append(self.w.copy())  # copy, otherwise every history entry aliases the live weight array\r\n                self.B.append(self.b.copy())\r\n                \r\n                Y_pred = X_train * self.w + self.b\r\n                Error = np.mean((Y_train - Y_pred) ** 2)\r\n                self.Errors.append(Error)\r\n                ax2.clear()\r\n                ax2.plot(self.Errors)\r\n\r\n                Y_pred_test = X_test * self.w + self.b\r\n                Error_test = np.mean((Y_test - Y_pred_test) ** 2)\r\n                self.Errors_test.append(Error_test)\r\n                ax3.clear()\r\n                ax3.plot(self.Errors_test)\r\n                \r\n                ax1.clear()\r\n                ax1.scatter(X_train,Y_train, c='blue')\r\n                ax1.plot(X_train,Y_pred, c='red')\r\n                \r\n        plt.show() \r\n        np.savez('weight_and_biases.npz', W=np.array(self.W), B=np.array(self.B))  # np.save's third argument is allow_pickle, so the biases were silently dropped\r\n\r\n    def predict(self, x):\r\n        
y_pred_answer = np.matmul(x, self.w) + self.b\r\n return y_pred_answer\r\n\r\n def evaluate(self, X, Y, loss='MSE'):\r\n Y_pred = []\r\n for i in range(X.shape[0]):\r\n Y_pred.append(self.predict(X[i]))\r\n Y_pred = np.array(Y_pred)\r\n \r\n E = Y - Y_pred\r\n \r\n if loss == \"MSE\":\r\n return np.mean(E ** 2)\r\n elif loss == \"MAE\":\r\n return np.mean(np.abs(E))","repo_name":"AliYqb/AliYqb-PyLearn-SajjadAemmi","sub_path":"Assignment39/Weather/Perceptron.py","file_name":"Perceptron.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"35831589764","text":"#!/bin/python3\n# -*- coding: UTF-8 -*-\n\n\"\"\"\n请实现 copyRandomList 函数,复制一个复杂链表。\n在复杂链表中,每个节点除了有一个 next 指针指向下一个节点,还有一个 random 指针指向链表中的任意节点或者 null。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/fu-za-lian-biao-de-fu-zhi-lcof\n\"\"\"\n\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\n\n\"\"\"\n1. 创建当前链表与复制链表交替出现的链表;\n2. 遍历新链表,为复制后的链表的 random 赋值;\n3. 遍历链表,将当前链表与复制后的链表分开。\n\"\"\"\n\n\nclass Solution:\n def copy_random_list(self, head: 'Node') -> 'Node':\n if not head:\n return head\n cur_node = head\n cur_node = self.create_new_list(cur_node)\n cur_node = self.correct_random_value(cur_node)\n cur_node, new_node = self.split_list(cur_node)\n return new_node\n\n def create_new_list(self, head: 'Node') -> 'Node':\n node_a = head\n\n while node_a:\n node_b = Node(node_a.val, node_a.next, node_a.random)\n node_a.next = node_b\n node_a = node_b.next\n return head\n\n def correct_random_value(self, head: 'Node') -> 'Node':\n if not head or not head.next:\n return head\n node_b = head.next\n\n while node_b:\n if node_b.random is not None:\n node_b.random = node_b.random.next\n if node_b.next and node_b.next.next:\n node_b = node_b.next.next\n else:\n break\n return head\n\n def split_list(self, head: 'Node') -> ('Node', 'Node'):\n if not head:\n return None, None\n if not head.next:\n return head, None\n head_a, head_b = head, head.next\n node_a, node_b = head_a, head_b\n while node_b:\n node_a.next = node_b.next\n node_a = node_a.next\n if node_a and node_a.next:\n node_b.next = node_a.next\n node_b = node_b.next\n return head_a, head_b\n\n\ndef construct_list(node_list: ['Node']) -> 'Node':\n if not node_list:\n return None\n tmp_list = list()\n head = Node(node_list[0][0])\n node_a = head\n tmp_list.append(node_a)\n for i in range(1, len(node_list)):\n node_b = Node(node_list[i][0])\n node_a.next = node_b\n node_a = node_b\n tmp_list.append(node_a)\n\n node_a = head\n for item in node_list:\n if item[1] is not None:\n node_a.random = tmp_list[item[1]]\n node_a = node_a.next\n return head\n\n\nif __name__ == \"__main__\":\n head_list = [[7, None], [13, 0], [11, 4], [10, 2], [1, 0]]\n head = construct_list(head_list)\n head_b = Solution().copy_random_list(head)\n node_a = head_b\n while node_a:\n if node_a.random is None:\n print('[' + str(node_a.val) + ', None]')\n else:\n print('[' + str(node_a.val) + ', ' + str(node_a.random.val) + ']')\n node_a = node_a.next\n\n\n\n\n\n","repo_name":"littleshuang/PythonCodes","sub_path":"leetcode/CopyRandomList.py","file_name":"CopyRandomList.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8220700834","text":"import json\nfrom rest_framework.response import 
Response\nfrom django.shortcuts import render, redirect\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom rest_framework.decorators import api_view\nfrom .db import db\nfrom datetime import datetime\n\n\n\n@api_view(['POST'])\ndef add_budget(request):\n try:\n data = json.loads(request.body)\n print(data)\n year = data['year']\n month = data['month']\n\n # Check if budget already exists for the given year and month\n existing_budget = db.budget.find_one({'year': year, 'month': month})\n if existing_budget:\n return JsonResponse({'status': 400, 'message': 'Budget already exists for the given year and month'})\n\n new_budget = {\n 'budget': data['budget'],\n 'month': month,\n 'year': year\n }\n print(new_budget)\n db.budget.insert_one(new_budget)\n return JsonResponse({'status': 200, 'message': 'Budget added successfully'})\n except:\n return JsonResponse({'status': 505, 'message': 'Something went wrong'})\n \n\n@api_view(['POST'])\ndef add_credit(request):\n try:\n data = json.loads(request.body)\n print(data)\n date = str(data['creditDate'])\n year = date[:4]\n month = date[5:7] \n new_credit= {\n 'creditDetail': data['creditDetail'],\n 'creditAmount': data['creditAmount'],\n 'creditDate': data['creditDate'],\n 'month':month,\n 'year':year\n }\n db.credit.insert_one(new_credit)\n return JsonResponse({'status': 200, 'message': 'credit added successfully'})\n except:\n return JsonResponse({'status': 505, 'message': 'Something went wrong'})\n\n\n@api_view(['POST'])\ndef add_debit(request):\n try:\n data = json.loads(request.body)\n date = str(data['debitDate'])\n year = date[:4]\n month = date[5:7]\n new_debit= {\n 'debitDetail': data['debitDetail'],\n 'debitAmount': data['debitAmount'],\n 'debitDate': data['debitDate'],\n 'month':month,\n 'year':year\n }\n db.debit.insert_one(new_debit)\n return JsonResponse({'status': 200, 'message': 'debit added successfully'})\n except:\n return JsonResponse({'status': 505, 'message': 'Something went wrong'})\n\n@api_view(['GET'])\ndef card_detail(request):\n try:\n current_date = datetime.now()\n month = current_date.month\n year = current_date.year\n budget = db.budget.find_one({'year': year, 'month': month})\n\n if budget:\n budget['_id'] = str(budget['_id'])\n return JsonResponse({'status': 200, 'message': 'Budget found', 'budget': budget})\n else:\n return JsonResponse({'status': 404, 'message': 'Budget not found'})\n\n except Exception as e:\n return JsonResponse({'status': 500, 'message': str(e)})\n\n\ndef total_income(request):\n try:\n current_date = datetime.now()\n month = current_date.month\n year = current_date.year\n\n total_income = db.credit.aggregate([\n {'$match': {'creditDate': {'$regex': f'{year}-{month:02}-.+'}}},\n {'$group': {'_id': None, 'totalIncome': {'$sum': '$creditAmount'}}}\n ])\n \n total_income = list(total_income)\n\n if total_income:\n total_income = total_income[0]['totalIncome']\n return JsonResponse({'status': 200, 'message': 'Total income found', 'totalIncome': total_income})\n else:\n return JsonResponse({'status': 404, 'message': 'Total income not found'})\n\n except Exception as e:\n print(e)\n return JsonResponse({'status': 500, 'message': str(e)})\n\n\n\n@api_view(['GET'])\ndef total_expense(request):\n try:\n current_date = datetime.now()\n month = current_date.month\n year = current_date.year\n\n total_expense = db.debit.aggregate([\n {'$match': {'debitDate': {'$regex': f'{year}-{month:02}-.+'}}},\n {'$group': {'_id': None, 'totalExpense': {'$sum': '$debitAmount'}}}\n ])\n\n 
total_expense = list(total_expense)\n\n if total_expense:\n total_expense = total_expense[0]['totalExpense']\n return JsonResponse({'status': 200, 'message': 'Total expense found', 'totalExpense': total_expense})\n else:\n return JsonResponse({'status': 404, 'message': 'Total expense not found'})\n\n except Exception as e:\n print(e)\n return JsonResponse({'status': 500, 'message': str(e)})\n\n\nfrom datetime import datetime\n\n@api_view(['GET'])\ndef list_expense(request):\n try:\n current_date = datetime.now()\n month = str(current_date.month).zfill(2)\n year = str(current_date.year) \n\n documents = db.debit.find({'year': year, 'month': month})\n\n data = []\n for document in documents:\n data.append({\n 'debitAmount': document['debitAmount'],\n 'debitDate': document['debitDate'],\n 'debitDetail': document['debitDetail']\n })\n print(data)\n\n return JsonResponse({'status': 200, 'message': 'Expense documents found', 'data': data})\n \n except Exception as e:\n return JsonResponse({'status': 500, 'message': str(e)})\n\n\n@api_view(['GET'])\ndef list_income(request):\n try:\n current_date = datetime.now()\n month = str(current_date.month).zfill(2)\n year = str(current_date.year) \n\n documents = db.credit.find({'year': year, 'month': month})\n\n data = []\n for document in documents:\n data.append({\n 'creditAmount': document['creditAmount'],\n 'creditDate': document['creditDate'],\n 'creditDetail': document['creditDetail']\n })\n print(data)\n\n return JsonResponse({'status': 200, 'message': 'income documents found', 'data': data})\n \n except Exception as e:\n return JsonResponse({'status': 500, 'message': str(e)})\n\n\n\n","repo_name":"bujair111/finace_management_backend","sub_path":"myproject/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25845973451","text":"#!/usr/bin/env python3\nimport math\nimport wpilib\n\n\nfrom wpilib.drive import MecanumDrive\nfrom networktables import NetworkTables\nfrom wpilib.controller import PIDController\nfrom wpilib.controller import SimpleMotorFeedforwardMeters\nfrom wpilib.geometry import Pose2d, Rotation2d, Translation2d\nfrom wpilib.kinematics import (\n ChassisSpeeds,\n MecanumDriveKinematics,\n MecanumDriveOdometry,\n MecanumDriveWheelSpeeds,\n)\n\nGEAR_RATIO = 10.75\n# measurements in metres\nTRACK_WIDTH = 0.579 # measured by characterization\nWHEEL_CIRCUMFERENCE = 0.0254 * 6 * math.pi\nXOffset = 0.288\nYOffset = 0.257\n\n\nclass MyRobot(wpilib.TimedRobot):\n \"\"\"Main robot class\"\"\"\n\n # Channels on the roboRIO that the motor controllers are plugged in to\n frontLeftChannel = 1\n rearLeftChannel = 2\n frontRightChannel = 3\n rearRightChannel = 4\n\n # The channel on the driver station that the joystick is connected to\n lStickChannel = 0\n rStickChannel = 1\n WHEEL_DIAMETER = 0.5 # 6 inches\n ENCODER_COUNTS_PER_REV = 3\n\n def robotInit(self):\n \"\"\"Robot initialization function\"\"\"\n self.frontLeftMotor = wpilib.Talon(self.frontLeftChannel)\n self.rearLeftMotor = wpilib.Talon(self.rearLeftChannel)\n self.frontRightMotor = wpilib.Talon(self.frontRightChannel)\n self.rearRightMotor = wpilib.Talon(self.rearRightChannel)\n\n self.lstick = wpilib.Joystick(self.lStickChannel)\n self.rstick = wpilib.Joystick(self.rStickChannel)\n self.sd = NetworkTables.getTable(\"SmartDashboard\")\n\n # Position gets automatically updated as robot moves\n self.gyro = wpilib.AnalogGyro(1)\n\n #Create the 
DriveTrain\n self.drive = MecanumDrive(\n self.frontLeftMotor,\n self.rearLeftMotor,\n self.frontRightMotor,\n self.rearRightMotor,\n )\n #Create The Encoders\n self.f_l_encoder = wpilib.Encoder(0, 1)\n self.f_l_encoder.setDistancePerPulse(\n (.0254* 6 * math.pi) / 1024\n )\n #self.f_l_encoder.setSimDevice(0)\n\n self.r_l_encoder = wpilib.Encoder(3, 4)\n self.r_l_encoder.setDistancePerPulse(\n (.0254* 6 * math.pi) / 1024\n )\n #self.r_l_encoder.setSimDevice(1)\n\n self.f_r_encoder = wpilib.Encoder(1, 2)\n self.f_r_encoder.setDistancePerPulse(\n (.0254* 6 * math.pi) / 1024\n )\n #self.f_r_encoder.setSimDevice(2)\n\n self.r_r_encoder = wpilib.Encoder(3, 4)\n self.r_r_encoder.setDistancePerPulse(\n (.0254* 6 * math.pi) / 1024\n )\n #self.r_r_encoder.setSimDevice(3)\n\n # Setting up Kinematics (an algorithm to determine chassi speed from wheel speed)\n # The 2d Translation tells the algorithm how far off center (in Meter) our wheels are\n # Ours are about 11.3 (x) by 10.11(y) inches off, so this equates to roughly .288 X .257 Meters\n # We use the X and Y Offsets above.\n\n m_frontLeftLocation = Translation2d(XOffset, YOffset)\n m_frontRightLocation = Translation2d(XOffset, (-1 * YOffset))\n m_backLeftLocation = Translation2d((-1 * XOffset), (YOffset))\n m_backRightLocation = Translation2d((-1 * XOffset), (-1 * YOffset))\n\n # Creat our kinematics object using the wheel locations.\n self.m_kinematics = MecanumDriveKinematics(\n m_frontLeftLocation,\n m_frontRightLocation,\n m_backLeftLocation,\n m_backRightLocation,\n )\n # Create the Odometry Object\n self.MecanumDriveOdometry = MecanumDriveOdometry(\n self.m_kinematics,\n Rotation2d.fromDegrees(-self.gyro.getAngle()),\n Pose2d(0, 0, Rotation2d(0)),\n )\n\n # Now that we have created the ability to see wheel speeds, chassis speeds and our position\n # Let us start to use feedforward to try to lock our robot into a specific speed.\n self.feedForward = SimpleMotorFeedforwardMeters(kS=0.194, kV=.5, kA=0.457)\n\n #def robotPeriodic(self):\n # Odometry Update\n # First, get the wheel speeds...\n \n # Next, we need to grab the Gyro angle and send it into the odometry. 
It must be inverted because gyro angles and WPILib headings use opposite sign conventions\n    \n\n\n\n    def disabled(self):\n        \"\"\"Called when the robot is disabled\"\"\"\n        while self.isDisabled():\n            wpilib.Timer.delay(0.01)\n\n    def autonomousInit(self):\n        \"\"\"Called when autonomous mode is enabled\"\"\"\n        self.timer = wpilib.Timer()\n        self.timer.start()\n        self.f_l_encoder.reset()\n        self.r_l_encoder.reset()\n        self.f_r_encoder.reset()\n        self.r_r_encoder.reset()\n        self.lastCount = 0\n\n\n    def autonomousPeriodic(self):\n        preChassis = ChassisSpeeds()\n        preChassis.vx = 5.0\n        preChassis.vy = 0.0\n        preChassis.omega = 0.0\n        # Convert to wheel speeds\n        speeds = self.m_kinematics.toWheelSpeeds(preChassis)\n        self.wheelSpeeds = MecanumDriveWheelSpeeds(\n            self.f_l_encoder.getRate(),\n            self.r_l_encoder.getRate(),\n            self.f_r_encoder.getRate(),\n            self.r_r_encoder.getRate(),\n        )\n        gyroAngle = Rotation2d.fromDegrees(-self.gyro.getAngle())\n        # Finally, we can update the pose...\n        self.m_pose = self.MecanumDriveOdometry.update(gyroAngle, self.wheelSpeeds)\n        #For Kinematics, we need to update the wheelspeeds\n        CurrentChassis = self.m_kinematics.toChassisSpeeds(self.wheelSpeeds)\n        print(CurrentChassis)\n        print(self.f_l_encoder.getDistancePerPulse())\n        print('difference')\n        print(self.f_l_encoder.get()-self.lastCount)\n        print('rate')\n        print(self.r_r_encoder.getRate())\n        print('lastCount')\n        self.lastCount = self.f_l_encoder.get()\n        print(self.lastCount)\n        \n\n        \n        \n\n        '''\n        \n        left_front = self.feedForward.calculate(speeds.frontLeft)s\n        right_front = self.feedForward.calculate(speeds.frontRight)\n        left_rear = self.feedForward.calculate(speeds.rearLeft)\n        right_rear = self.feedForward.calculate(speeds.rearRight)'''\n\n        #print(left_front, right_front, left_rear,right_rear)\n        \n\n\n        if self.timer.get() < 2.0:\n            # self.drive.driveCartesian(1, 1, 1, 1) #<---- This is using the drive method built into the mecanum drive.\n            # Maybe we want to use something with more control, like feedforward...\n            '''self.frontLeftMotor.set(-left_front)\n            self.rearLeftMotor.set(right_front)\n            self.frontRightMotor.set(-left_rear)\n            self.rearRightMotor.set(right_rear)'''\n            self.drive.driveCartesian(1, 0, 0, 0)\n        elif self.timer.get()>2 and self.timer.get()<4:\n            self.drive.driveCartesian(1, 0, 0, 0)\n        else:\n            self.drive.driveCartesian(0, 0, 0, 0)\n\n        \n\n        \n\n    def teleopPeriodic(self):\n        \"\"\"Called when operation control mode is enabled\"\"\"\n\n        # self.drive.driveCartesian(\n        #     self.lstick.getX(), -self.lstick.getY(), self.rstick.getX(), 0\n        # )\n\n        self.drive.driveCartesian(\n            self.lstick.getX(), -self.lstick.getY(), self.lstick.getRawAxis(2), 0\n        )\n\n\nif __name__ == \"__main__\":\n    wpilib.run(MyRobot)\n","repo_name":"Oscats/2020MentorMaster","sub_path":"physicsTrajectory/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11515555276","text":"#Create a program that randomly draws and lists a given number of\n#employees. 
For example, randomly draw and list 5 employees, WITHOUT\n#REPEATING.\n\nfrom FakeDB import FakeDB\nimport random\n\nnum = int(input(\"Enter the number of employees: \"))\n\nDB = FakeDB()\nDB.gerarFuncionarios()\n\nlista = []\nwhile len(lista) < num:\n    func = random.choice(DB.listFuncionarios)\n\n    ilista = True\n\n    for item in lista:\n        if(lista.__contains__(func)):\n            ilista = False\n\n    if(ilista):\n        lista.append(func)\n\nfor func in lista:\n    print(func.nome + \" \" + func.sobrenome)\n\n\n\n\n","repo_name":"GabriellyC/AtividadeA2Parte1","sub_path":"POO_04/POO04.py","file_name":"POO04.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34738045579","text":"import torch\nimport torch.nn as nn\n\n\nclass Generator(nn.Module):\n    def __init__(self, noise_dim, channels_num, hidden_dim):\n        super(Generator, self).__init__()\n        self._net = nn.Sequential(\n            # [N, noise_dim, 1, 1]\n            nn.ConvTranspose2d(noise_dim, hidden_dim*16, kernel_size=4, stride=1, padding=0),\n            nn.BatchNorm2d(hidden_dim*16),\n            nn.ReLU(),\n            # [N, hidden_dim*16, 4, 4]\n            nn.ConvTranspose2d(hidden_dim*16, hidden_dim*8, kernel_size=4, stride=2, padding=1),\n            nn.BatchNorm2d(hidden_dim*8),\n            nn.ReLU(),\n            nn.ConvTranspose2d(hidden_dim * 8, hidden_dim * 4, kernel_size=4, stride=2, padding=1),\n            nn.BatchNorm2d(hidden_dim * 4),\n            nn.ReLU(),\n            nn.ConvTranspose2d(hidden_dim * 4, hidden_dim * 2, kernel_size=4, stride=2, padding=1),\n            nn.BatchNorm2d(hidden_dim * 2),\n            nn.ReLU(),\n            nn.ConvTranspose2d(hidden_dim * 2, channels_num, kernel_size=4, stride=2, padding=1),\n            # [N, channels_num, 64,64]\n            nn.Tanh(),\n        )\n\n    def forward(self, x):\n        return self._net(x)","repo_name":"ErezSC42/gan_experiments","sub_path":"models/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23465350471","text":"# encoding: UTF-8\r\n\r\nfrom __future__ import absolute_import, division\r\n\r\nimport collections\r\nimport itertools\r\nimport sys\r\n\r\nclass gcj:\r\n    IN = open('D:\\code jam\\input.in', 'r')\r\n    OUT = open('D:\\code jam\\output.txt', 'w')\r\n    buf = None\r\n\r\n    identity = lambda x: x\r\n\r\n    @classmethod\r\n    def _read_line(cls):\r\n        if cls.buf:\r\n            res = cls.buf\r\n            cls.buf = None\r\n        else:\r\n            res = cls.IN.readline()\r\n        if not res:\r\n            raise EOFError()\r\n        return res\r\n\r\n    @classmethod\r\n    def line(cls, conv=identity):\r\n        line = cls._read_line()\r\n        return conv(line.rstrip(b'\\r\\n'))\r\n\r\n    @classmethod\r\n    def splitline(cls, conv=identity):\r\n        line = cls._read_line()\r\n        return [conv(x) for x in line.split()]\r\n\r\n    @classmethod\r\n    def whitespace(cls):\r\n        line = None\r\n        while not line:\r\n            line = cls._read_line()\r\n        i = 0\r\n        l = len(line)\r\n        while i < l and line[i].isspace():\r\n            i += 1\r\n        line = line[i:]\r\n        cls.buf = line\r\n\r\n    @classmethod\r\n    def token(cls, conv=identity):\r\n        cls.whitespace()\r\n        line = cls._read_line()\r\n        i = 0\r\n        l = len(line)\r\n        while i < l and not line[i].isspace():\r\n            i += 1\r\n        cls.buf = line[i:] if i < l else None\r\n        return conv(line[:i])\r\n\r\n    @classmethod\r\n    def tokens(cls, cnt, conv=identity):\r\n        #tokens=[]\r\n        #for _ in range(cnt):\r\n        #    tokens.append(cls.token(conv))\r\n        #return tokens    \r\n        return [cls.token(conv) for _ in range(cnt)]\r\n\r\n    current_case = 0\r\n\r\n    @classmethod\r\n    def case(cls):\r\n        cls.current_case += 1\r\n        return 'Case 
#{}:'.format(cls.current_case)\r\n \r\n @classmethod\r\n def writefile(cls, case, solve):\r\n cls.OUT.write( case + \" \" + str(solve) + '\\n')\r\n return\r\n \r\n\r\ndef solve():\r\n #Get Variables\r\n X,R,C = gcj.tokens(3, int) #can be token(int) or tokens(N, int) # can be int or str\r\n\r\n #print('A:', A) \r\n #print('B:', B) \r\n #print('K:', K) \r\n\r\n #SOLVE\r\n L = min(R,C)\r\n H = max(R,C)\r\n \r\n if (X > H ):\r\n return \"RICHARD\"\r\n \r\n if ((R*C)%X!=0):\r\n return \"RICHARD\"\r\n \r\n if X >= 7 and L>3:\r\n return \"RICHARD\"\r\n\r\n #odd\r\n if X%2!=0:\r\n #side = (X+1)/2\r\n if L < (X+1)/2:\r\n return \"RICHARD\"\r\n #even\r\n if X%2==0:\r\n small_side=X/2\r\n large_side=small_side+1\r\n if L < small_side or H < large_side:\r\n return \"RICHARD\"\r\n \r\n if L==2:\r\n if X>2*(H-X)+3:\r\n return \"RICHARD\"\r\n \r\n return \"GABRIEL\"\r\n\r\ndef main():\r\n t = gcj.token(int)\r\n for _ in range(t):\r\n case = gcj.case()\r\n if case == \"Case #8:\":\r\n j=8\r\n result = solve()\r\n \r\n gcj.writefile(case, result)\r\n print(case, result)\r\n\r\nmain()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_158/549.py","file_name":"549.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30244056261","text":"# using while loop display numbers 1 to 100\n\nnum1 = int(input(\"enter first number :\"))\nnum2= int(input(\"enter second number\"))\n\nif num1 < num2:\n while num1 <= num2:\n print(num1)\n num1 += 1\nelse:\n print(\"enter valid numbers\")\n\n\n# while loop\n\n# print numbers 1 to 10\n\nprint(\"START\")\nx = 1\nwhile x <= 10:\n print(x)\n x += 1\nprint(\"END\")\n\n\n# Display Numbers between any two numbers\nx = int(input(\"Enter First Number :\"))\ny = int(input(\"Enter Second Number :\"))\nif x < y:\n\n while x <= y:\n x += 1\n print(x)\n print(\"END The While loop\")\nelse:\n print(\"Enter valid numbers\")\n\n\n\n\n","repo_name":"pavitrakiran1992/sample","sub_path":"VN2_T03_064/_08_Loops/_practice _whileloops/_display_numbers.py","file_name":"_display_numbers.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70189812355","text":"\"\"\"\n\nThis approach will implement probabilisitc roadmap to obtain an obstacle free\npath to a goal within a simulation of San Francisco.\n\n\"\"\"\n\nimport argparse\nimport time\nimport msgpack\nfrom enum import Enum, auto\nimport sys\n\nimport numpy as np\n\nfrom planning_utils import *\n\nfrom udacidrone import Drone\nfrom udacidrone.connection import MavlinkConnection\nfrom udacidrone.messaging import MsgID\nfrom udacidrone.frame_utils import global_to_local\n\nfrom skimage.morphology import medial_axis\nfrom skimage.util import invert\n\nfrom sklearn.neighbors import KDTree\n\nimport networkx as nx\nfrom shapely.geometry import Polygon, Point, LineString\nfrom queue import PriorityQueue\n\n\nclass States(Enum):\n \"\"\"\n The possible states that the drone can exist within.\n \"\"\"\n MANUAL = auto()\n ARMING = auto()\n TAKEOFF = auto()\n WAYPOINT = auto()\n LANDING = auto()\n DISARMING = auto()\n PLANNING = auto()\n\n\nclass MotionPlanning(Drone):\n \"\"\"\n Inherit from Drone class. 
This class interfaces with the flight controller.\n \"\"\"\n\n def __init__(self, connection):\n super().__init__(connection)\n\n self.target_position = np.array([0.0, 0.0, 0.0])\n self.waypoints = []\n self.in_mission = True\n self.check_state = {}\n\n # hold navigation waypoints. these are waypoints obtained from medial-axis graph planning.\n # these will guide the drone to \"mini-goals\", allowing the drone to perform local planning\n # in order to avoid sudden obstacles, environment hazards, and/or any discrepancies\n # in the map/environment.\n self.navigation_waypoints = []\n\n # a list of polygons. create a KDTree that uses the centroid of the\n # polygon to sort in space, and also appends the polygon itself and the\n # height of the polygon.\n self.polygons = 0\n self.polygon_centroids = 0\n self.polygon_centroids_tree = 0\n\n self.grid = 0\n\n # gate for finite state machine\n self.initial_run = True\n\n # lets us know when the final navigation point is within the local planner so that we can\n # stop planning.\n # ideally, this value should become true when at the final location.\n self.stop_planning = False\n\n # target altitude\n self.TARGET_ALTITUDE = 0\n\n # initial state\n self.flight_state = States.MANUAL\n\n # register all your callbacks\n self.register_callback(MsgID.LOCAL_POSITION, self.local_position_callback)\n self.register_callback(MsgID.LOCAL_VELOCITY, self.velocity_callback)\n self.register_callback(MsgID.STATE, self.state_callback)\n\n\n\n def local_position_callback(self):\n\n if self.flight_state == States.TAKEOFF:\n\n # if we have reached target altitude, call waypoint_transition\n if -1.0 * self.local_position[2] > 0.95 * self.target_position[2]:\n self.waypoint_transition()\n\n elif self.flight_state == States.WAYPOINT:\n\n # check if the drone is within 10 meters of the target position.\n # this is the deadband.\n if np.linalg.norm(self.target_position[0:2] - self.local_position[0:2]) < 10.0:\n\n # if there are more waypoints to fly to, call a function that will publish the\n # waypoint.\n if (len(self.waypoints) > 0 or len(self.navigation_waypoints) > 0):\n self.waypoint_transition()\n\n else:\n\n # no waypoints to go toward; check that the drone is hovering before landing\n if np.linalg.norm(self.local_velocity[0:2]) < 1.0:\n self.landing_transition()\n\n\n def velocity_callback(self):\n if self.flight_state == States.LANDING:\n if self.global_position[2] - self.global_home[2] < 0.1:\n if abs(self.local_position[2]) < 0.01:\n self.disarming_transition()\n\n\n\n def state_callback(self):\n if self.in_mission:\n if self.flight_state == States.MANUAL:\n self.arming_transition()\n elif self.flight_state == States.ARMING:\n if self.armed:\n self.plan_path()\n elif self.flight_state == States.PLANNING:\n self.takeoff_transition()\n elif self.flight_state == States.DISARMING:\n if ~self.armed & ~self.guided:\n self.manual_transition()\n\n def arming_transition(self):\n self.flight_state = States.ARMING\n print(\"arming transition\")\n self.arm()\n self.take_control()\n\n def takeoff_transition(self):\n self.flight_state = States.TAKEOFF\n print(\"takeoff transition\")\n self.takeoff(self.target_position[2])\n\n def waypoint_transition(self):\n\n \"\"\"\n\n Call the local and global planner to obtain\n the next waypoint.\n\n \"\"\"\n\n self.plan_path()\n\n self.flight_state = States.WAYPOINT\n print(\"waypoint transition\")\n self.target_position = self.waypoints.pop(0)\n print('target position', self.target_position)\n self.cmd_position(self.target_position[0], 
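# cmd_position expects north, east, altitude, heading\n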
self.target_position[1], self.target_position[2], self.target_position[3])\n\n def landing_transition(self):\n self.flight_state = States.LANDING\n print(\"landing transition\")\n self.land()\n\n def disarming_transition(self):\n self.flight_state = States.DISARMING\n print(\"disarm transition\")\n self.disarm()\n self.release_control()\n\n def manual_transition(self):\n self.flight_state = States.MANUAL\n print(\"manual transition\")\n self.stop()\n self.in_mission = False\n\n def send_waypoints(self):\n print(\"Sending waypoints to simulator ...\")\n data = msgpack.dumps(self.waypoints)\n self.connection._master.write(data)\n\n def plan_path(self):\n\n # gateway. If a local path is found, will publish waypoints.\n # if a local path is not found, will attempt to find a path\n # again.\n path_found = False\n\n while not self.stop_planning and not path_found:\n\n # if this is the very first run, plan a path using medial-axis graph.\n # this planning occurs very quickly and will serve as a guiding set of waypoints for the drone's\n # local planner.\n if self.initial_run:\n\n navigation_path_found = False\n\n # continue to iterate until we have found a path using medial-axis.\n while not navigation_path_found:\n\n self.flight_state = States.PLANNING\n\n print(\"Searching for navigation path ...\")\n\n # set target altitude and safety distance to obstacles.\n self.TARGET_ALTITUDE = 1\n\n # a safety distance of 8 works very well given the limitations of the simulation.\n # Do not alter this value.\n SAFETY_DISTANCE = 8\n self.target_position[2] = self.TARGET_ALTITUDE\n\n # read lat0, lon0 from colliders into floating point values,\n # and set home position to these values.\n # the home position is not the current position of the drone, but a predefined\n # location.\n with open('colliders.csv') as f:\n first_line = f.readline()\n item_1, item_2 = first_line.split(', ')\n lat = float(item_1.split(' ')[1])\n lon = float(item_2.split(' ')[1])\n self.set_home_position(lon,lat,0.0)\n print('extracted home lat and lon from colliders')\n\n # retrieve current global position (in latitude and longitude)\n curr_global_pos = self.global_position\n\n # convert global position to local position.\n curr_local_position = global_to_local(curr_global_pos, self.global_home)\n\n # print out this useful information for debug\n #print('global home {0}, global position {1}, local position {2}'.format(self.global_home, self.global_position, self.local_position))\n\n # Read in 2.5D obstacle map.\n # NOTE: this obstacle map IS NOT 1:1 with simulation. 
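Treat the obstacle positions as approximate. 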
That is, the\n # obstacles in this file are not laid out exactly like the simulator.\n data = np.loadtxt('colliders.csv', delimiter=',', dtype='float64', skiprows=2)\n\n # Using the obstacle data, create a GRID using a target altitude and safety margin around obstacles.\n grid, north_offset, east_offset = create_grid(data, self.TARGET_ALTITUDE, SAFETY_DISTANCE)\n self.north_offset = north_offset\n self.east_offset = east_offset\n #print(\"North offset = {0}, east offset = {1}\".format(north_offset, east_offset))\n\n # run medial axis on the inverted grid to find a graph.\n # for more information on medial axis, view the readme.\n skeleton = medial_axis(invert(grid))\n\n # set grid start position to current position\n grid_start = (-north_offset + int(curr_local_position[0]), -east_offset + int(curr_local_position[1]))\n\n # check if LAT/LON arguments are passed in for a goal.\n grid_goal = 0\n lat, lon = args.lat, args.lon\n\n if lat == '' or lon == '':\n # no latitude and longitude goals passed in.\n # here are some hardcoded goals.\n # have only 1 goal activate at a time.\n print('No latitude or longitude value passed in. Will fly to hardcoded goal location')\n grid_goal = (815, 221)\n #grid_goal = (650, 60)\n #grid_goal = (612, 540)\n #grid_goal = (381, 331)\n #grid_goal = (86, 846)\n #grid_goal = (337, 812)\n\n else:\n\n print('goal latitude and longitude read from command line')\n\n # convert string to float\n lat, lon = np.float64(lat), np.float64(lon)\n\n # long, lat, altitude\n goal_global_position = np.array([lon, lat, 0.082])\n\n # convert global coordinates to local coordinates\n goal_local_position = global_to_local(goal_global_position, self.global_home)\n\n # find goal in grid using offsets obtained above.\n grid_goal = (-north_offset + int(goal_local_position[0]), -east_offset + int(goal_local_position[1]))\n\n # find nearest points to graph skeleton from grid start/stop locations\n skel_start, skel_goal = find_start_goal(skeleton, grid_start, grid_goal)\n\n # Run A* to find a path from start to goal\n print('Local Start and Goal: ', skel_start, skel_goal)\n # the heuristic is a simple euclidean distance.\n # astype(int): the np.int alias was removed from modern NumPy.\n navigation_path_found, path, _ = a_star_medial_axis(invert(skeleton).astype(int), heuristic_euc, tuple(skel_start), tuple(skel_goal))\n\n if navigation_path_found:\n\n # prune path to minimize number of waypoints.\n # run twice. will improve resulting waypoint sequence.\n print('pruning global path')\n path = path_prune(path, grid)\n path = path_prune(path, grid)\n\n # Convert global path to waypoints\n waypoints = [[int(p[0] + north_offset), int(p[1] + east_offset), int(self.TARGET_ALTITUDE), 0] for p in path]\n\n # Set self.navigation_waypoints. These waypoints will be used as a GUIDE for receding horizon planning.\n # Start from current location.\n self.navigation_waypoints = waypoints[1:]\n self.waypoints = waypoints\n\n # extract polygons and create a KDTree from the returned\n # list of polygon centroids.\n self.polygons, self.polygon_centroids = extract_polygons(data, SAFETY_DISTANCE)\n self.polygon_centroids_tree = KDTree(self.polygon_centroids)\n\n # setup work has completed.\n self.initial_run = False\n\n # send guiding waypoints to simulator for visualization\n self.send_waypoints()\n\n # waypoints will be populated by the local planner. 
Not this sequence, which obtains the\n # global plan\n self.waypoints = []\n\n else:\n\n print('could not find navigational path')\n\n\n # at this point, the global plan exists.\n # will implement receding horizon local planner using Probabilistic Roadmap.\n\n temp_local_position = 0\n\n # obtain the local position\n global_position = self.global_position\n local_position = global_to_local(global_position, self.global_home)\n\n if sum(self.target_position) == 1:\n # if there is no target position set, make the current location of the drone the target position.\n temp_local_position = [int(local_position[0]), int(local_position[1])]\n else:\n # plan to the current target position.\n temp_local_position = [int(self.target_position[0]), int(self.target_position[1])]\n\n # the first navigation waypoint in the list will serve as a guide to the local planner.\n # extract just the (x,y).\n temp_goal = self.navigation_waypoints[0]\n temp_goal = [temp_goal[0], temp_goal[1]]\n\n # find distance between the target waypoint and the navigation waypoint.\n dist_nav_goal = heuristic_euc(temp_local_position, temp_goal)\n\n # if the drone is close to the navigation waypoint and there is more than one navigation waypoint in the list,\n # pop the first navigation waypoint and use the next waypoint as a guide.\n if dist_nav_goal < 10.0 and len(self.navigation_waypoints) > 1:\n self.navigation_waypoints.pop(0)\n print('POPPING WAYPOINT')\n\n\n # get k samples within local space.\n # the horizon is defined within the get_samples() function.\n num_samples = 500\n samples = get_samples(temp_local_position, num_samples)\n\n # using KD tree containing obstacles, remove all nodes that exist within, or close to, an obstacle.\n # will return a list that contains nodes in the free space.\n nodes = remove_collisions(samples, self.polygons, self.polygon_centroids, self.polygon_centroids_tree, self.TARGET_ALTITUDE)\n\n \"\"\"\n Uncomment this section to visualize the nodes that exist in free space.\n\n new_list = []\n\n for p in nodes:\n new_node = [int(p[0]), int(p[1]), int(self.TARGET_ALTITUDE), 0]\n new_list.append(new_node)\n\n hold_waypoints = self.waypoints\n self.waypoints = new_list\n self.send_waypoints()\n self.waypoints = hold_waypoints\n \"\"\"\n\n\n # create a graph from these nodes.\n connections_per_node = 5\n g, _ = create_graph(nodes, self.polygons, self.polygon_centroids, self.polygon_centroids_tree, connections_per_node, self.TARGET_ALTITUDE)\n\n # find closest graph nodes to start position and goal position\n nav_point = [int(self.navigation_waypoints[0][0]), int(self.navigation_waypoints[0][1])]\n start_point = [int(temp_local_position[0]), int(temp_local_position[1])]\n dist_node_to_goal = 1000000\n dist_node_to_start = 1000000\n goal_node = 0\n start_node = 0\n\n for node in list(g.nodes):\n\n iteration_goal_dist = heuristic_euc(node, nav_point)\n iteration_start_dist = heuristic_euc(node, start_point)\n\n if iteration_goal_dist < dist_node_to_goal:\n dist_node_to_goal = iteration_goal_dist\n goal_node = node\n\n if iteration_start_dist < dist_node_to_start:\n dist_node_to_start = iteration_start_dist\n start_node = node\n\n \"\"\"\n Uncomment this section to visualize the nodes that exist in free space.\n\n new_list = []\n\n for p in nodes:\n new_node = [int(p[0]), int(p[1]), int(self.TARGET_ALTITUDE), 0]\n new_list.append(new_node)\n\n hold_waypoints = self.waypoints\n self.waypoints = new_list\n self.send_waypoints()\n self.waypoints = hold_waypoints\n \"\"\"\n\n # perform a* on the graph using 
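a euclidean heuristic to connect 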
the goal node and start node\n path_found, path, _ = a_star_graph(g, heuristic_euc, start_node, goal_node)\n\n if path_found:\n\n # minimize the number of waypoints found. Produces a smoother path.\n print('pruning local graph')\n path = graph_prune(path, self.polygons, self.polygon_centroids, self.polygon_centroids_tree, self.TARGET_ALTITUDE)\n\n # set up waypoints for publishing to simulator\n waypoints = [[int(p[0]), int(p[1]), int(self.TARGET_ALTITUDE), 0] for p in path]\n self.waypoints = waypoints[1:]\n self.send_waypoints()\n\n # check if we are at the final navigation point (our destination)\n temp_local_position = [int(self.local_position[0]), int(self.local_position[1])]\n temp_goal = self.navigation_waypoints[0]\n temp_goal = [temp_goal[0], temp_goal[1]]\n dist_nav_goal = heuristic_euc(temp_local_position, temp_goal)\n\n if dist_nav_goal < 5 and len(self.navigation_waypoints) == 1:\n # distance to goal is less than local planner and it is the last waypoint. stop planning here.\n # pop last waypoint.\n # If navigation_waypoints[] or waypoints[] are empty, landing_transition is signaled.\n self.navigation_waypoints.pop(0)\n self.stop_planning = True\n print('stopping planning')\n\n def start(self):\n self.start_log(\"Logs\", \"NavLog.txt\")\n\n print(\"starting connection\")\n self.connection.start()\n\n # Only required if they do threaded\n # while self.in_mission:\n # pass\n\n self.stop_log()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--port', type=int, default=5760, help='Port number')\n parser.add_argument('--host', type=str, default='127.0.0.1', help=\"host address, i.e. '127.0.0.1'\")\n\n # Add in command line option to pass in latitude and longitude\n parser.add_argument('--lat', type=str, default='', help='')\n parser.add_argument('--lon', type=str, default='', help='')\n args = parser.parse_args()\n\n conn = MavlinkConnection('tcp:{0}:{1}'.format(args.host, args.port), timeout=60)\n drone = MotionPlanning(conn)\n time.sleep(1)\n\n drone.start()\n","repo_name":"Shawn-Ricardo/Autonomous-Flying","sub_path":"Planning/probabilistic_roadmap.py","file_name":"probabilistic_roadmap.py","file_ext":"py","file_size_in_byte":19875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32648585456","text":"class Solution:\n def minPathSum(self, grid: List[List[int]]) -> int:\n n = len(grid)\n m = len(grid[0])\n def f(i,j):\n if i == n-1 and j == m- 1:\n return grid[i][j]\n if i >= n or j >= m:\n return 1000000000\n \n return grid[i][j] + min(f(i+1,j), f(i,j+1))\n \n dp = [[-1] * m for _ in range(n)]\n def mem(i,j):\n if i == n-1 and j == m- 1:\n return grid[i][j]\n if i >= n or j >= m:\n return 1000000000\n if dp[i][j] != -1:\n return dp[i][j]\n dp[i][j] = grid[i][j] + min(mem(i+1,j), mem(i,j+1))\n return dp[i][j]\n return mem(0,0)\n ","repo_name":"parasv24/grind","sub_path":"0064-minimum-path-sum/0064-minimum-path-sum.py","file_name":"0064-minimum-path-sum.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14913509606","text":"import os\nimport numpy as np\nimport pandas as pd\nimport cv2\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sys import maxsize\n\nRAW_DATA_DIR = \"presidential_videos/\"\nDATA_DIR = \"dataset/\"\nCSV_FILE = \"Labels.csv\"\nOUTPUT_CSV_FILE = \"input.csv\"\n\nCLASS_NAMES = [\"Positive\", 
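# label order matters: refactorFolder maps labels to class folders via CLASS_NAMES.index()\n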
\"Negative\", \"Neutral\"]\nCATEGORY_NAMES = [\"Training\", \"PublicTest\", \"PrivateTest\"]\nOUTPUT_CSV_HEADERS = [\"emotion\", \"pixels\", \"Usage\"]\n\n\ndef cropImage(image):\n gray = image\n if len(image.shape) == 3:\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n w, h = 0, 0\n for i in range(3, 6):\n faceCascadeProfile = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_profileface.xml\")\n faceCascadeFront = cv2.CascadeClassifier(cv2.data.haarcascades + \"haarcascade_frontalface_default.xml\")\n facesProfile = faceCascadeProfile.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=i,\n minSize=(30, 30)\n )\n\n facesFront = faceCascadeFront.detectMultiScale(\n gray,\n scaleFactor=1.3,\n minNeighbors=i,\n minSize=(30, 30)\n )\n\n if len(facesProfile) != 0:\n x, y, w, h = facesProfile[0]\n break\n elif len(facesFront) != 0:\n x, y, w, h = facesFront[0]\n break\n\n if w == 0 or h == 0:\n return None\n\n grayFace = gray[y:y + h, x:x + w]\n grayFace = cv2.resize(grayFace, (48, 48))\n\n return grayFace\n\n\ndef videoToImage(videoFile, desDir = \"\", crop=True):\n videoName = os.path.splitext(videoFile)[0]\n vidcap = cv2.VideoCapture(RAW_DATA_DIR + videoFile)\n\n # get frame count\n frameCnt = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n # set current frame at the middle of the video\n vidcap.set(cv2.CAP_PROP_POS_FRAMES, frameCnt // 2)\n success = True\n image = None\n\n while success:\n success, image = vidcap.read()\n\n if not success:\n break\n\n if crop:\n image = cropImage(image)\n\n if image is None:\n continue\n break\n\n if image is None:\n raise ValueError(\"Face not found.\")\n\n if desDir != \"\":\n cv2.imwrite(os.path.join(desDir, \"%s.jpg\" % videoName), image)\n print(\"Converted %s to %s successfully!\" % (videoName + \".mp4\", videoName + \".jpg\"))\n\n return image\n\n\ndef loadData(csvFile):\n df = pd.read_csv(csvFile)\n\n X = df[df.columns[0]].to_numpy()\n y = df[df.columns[1]].to_numpy()\n # split data into 3 parts (64%, 16%, 20%)\n XTrain, XTest, yTrain, yTest = train_test_split(X, y, test_size=0.2, random_state=1)\n XTrain, XValidation, yTrain, yValidation = train_test_split(XTrain, yTrain, test_size=0.2, random_state=1)\n\n trainData = np.c_[XTrain, yTrain]\n validData = np.c_[XValidation, yValidation]\n testData = np.c_[XTest, yTest]\n\n dataList = [trainData, testData, validData]\n\n return dataList\n\n\ndef refactorData(csvFile):\n dataList = loadData(csvFile)\n outputDf = pd.DataFrame(columns = OUTPUT_CSV_HEADERS)\n for i in range(len(dataList)):\n print(\"%s Set\\n\" % CATEGORY_NAMES[i])\n X = dataList[i][:, 0]\n pixels = []\n\n np.set_printoptions(threshold=maxsize, linewidth=maxsize)\n\n for j in range(X.shape[0]):\n # get image\n print(\"Converting image %d\" % j)\n pixel = videoToImage(X[j]).ravel().astype(np.uint8)\n # convert numpy array to string\n pixelStr = \" \".join(map(str, pixel.tolist()))\n pixels.append(pixelStr)\n\n # 0-Negative, 1-Neutral, 2-Positive\n y = dataList[i][:, 1]\n yOneHot = LabelEncoder().fit_transform(y)\n df = pd.DataFrame([yOneHot, pixels]).T\n df.columns = OUTPUT_CSV_HEADERS[:2]\n df[OUTPUT_CSV_HEADERS[-1]] = CATEGORY_NAMES[i]\n\n outputDf = pd.concat([outputDf, df], ignore_index=True)\n\n outputDf.to_csv(OUTPUT_CSV_FILE, index=False)\n\n\ndef refactorFolder(dataDir, csvFile):\n dataDir = os.path.join(dataDir)\n trainDir = dataDir + \"train\"\n validationDir = dataDir + \"validation\"\n testDir = dataDir + \"test\"\n dataDirs = [trainDir, validationDir, testDir]\n\n if not os.path.exists(dataDir):\n 
os.mkdir(dataDir)\n\n for directory in dataDirs:\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n dataList = loadData(csvFile)\n\n count = 0\n for i in range(len(dataList)):\n desDirs = []\n # create new folder by class\n for j in range(len(CLASS_NAMES)):\n desDir = dataDirs[i] + \"/\" + CLASS_NAMES[j]\n desDirs.append(desDir)\n if not os.path.exists(desDir):\n os.mkdir(desDir)\n\n data = dataList[i]\n for j in range(data.shape[0]):\n file = data[j, 0]\n idx = CLASS_NAMES.index(data[j, 1])\n videoToImage(file, desDirs[idx], crop=False)\n count += 1\n\n print(\"Total images: %s\" % count)\n\n\n# test cropImage function\n# image = cv2.imread(\"joe.7wfrtnGV27k.00.jpg\")\n# cropImage(image)\n\n# for image generator in method 3\nrefactorFolder(dataDir = DATA_DIR, csvFile = CSV_FILE)\n\n# if running on method 1 or 2, uncomment below lines\n# for kaggle compability\n# refactorData(CSV_FILE)","repo_name":"tzcliff/CS640_Facial_Expression_Recognition","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39462083699","text":"\"\"\" File contains class Scraper which scrapes website https://www.hyperia.sk/ for info about job offerings\r\n Author: Adam Fabo\r\n Date: 23.8.2022\r\n\"\"\"\r\n\r\nimport requests\r\nfrom lxml import html\r\n\r\nclass Scraper:\r\n\r\n web_link = 'https://www.hyperia.sk/'\r\n\r\n # paths for each important element on site\r\n place_xpath = '/html/body/div/div/div/div/section[1]/div[2]/div/div/div[1]/p/text()'\r\n salary_xpath = '/html/body/div/div/div/div/section[1]/div[2]/div/div/div[2]/p/text()'\r\n contract_type_xpath = '/html/body/div/div/div/div/section[1]/div[2]/div/div/div[3]/p/text()'\r\n\r\n # contact email can have 2 different paths\r\n contact_email_xpath = '/html/body/div/div/div/div/section[2]/div[2]/a'\r\n contact_email_xpath_2 = '/html/body/div/div/div/div/div[6]/a'\r\n\r\n\r\n def __init__(self,jobs):\r\n self.jobs = jobs\r\n\r\n def scrape(self):\r\n\r\n final_jobs = []\r\n\r\n for job in self.jobs.getchildren():\r\n\r\n title = job.getchildren()[0].text\r\n\r\n link_to_job = job.getchildren()[2][0].get('href')\r\n\r\n # load page of current job offering\r\n response = requests.get(self.web_link + link_to_job, stream=True)\r\n if not response.ok:\r\n continue\r\n\r\n response.encoding = response.apparent_encoding\r\n tree = html.document_fromstring(response.text)\r\n\r\n place = tree.xpath(self.place_xpath)[0]\r\n salary = tree.xpath(self.salary_xpath)[0]\r\n contract_type = tree.xpath(self.contract_type_xpath)[0]\r\n\r\n # try one path, if it is empty try another\r\n contact_email = tree.xpath(self.contact_email_xpath)\r\n if not contact_email:\r\n contact_email = tree.xpath(self.contact_email_xpath_2)\r\n\r\n contact_email = contact_email[0].get(\"href\")\r\n contact_email = contact_email.replace(\"mailto:\", \"\")\r\n\r\n job_info = {\"title\" : title, \"place\" : place, \"salary\" : salary, \"contract_type\" : contract_type, \"contact_email\" : contact_email}\r\n\r\n final_jobs.append(job_info)\r\n\r\n return final_jobs\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Adam-Fabo/Hyperia-parse","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4160412078","text":"import unittest\nfrom city_functions import cityCountry\n\nclass 
test(unittest.TestCase):\n def test_city_country(self):\n result = cityCountry(\"berlin\", \"germany\")\n result2 = cityCountry(\"lincoln\", \"nebraska\", \"20,000\")\n self.assertEqual(result, \"Germany, Berlin\")\n self.assertEqual(result2, \"Nebraska, Lincoln, Population: 20,000\")\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"RiggityRussell/CIT228","sub_path":"Chapter11/test_cities.py","file_name":"test_cities.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35049628213","text":"from PyQt5.QtWidgets import (QWidget, QPushButton, QVBoxLayout, QHBoxLayout, \n QFileDialog, QListWidget, QToolButton, QLabel, \n QApplication, QStackedWidget, QSizePolicy, QListWidgetItem, QTabWidget)\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtGui import QIcon\n\nfrom spectralyze.gui.Models.projectModel import projectModel\nfrom spectralyze.gui.Views.fileBrowse import fileBrowser\nfrom spectralyze.gui.Views.Widgets.menubar import MenuBar\nimport datetime\nimport os\nimport sys\nimport toml\n\n\nclass projectView(QWidget):\n \"\"\"\n A Widget for displaying a project. Contains a list of files and a\n stacked widget that contains the individual file views\n \n attributes\n -------------\n model: Project model\n fileBrowser: File browser. Only instantiated if needed\n fileViews: Dictionary of individual file widgets\n \n \"\"\"\n saveProject = pyqtSignal()\n def __init__(self, project, global_config):\n super().__init__()\n self.global_config = global_config\n\n self.CONFIG_FILE = os.path.join(self.global_config['config_location'], \n self.global_config['projectView'])\n self.model = project\n self.fileBrowser = None\n self.fileViews = {}\n self.menuBar = MenuBar(self.global_config)\n\n self.setupWidgets()\n self.connectSignals()\n self.connectSlots()\n super().update()\n\n def setupWidgets(self):\n \"\"\"\n Basic layout includes an editable list of files\n And the stacked widget containing individual file views.\n \"\"\"\n self.layout = QHBoxLayout()\n self.layout.addWidget(self.menuBar)\n self.projectNavigator = ProjectNavigator(self.global_config)\n self.projectNavigator.update({'fileList': self.model.getFileNames()})\n self.leftLayout = QVBoxLayout()\n self.fileViewsWidget = self.model.getWidget()\n self.projectNameLabel = QLabel(\"Project: {}\".format(self.model.name))\n self.saveButton = QPushButton(\"Save Project\")\n self.saveLabel = QLabel(\"\")\n self.leftLayout.addWidget(self.projectNameLabel)\n self.leftLayout.addWidget(self.projectNavigator)\n self.leftLayout.addWidget(self.saveButton)\n self.leftLayout.addWidget(self.saveLabel)\n self.layout.addLayout(self.leftLayout)\n self.layout.addWidget(self.fileViewsWidget)\n self.setLayout(self.layout)\n\n\n def connectSignals(self):\n self.projectNavigator.signal.connect(self.handleSignal)\n self.menuBar.signal.connect(self.handleMenuBarAction)\n #self.menuBar.exportMeta.connect(self.exportFileMeta)\n #self.menuBar.importMeta.connect(self.importFileMeta)\n\n def connectSlots(self):\n #self.fileList.addFile.connect(self.getFile)\n #self.fileList.removeFile.connect(lambda x: self.removeFile(x))\n #self.fileList.currentFileChanged.connect(lambda x: self.setActiveFile(x))\n self.saveButton.clicked.connect(self.saveProjectClicked)\n\n def addFile(self, files):\n for fname, ftype in files.items():\n self.model.addFile(fname, ftype)\n\n def removeFile(self, fname):\n self.model.removeFile(fname)\n\n def updateSelection(self, name):\n 
self.model.setActive(name)\n\n\n def saveProjectClicked(self):\n time = datetime.datetime.now()\n self.saveLabel.setText(\"Project saved at {}\".format(time.strftime(\"%H:%M\")))\n if not self.saveLabel.isVisible():\n self.saveLabel.show()\n\n self.saveProject.emit()\n \n def handleSignal(self, data):\n for k,v in data.items():\n if hasattr(self, k):\n f = getattr(self, k)\n f(v)\n \n def handleMenuBarAction(self, data):\n if self.model.canHandle(data['target']):\n self.model.handleSignal(data)\n\n def importFileMeta(self):\n fname = self.projectNavigator.widgets['fileList'].getCurrentSelection()\n self.model.importData(fname, 'meta', 'meta')\n\nclass ProjectNavigator(QTabWidget):\n signal = pyqtSignal(dict)\n def __init__(self, global_config):\n super().__init__()\n self.global_config = global_config\n self.CONFIG_FILE = os.path.join(self.global_config['config_location'], \n self.global_config['projectNavigator'])\n\n self.config = toml.load(self.CONFIG_FILE)\n self.icon_root = self.global_config['resource_location']\n self.layout = QVBoxLayout()\n self.setup()\n self.setLayout(self.layout)\n\n def setup(self):\n self.widgets = {}\n self.icons = {}\n for name, data in self.config['widgets'].items():\n widget_type = getattr(sys.modules[__name__], data['widget'])\n widget = widget_type(global_config=self.global_config)\n self.widgets.update({name: widget})\n self.icons.update({name: data['icons']})\n img_loc = os.path.join(self.icon_root, self.icons[name]['inactive'])\n self.addTab(self.widgets[name], QIcon(img_loc), data['name'])\n\n self.setTabPosition(QTabWidget.West)\n self.connectSignals()\n \n def connectSignals(self):\n for name, widget in self.widgets.items():\n if hasattr(widget, 'signal'):\n widget.signal.connect(lambda x, y=name: self.handleSignal(x, y))\n \n def update(self, data):\n for key, val in data.items():\n if key in self.widgets:\n self.widgets[key].update(val)\n \n def handleSignal(self, data, widget):\n for key, val in data.items():\n callback = self.config['widgets'][widget]['callbacks'][key]\n if hasattr(self, callback):\n f = getattr(self, callback)\n f(data)\n self.signal.emit({key: val})\n\nclass fileList(QWidget):\n signal = pyqtSignal(dict)\n\n def __init__(self, files={}, global_config={}):\n super().__init__()\n self.global_config = global_config\n self.list= QListWidget()\n if files is not None:\n for file in files.keys():\n self.list.addItem(os.path.basename(file))\n self.layout = QVBoxLayout()\n self.setMinimumHeight(400)\n self.setMinimumWidth(100)\n self.setSizePolicy(QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum))\n self.buttonLayout = QHBoxLayout()\n\n self.addButton = QPushButton()\n self.removeButton = QPushButton()\n self.addButton.setText('+')\n self.removeButton.setText('-')\n self.buttonLayout.addWidget(self.removeButton)\n self.buttonLayout.addWidget(self.addButton)\n self.fileBrowser = None\n self.layout.addWidget(self.list)\n self.layout.addLayout(self.buttonLayout)\n\n self.setLayout(self.layout)\n self.connectSignals()\n \n\n\n def getCurrentSelection(self):\n return self.list.currentItem().text()\n\n def connectSignals(self):\n self.addButton.clicked.connect(self.getFile)\n self.removeButton.clicked.connect(self.fileRemoveClicked)\n self.list.itemSelectionChanged.connect(self.updateSelection)\n\n def update(self, files):\n for file in files:\n self.list.addItem(os.path.basename(file))\n \n count = self.list.count()\n if count > 0:\n self.list.setCurrentRow(self.list.count() - 1)\n self.list.currentItem().setSelected(True)\n\n 
super().update()\n\n\n def fileRemoveClicked(self):\n item = self.list.currentItem()\n row = self.list.currentRow()\n self.list.takeItem(row)\n super().update()\n self.signal.emit({'removeFile': item.text()})\n\n def updateSelection(self):\n selection = self.list.currentItem()\n self.signal.emit({'updateSelection': selection.text()})\n\n def addFile(self, data):\n for key, val in data.items():\n self.list.addItem(os.path.basename(key))\n super().update()\n self.list.setCurrentRow(self.list.count() - 1)\n self.list.currentItem().setSelected(True)\n\n self.signal.emit({'addFile': data})\n\n def getFile(self):\n print(self.global_config)\n if self.fileBrowser is None:\n self.fileBrowser = fileBrowser(self.global_config) \n fname = self.fileBrowser.browseOpenLocation(\"fits\")\n if fname[0]:\n self.addAction(fname)\n\n\n\n \n\n","repo_name":"PatrickRWells/Spectralyze","sub_path":"spectralyze/gui/Views/projectView.py","file_name":"projectView.py","file_ext":"py","file_size_in_byte":8443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"3324857733","text":"# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/KeyLab_Essential/skin_default.py\n# Compiled at: 2018-04-23 20:27:04\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom ableton.v2.control_surface import Skin\nfrom ableton.v2.control_surface.elements import Color\nfrom .sysex_rgb_color import SysexRGBColor\n\nclass Colors:\n\n class DefaultButton:\n On = Color(127)\n Off = Color(0)\n Disabled = Color(0)\n\n class Transport:\n PlayOn = Color(127)\n PlayOff = Color(0)\n StopOn = Color(127)\n StopOff = Color(0)\n\n class Session:\n ClipStopped = SysexRGBColor((31, 31, 0))\n ClipStarted = SysexRGBColor((0, 31, 0))\n ClipRecording = SysexRGBColor((31, 0, 0))\n ClipTriggeredPlay = SysexRGBColor((0, 31, 0))\n ClipTriggeredRecord = SysexRGBColor((31, 0, 0))\n ClipEmpty = SysexRGBColor((0, 0, 0))\n StopClip = Color(0)\n StopClipTriggered = Color(0)\n StoppedClip = Color(0)\n\n\ndefault_skin = Skin(Colors)\n","repo_name":"alessandroseno/AbletonLive10_MIDIRemoteScripts","sub_path":"KeyLab_Essential/skin_default.py","file_name":"skin_default.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} {"seq_id":"11138242428","text":"\"\"\"empty message\n\nRevision ID: 7533ffd97bc9\nRevises: 030409210205\nCreate Date: 2021-09-11 20:24:48.322323\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '7533ffd97bc9'\ndown_revision = '030409210205'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('membership', sa.Column('pwd', sa.String(length=50), nullable=True, comment='email password'))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('membership', 'pwd')\n # ### end Alembic commands ###\n","repo_name":"taojian2009/riverboat","sub_path":"migrations/versions/7533ffd97bc9_.py","file_name":"7533ffd97bc9_.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23552420391","text":"import sys\r\n\r\nwith open(sys.argv[1]) as f:\r\n num_test_cases = f.readline()\r\n answers = []\r\n for line in f:\r\n newline = line.strip()\r\n num = [int(d) for d in newline]\r\n for i in xrange(len(num) - 1, 0, -1):\r\n if num[i] < num[i - 1]:\r\n num[i:] = [9] * len(num[i:])\r\n num[i - 1] -= 1\r\n answers.append(''.join([str(d) for d in num]).strip('0'))\r\n\r\nwith open('response.txt', 'w') as f:\r\n for i, answer in enumerate(answers):\r\n f.write('Case #%s: %s\\n' % (i + 1, str(answer)))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1373.py","file_name":"1373.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15192387281","text":"from pwn import *\nimport time\nio = remote(\"helsectf2023-6ac4e1c6d8855c1bd96a-eksponentiell_sjekk.chals.io\", 443, ssl=True)\nio.recvuntil(b\"flagget?\").decode() # Wait until it's asking for at test flag\n\nflagLength = 52\n\ntestBytes = bytearray([0xFF]*flagLength) # Fill with high nubmer byte (0xFF, 255)\nallowedCharacters = list((string.ascii_letters + string.digits + string.punctuation).encode())\n\nfor i in range(0, flagLength+1):\n currentGuess = 0\n currentGuessTime = 0\n for nextByte in allowedCharacters:\n testBytes[i] = nextByte\n line = testBytes.hex().encode()\n\n # First check\n start = time.time()\n io.sendline(line)\n io.recvuntil(b\"flagget?\", timeout = 10)\n end1 = round((time.time() - start) * 1000)\n end2 = 1000\n if end1 > 110: \n # Sanity check\n start = time.time()\n io.sendline(line)\n io.recvuntil(b\"flagget?\", timeout = 10)\n end2 = round((time.time() - start) * 1000)\n\n end = end1 if end1 < end2 else end2\n\n if currentGuessTime < end:\n currentGuess = nextByte\n currentGuessTime = end\n\n testBytes[i] = currentGuess\n print(i, chr(currentGuess), currentGuessTime)\n\n\nprint()\nprint(testBytes.decode())\n\n\n# helsectf{f@il_Fas7_a|g0ritmeR_k4N_L3Kke_1NfOrM45j0n}","repo_name":"thorleifjacobsen/ctf","sub_path":"helsectf-2023/sidekanalsangrep/eksponentiell_sjekk/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"42432021181","text":"import os\nimport sys\nimport shutil\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nfrom glob import glob\nfrom sklearn.preprocessing import MinMaxScaler\nfrom argparse import ArgumentParser\nfrom keras import regularizers\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, CuDNNGRU, RepeatVector, TimeDistributed, Flatten, Input\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, Callback\nfrom keras.optimizers import RMSprop\n\nfrom azureml.core import Run\n\n# define the boundaries of validation and test sets\nvalid_start_dt = \"2014-09-01 00:00:00\"\ntest_start_dt = \"2014-11-01 00:00:00\"\n\n# fixed parameters\nEPOCHS = 100 # max number of epochs when training FNN\nHORIZON = 24 # forecasting horizon (in hours)\n\n# Get the run object\nrun = Run.get_context()\n\nclass LogRunMetrics(Callback):\n # callback 
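invoked by Keras 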
at the end of every epoch\n def on_epoch_end(self, epoch, log):\n # log a value repeated which creates a list\n run.log('Loss', log['loss'])\n\n\n# create training, validation and test sets given the length of the history\ndef create_input(energy, T):\n\n from utils import TimeSeriesTensor\n \n # get training data\n train = energy.copy()[energy.index < valid_start_dt][[\"load\"]]\n\n # normalize training data\n y_scaler = MinMaxScaler()\n y_scaler.fit(train[[\"load\"]])\n X_scaler = MinMaxScaler()\n train[[\"load\"]] = X_scaler.fit_transform(train)\n\n tensor_structure = {\n \"encoder_input\": (range(-T + 1, 1), [\"load\"]),\n \"decoder_input\": (range(0, HORIZON), [\"load\"]),\n }\n train_inputs = TimeSeriesTensor(train, \"load\", HORIZON, tensor_structure)\n\n look_back_dt = dt.datetime.strptime(\n valid_start_dt, \"%Y-%m-%d %H:%M:%S\"\n ) - dt.timedelta(hours=T - 1)\n valid = energy.copy()[\n (energy.index >= look_back_dt) & (energy.index < test_start_dt)\n ][[\"load\"]]\n valid[[\"load\"]] = X_scaler.transform(valid)\n valid_inputs = TimeSeriesTensor(valid, \"load\", HORIZON, tensor_structure)\n\n look_back_dt = dt.datetime.strptime(\n test_start_dt, \"%Y-%m-%d %H:%M:%S\"\n ) - dt.timedelta(hours=T - 1)\n test = energy.copy()[test_start_dt:][[\"load\"]]\n test[[\"load\"]] = X_scaler.transform(test)\n test_inputs = TimeSeriesTensor(test, \"load\", HORIZON, tensor_structure)\n\n return train_inputs, valid_inputs, test_inputs, y_scaler\n\n\n# create the model with the given values of hyperparameters\ndef get_model(LEARNING_RATE, T, ALPHA, LATENT_DIM_1):\n\n # define training encoder\n encoder_input = Input(shape=(None, 1))\n encoder = CuDNNGRU(\n LATENT_DIM_1,\n return_state=True,\n kernel_regularizer=regularizers.l2(ALPHA),\n bias_regularizer=regularizers.l2(ALPHA),\n )\n encoder_output, state_h = encoder(encoder_input)\n encoder_states = [state_h]\n\n # define training decoder\n decoder_input = Input(shape=(None, 1))\n decoder_GRU = CuDNNGRU(\n LATENT_DIM_1,\n return_state=True,\n return_sequences=True,\n kernel_regularizer=regularizers.l2(ALPHA),\n bias_regularizer=regularizers.l2(ALPHA),\n )\n decoder_output, _ = decoder_GRU(decoder_input, initial_state=encoder_states)\n decoder_dense = TimeDistributed(Dense(1))\n decoder_output = decoder_dense(decoder_output)\n\n train_model = Model([encoder_input, decoder_input], decoder_output)\n\n optimizer = RMSprop(lr=LEARNING_RATE)\n train_model.compile(optimizer=optimizer, loss=\"mse\")\n\n # build inference encoder model\n encoder_model = Model(encoder_input, encoder_states)\n\n # build inference decoder model\n decoder_state_input_h = Input(shape=(LATENT_DIM_1,))\n decoder_states_input = [decoder_state_input_h]\n\n decoder_output, state_h = decoder_GRU(\n decoder_input, initial_state=decoder_states_input\n )\n decoder_states = [state_h]\n decoder_output = decoder_dense(decoder_output)\n decoder_model = Model(\n [decoder_input] + decoder_states_input, [decoder_output] + decoder_states\n )\n\n return train_model, encoder_model, decoder_model\n\n\ndef predict_single_sequence(\n encoder_model, decoder_model, single_input_seq, horizon, n_features\n):\n # apply encoder model to the input_seq to get state\n states_value = encoder_model.predict(single_input_seq)\n\n # get input for decoder's first time step (which is encoder input at time t)\n dec_input = np.zeros((1, 1, n_features))\n dec_input[0, 0, 0] = single_input_seq[0, -1, :]\n\n # create final output placeholder\n output = list()\n # collect predictions\n for t in range(horizon):\n # predict 
next value\n yhat, h = decoder_model.predict([dec_input] + [states_value])\n # store prediction\n output.append(yhat[0, 0, :])\n # update state\n # state = [h]\n states_value = h\n # update decoder input to be used as input for next prediction\n dec_input[0, 0, 0] = yhat\n\n return np.array(output)\n\n\ndef predict_multi_sequence(\n encoder_model, decoder_model, input_seq_multi, horizon, n_features\n):\n # create output placeholder\n predictions_all = list()\n for seq_index in range(input_seq_multi.shape[0]):\n # Take one sequence for decoding\n input_seq = input_seq_multi[seq_index : seq_index + 1]\n # Generate prediction for the single sequence\n predictions = predict_single_sequence(\n encoder_model, decoder_model, input_seq, horizon, n_features\n )\n # store all the sequence prediction\n predictions_all.append(predictions)\n\n return np.array(predictions_all)\n\n\ndef run_training(energy, T_val, LATENT_DIM_1, BATCH_SIZE, LEARNING_RATE, ALPHA):\n\n from utils import create_evaluation_df, mape\n\n train_inputs, valid_inputs, test_inputs, y_scaler = create_input(energy, T_val)\n\n # Initialize the model\n train_model, encoder_model, decoder_model = get_model(\n LEARNING_RATE, T_val, ALPHA, LATENT_DIM_1\n )\n earlystop = EarlyStopping(monitor=\"val_loss\", min_delta=0, patience=5)\n best_val = ModelCheckpoint(\n \"model_{epoch:02d}.h5\",\n save_best_only=True,\n mode=\"min\",\n period=1,\n save_weights_only=True,\n )\n\n train_target = train_inputs[\"target\"].reshape(\n train_inputs[\"target\"].shape[0], train_inputs[\"target\"].shape[1], 1\n )\n valid_target = valid_inputs[\"target\"].reshape(\n valid_inputs[\"target\"].shape[0], valid_inputs[\"target\"].shape[1], 1\n )\n\n # Train the model\n history = train_model.fit(\n [train_inputs[\"encoder_input\"], train_inputs[\"decoder_input\"]],\n train_target,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=(\n [valid_inputs[\"encoder_input\"], valid_inputs[\"decoder_input\"]],\n valid_target,\n ),\n callbacks=[earlystop, best_val, LogRunMetrics()],\n verbose=0,\n )\n\n # load the model with the smallest validation MAPE\n best_epoch = np.argmin(np.array(history.history[\"val_loss\"])) + 1\n validationLoss = np.min(np.array(history.history[\"val_loss\"]))\n train_model.load_weights(\"model_{:02d}.h5\".format(best_epoch))\n\n # Save best model for this experiment\n model_name = \"bestmodel\"\n # serialize NN architecture to JSON\n model_json = train_model.to_json()\n # save model JSON\n with open(\"{}.json\".format(model_name), \"w\") as f:\n f.write(model_json)\n # save model weights\n train_model.save_weights(\"{}.h5\".format(model_name))\n\n # Compute test MAPE\n predictions = predict_multi_sequence(\n encoder_model, decoder_model, test_inputs[\"encoder_input\"], HORIZON, 1\n )\n predictions = predictions.reshape(predictions.shape[0], predictions.shape[1])\n eval_df = create_evaluation_df(predictions, test_inputs, HORIZON, y_scaler)\n testMAPE = mape(eval_df[\"prediction\"], eval_df[\"actual\"])\n\n # clean up model files\n for m in glob(\"model_*.h5\"):\n os.remove(m)\n\n # Log validation loss and test MAPE\n run.log(\"validationLoss\", validationLoss)\n run.log(\"testMAPE\", testMAPE)\n\n # create a ./outputs/model folder in the compute target\n # files saved in the \"./outputs\" folder are automatically uploaded into run history\n os.makedirs(\"./outputs/model\", exist_ok=True)\n model_files = glob(\"bestmodel*\")\n for f in model_files:\n shutil.move(f, \"./outputs/model\")\n\n\nif __name__ == \"__main__\":\n parser = 
ArgumentParser()\n\n parser.add_argument(\n \"--datadir\",\n type=str,\n dest=\"datadir\",\n help=\"Directory where the dataset is located\",\n required=True,\n )\n parser.add_argument(\n \"--scriptdir\",\n type=str,\n dest=\"scriptdir\",\n help=\"Directory where scripts are located\",\n required=True,\n )\n parser.add_argument(\n \"--latent-dim-1\",\n type=int,\n dest=\"LATENT_DIM_1\",\n help=\"number of neurons in the first hidden layer\",\n required=True,\n )\n parser.add_argument(\n \"--batch-size\", type=int, dest=\"BATCH_SIZE\", help=\"batch size\", required=True\n )\n parser.add_argument(\"--T\", dest=\"T\", type=int, help=\"history length\", required=True)\n parser.add_argument(\n \"--learning-rate\",\n type=float,\n dest=\"LEARNING_RATE\",\n help=\"learning rate\",\n required=True,\n )\n parser.add_argument(\n \"--alpha\",\n type=float,\n dest=\"ALPHA\",\n help=\"regularization coefficient\",\n required=True,\n )\n\n args = parser.parse_args()\n\n commondir = args.scriptdir\n\n sys.path.append(commondir)\n from utils import load_data\n from extract_data import extract_data\n\n # load data into Pandas dataframe\n data_dir = args.datadir\n if not os.path.exists(os.path.join(data_dir, \"energy.csv\")):\n extract_data(data_dir)\n\n energy = load_data(data_dir)\n\n # parse values of hyperparameters\n T = int(args.T)\n LATENT_DIM_1 = int(args.LATENT_DIM_1)\n BATCH_SIZE = int(args.BATCH_SIZE)\n LEARNING_RATE = float(args.LEARNING_RATE)\n ALPHA = float(args.ALPHA)\n\n # train and evaluate RNN teacher forcing neural network with given values of hyperparameters\n run_training(energy, T, LATENT_DIM_1, BATCH_SIZE, LEARNING_RATE, ALPHA)\n","repo_name":"Azure/DeepLearningForTimeSeriesForecasting","sub_path":"hyperparameter_tuning/RNN_teacher_forcing/RNN_teacher_forcing.py","file_name":"RNN_teacher_forcing.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":628,"dataset":"github-code","pt":"61"} {"seq_id":"72661498433","text":"# This is a sample Python script.\n\n# Press ⌃R to execute it or replace it with your code.\n# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.\n\nimport requests\nimport json\n\nurl=\"https://api.openweathermap.org/data/2.5/weather\"\nparams={\"q\":\"Tegucigalpa\",\"appid\":\"63aed104a1fc299178c8eed99f65fd6f\"}\nresponse=requests.get(url, params=params)\njsonDataString=response.text\njsonDictionary=json.loads(jsonDataString)\ntemperaturamax=round((int(jsonDictionary[\"main\"][\"temp_max\"]))-273.15,2)\ntemperaturamin=round((int(jsonDictionary[\"main\"][\"temp_min\"]))-273.15,2)\ntemperatura=round(int((jsonDictionary[\"main\"][\"temp\"]))-273.15,2)\nciudad=jsonDictionary[\"name\"]\nprint(\"The current temperature in \",ciudad,\" is\", str(temperatura),\" °C\")\nprint(\"The expected maximum temperature is \",str(temperaturamax),\" °C\")\nprint(\"And the expected minimum temperature is \",str(temperaturamin),\" °C\")\n\n\n\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n","repo_name":"carlosp2001/API_Clima_PMP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"38440410876","text":"#!/usr/bin/env python3\nimport os\nimport re\nimport sys\n\n\ndef add_space(origin: str):\n modified = \"{} {}\".format(origin[0], origin[1])\n print(repr(origin), \"->\", modified)\n return modified\n\n\ndef change_dot(origin: str):\n modified = 
\"{}.\".format(origin)\n print(repr(origin), \"->\", modified)\n return modified\n\n\ndef remove_space(origin: str):\n modified = origin.replace(\" \", \"\")\n print(repr(origin), \"->\", modified)\n return modified\n\n\ndef handle(full_path: str):\n print(full_path)\n with open(full_path, \"r\", encoding=\"utf-8\") as fp:\n content = fp.read()\n content = re.sub(\n \"[0-9a-zA-Z][|\\u4e00-\\u9fa5]\", lambda x: add_space(x.group(0)), content\n )\n content = re.sub(\n \"[|\\u4e00-\\u9fa5][0-9a-zA-Z]\", lambda x: add_space(x.group(0)), content\n )\n content = re.sub(\"[0-9a-zA-Z%][。]\", lambda x: change_dot(x.group(0)), content)\n content = re.sub(\"[,。;:?!”)] \", lambda x: remove_space(x.group(0)), content)\n content = re.sub(\" [,。;:?!“(]\", lambda x: remove_space(x.group(0)), content)\n content = re.sub(\"^[ ]+$\", \"\", content)\n content = re.sub(\" MtF \", \" MtF \", content, flags=re.IGNORECASE)\n content = re.sub(\" LGBT \", \" LGBT \", content, flags=re.IGNORECASE)\n content = re.sub(\" QQ \", \" QQ \", content, flags=re.IGNORECASE)\n content = re.sub(\"\\n\\n\\n\", \"\\n\\n\", content)\n with open(full_path, \"w\", encoding=\"utf-8\") as fp:\n fp.write(content)\n\n\ndef walk_all_files(base: str):\n print(base)\n for root, dirs, files in os.walk(base):\n for file in files:\n full_path = os.path.join(root, file)\n full_path: str\n if full_path.endswith(\".md\"):\n handle(full_path)\n\n\ndef main():\n if len(sys.argv) < 2:\n walk_all_files(os.getcwd())\n else:\n walk_all_files(sys.argv[1])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cardinalsystem256/MtF-Wiki","sub_path":".github/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"13801799858","text":"import dummy\n\ndef guessT0(m, attempts=100):\n\t\"Guess t0 of a given mixnet\"\n\tg = -1\n\ti0 = 0\n\tfor i in range(1, attempts):\n\t\tout, m = dummy.sendMessage(m)\n\t\tif out > 0 :\n\t\t\tprint(\"Output on\", i,\":\", out)\n\t\t\tif (g < 0 or (i - i0) < g):\n\t\t\t\tg = (i - i0)\n\t\t\t\tprint(\"New guess:\", g)\n\t\t\t\ti0 = i\n\t\t\n\n\tif(g >= 0):\n\t\tprint(\"My guess is \" + str(g) + \" is (divisible by) t0\")\n\telse :\n\t\tprint(\"I have no guess for t0\")\n\n# Final mix t2:\n# when t2 flushes, then t1 and t0 also flushed\n# t2 <= t1: when t1 flushes, t2 flushes\n# t2 > t1: then \n\t# t2 > t1 * n for some n >= 1\n\ndef guessTn(m, attempts=100):\n\t\"Guess tn of a given mixnet\"\n\tg = -1\n\tfor i in range(1, attempts):\n\t\tout, m = dummy.sendMessage(m)\n\t\tif out > 0 :\n\t\t\t# print(\"Output on\", i,\":\", out)\n\t\t\tif (g < 0 or out < g):\n\t\t\t\tg = out\n\t\t\t\t# print(\"New guess:\", out)\n\t\t\n\tif(g >= 0):\n\t\tprint(\"My guess is \" + str(g) + \" is (divisible by) tn\")\n\telse :\n\t\tprint(\"I have no guess for t0\")\n\n\treturn g\n\ndef guessLCM(m, attempts=100):\n\t\"Guess lcm of a given mixnet\"\n\tg = -1\n\tfor i in range(1, attempts):\n\t\tout, m = dummy.sendMessage(m)\n\t\tif out > 0 :\n\t\t\t# print(\"Output on\", i,\":\", out)\n\t\t\tif (g < 0 or out < g):\n\t\t\t\tg = out\n\t\t\t\t# print(\"New guess:\", out)\n\t\t\n\tif(g >= 0):\n\t\tprint(\"My guess is \" + str(g) + \" is (divisible by) tn\")\n\telse :\n\t\tprint(\"I have no guess for t0\")\n\n\treturn g\n\nguessTn( (3, [4, 6, 7], [0,0,0]), 10000)\n\ndef lcmn(l):\n\ta = lcm(l[0], l[1])\n\tfor i in range(2, len(l)):\n\t\ta = lcm(a , l[i])\n\treturn a\n\ndef gcd(a, b):\n \"\"\"Calculate the Greatest Common Divisor of 
a and b.\n\n Unless b==0, the result will have the same sign as b (so that when\n b is divided by it, the result comes out positive).\n \"\"\"\n while b:\n a, b = b, a%b\n return a\n\ndef lcm(x, y):\n if x > y:\n z = x\n else:\n z = y\n\n while(True):\n if((z % x == 0) and (z % y == 0)):\n lcm = z\n break\n z += 1\n\n return lcm\ndef getInEqOutFlush(m):\n\ti = 0\n\tflushes = []\n\twhile i < 10000:\n\t\tout, m = dummy.sendMessage(m)\n\t\ti += 1\n\t\tif out == i:\n\t\t\treturn i\n\n\tprint(\"Cant find in=out for \" + str(m))\n\treturn 1\n\n\ndef testGuessTn(attempts=10000):\n\tfor i in range(100):\n\t\tm = dummy.randomMixnet(3)\n\t\tg = guessTn(m)\n\t\tassert g == m[1][2], \"Guess tn does not work for \" + str(m) + \", gives \" + str(g)\n\ndef lcmOfInOut(m):\n\tf = dummy.getNFlushes(m,1)[0]\n\treturn lcm(f[0], f[1])\n\nm = (3, [4, 6, 7], [0,0,0])\nprint( lcmOfInOut(m) )\nprint( lcmn(m[1]))\n\ndef testLcmOfInOut(attempts=10000):\n\tfor i in range(100):\n\t\tm = dummy.randomMixnet(3)\n\t\tg = lcmOfInOut(m)\n\t\te = lcmn(m[1])\n\t\te2 = e % g\n\t\tassert e2 == 0 or (g % e == 0), \"Guess lcm does not work for \" + str(m) + \", gives \" + str(g) + \", expected \" + str(e)\n\ndef guessLargestWithTwoFlushes(attempts=10000):\n\tfor i in range(attempts):\n\t\tm = dummy.randomMixnet(3)\n\t\tf = dummy.getNFlushes(m, 2)\n\t\tl = lcmn(m[1])\n\t\tg = gcd(f[0][0], f[1][0])\n\t\tok = f[0][1] in m[1] or l in m[1]\n\t\tassert g in m[1] or ok, \"Guessed wrong, m: \" + str(m) + \", guessed \" + str(g) + \", expected x in \" + str(m[1]) + \" | \" + str(f) + \" | \" + str(l)\n\ndef findInEqualsOut(attempts=10000):\n\tfor i in range(attempts):\n\t\tm = dummy.randomMixnet(3)\n\t\tf = getInEqOutFlush(m)\n\t\tassert f > 0, \"Naha\"\n\ndef floorAss(attempts=10000):\n\tfor i in range(attempts):\n\t\tm = dummy.randomMixnet(3)\n\t\tf = dummy.getNFlushes(m, 10)\n\t\tt123 = m[1][0] * m[1][1] * m[1][2]\n\t\tfor j in f:\n\t\t\tx0 = j[0]\n\t\t\tx3 = j[1]\n\t\t\tg = (x0 // t123) * t123  # integer floor division; avoids the unimported math module\n\t\t\tassert g == x3, \"Guessed wrong, m: \" + str(m) + \", guessed \" + str(g) + \", expected \" + str(x3) \n\n# testGuessTn()\n\nm1 = (3, [32,13,5], [0,0,0])\nprint( dummy.getNFlushes(m1, 3))\n","repo_name":"Gamer1120/pet-mixnets","sub_path":"use-dummy.py","file_name":"use-dummy.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} {"seq_id":"17583245707","text":"import pandas as pd\n\n\ndef sma(close, length=10):\n return close.rolling(window=length).mean()\n\n\ndef ema(close, length=10):\n close_copy = close.copy()\n sma_nth = close_copy[0:length].mean()\n\n # seed with a real NaN, not the string \"nan\", so ewm() still sees numeric data\n close_copy[: length - 1] = float(\"nan\")\n close_copy.iloc[length - 1] = sma_nth\n\n return close_copy.ewm(span=length, adjust=False).mean()\n\n\ndef rsi(close, length=14):\n negative = close.diff()\n positive = negative.copy()\n\n positive[positive < 0] = 0\n negative[negative > 0] = 0\n\n positive_avg = positive.ewm(alpha=(1.0 / length), min_periods=length).mean()\n negative_avg = negative.ewm(alpha=(1.0 / length), min_periods=length).mean()\n\n return 100 * positive_avg / (positive_avg + negative_avg.abs())\n\n\ndef stoch(high, low, close, k=14, d=3):\n lowest_low = low.rolling(k).min()\n highest_high = high.rolling(k).max()\n\n stoch = 100 * (close - lowest_low) / (highest_high - lowest_low)\n\n stoch_k = sma(stoch.loc[stoch.first_valid_index() :,], length=d)\n stoch_d = sma(stoch_k.loc[stoch_k.first_valid_index() :,], length=d)\n\n return pd.DataFrame({\"STOCHk_14_3_3\": stoch_k, 
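# %K and its %D smoothing, using pandas-ta-style column names\n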
\"STOCHd_14_3_3\": stoch_d})\n\n\ndef macd(close, fast=12, slow=26, signal=9):\n fast_ema = ema(close, length=fast)\n slow_ema = ema(close, length=slow)\n\n macd_line = fast_ema - slow_ema\n signal_ema = ema(\n close=macd_line.loc[macd_line.first_valid_index() :,], length=signal\n )\n histogram = macd_line - signal_ema\n\n return pd.DataFrame(\n {\n \"MACD_12_26_9\": macd_line,\n \"MACDh_12_26_9\": histogram,\n \"MACDs_12_26_9\": signal_ema,\n }\n )\n","repo_name":"budddma/tech-analysis-web-app","sub_path":"src/indicators/top_indicators.py","file_name":"top_indicators.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9430590076","text":"import os\n\ndef main():\n print('running main')\n # STEP 1\n # Analyze a job description, and show the highest weighted one, two,\n # and three word combinations,\n # so jobseekers can write tailored bdullets\n # upload_resume(input('Enter a filename: '))\\\n # if input('Do you have a new resume to upload (y/n)?') == 'y'\\\n # else 'Okay, no new resume.'\n\n\n # jd_text = get_job_post()\n # jd_title = input('Enter a title for the job post text file:\\n\\t')\n # jd_transcriber(jd_text,jd_title)\n\n # Path to folder containing job descriptions in .txt format\n # input_path = './input/jds/'\n\n\n # Create a useable corpus of words for analysis from the input jds.\n corpus = corpus_prepper(input_path)\n print('Corpus created from job post(s)...')\n # Display key terms in one-, two-, and three-word combinations\n analysis = jd_analyzer(corpus)\n print('Keywords found...')\n\n # Turn text from JDs into simple corpus that scikit learn uses for\n # count TFIDF\n jd_set = [txt_parser(filename) for filename in corpus]\n\n #\n # print(current_resume)\n # Make a chart showing keywords\n # chart_token_freq(jd_set)\n # create ordered lists of each part of spech from job post(s)\n jd_verb_stems = chart_prepper(jd_set,'VERB')[1]\n jd_adj_stems = chart_prepper(jd_set,'ADJ')[1]\n jd_noun_stems = chart_prepper(jd_set,'NOUN')[1]\n\n # get user input from a .csv file and convert into a pandas\n # data frame\n user_input_filepath = './input/resumes/processed/user_input.csv'\n user_input_df = csv_to_df(user_input_filepath)\n\n # add lengths of bullets\n user_input_df['bullet_length'] = [bullet_length_comparison(bullet)\\\n for bullet in user_input_df['Bullet']]\n\n user_input_df['parts_of_speech'] = [pos_tagger(bullet)\\\n for bullet in user_input_df['Bullet']]\n\n user_input_df['starts_with_VBN'] = [starts_with_VBN(pos_set)\\\n for pos_set in user_input_df['parts_of_speech']]\n\n # user_input_df['starts_strong'] = [starts_strong(bullet)\n # for bullet in user_input_df['Bullet']]\n\n # user_input_df['strong_synonyms'] = [strong_syns(bullet)\n # for bullet in user_input_df['Bullet']]\n\n # stem the parts of speech in user input resume bullet statements\n # VERBS\n user_input_df['verb_stems'] =\\\n [list(pos_finder(bullet, 'VERB').values())\\\n for bullet in user_input_df['Bullet']]\n\n user_input_df['verb_strength_score'] =\\\n [bullet_strength_calculator(stem_list, jd_verb_stems)\\\n for stem_list in user_input_df['verb_stems']]\n\n # ADJ\n user_input_df['adj_stems'] =\\\n [list(pos_finder(bullet, 'ADJ').values())\\\n for bullet in user_input_df['Bullet']]\n\n user_input_df['adj_strength_score'] =\\\n [bullet_strength_calculator(stem_list, jd_adj_stems)\\\n for stem_list in user_input_df['adj_stems']]\n\n # NOUNS\n user_input_df['noun_stems'] =\\\n [list(pos_finder(bullet, 
'NOUN').values())\\\n for bullet in user_input_df['Bullet']]\n user_input_df['noun_strength_score'] =\\\n [bullet_strength_calculator(stem_list, jd_noun_stems)\\\n for stem_list in user_input_df['noun_stems']]\n user_input_df['total_bullet_strength'] =\\\n (user_input_df['verb_strength_score'] +\\\n user_input_df['adj_strength_score'] +\\\n user_input_df['noun_strength_score'])\n\n bullet_strength_index_df =\\\n user_input_df[['Bullet','total_bullet_strength']]\n\n report_check = input('Generate analysis report (y/n)?\\n\\t')\n\n if report_check == 'y':\n report_title = input('Enter an analysis report title:\\n\\t')\n analysis_reporter(analysis, jd_set, report_title)\n print('Analysis report genarated.')\n\n resume_check = input('Create resume (y/n)?')\n\n if resume_check == 'y':\n write_resume(user_input_df)\n\nif __name__ == '__main__':\n main()\n","repo_name":"janton42/hl_main","sub_path":"cold_apply/static/scripts/resume_writer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23914222708","text":"import json\nimport datetime\nimport time\nimport pytz\nimport math\n\n\nclass DataLoader(object):\n\n def __init__(self, src_path):\n with open(src_path) as src_file:\n json_data = json.load(src_file)\n\n self._tweets_data = json_data\n self._size = len(json_data)\n\n self._unify_timezone()\n\n self._start_time, self._end_time = self._calc_time_iterval()\n\n\n def get_data(self):\n return self._tweets_data\n\n\n def get_timegap(self):\n delta = self._end_time - self._start_time\n hours = 24 * delta.days + math.ceil(delta.seconds / 3600)\n return hours\n\n\n def get_split_data(self):\n split_data = [[] for i in range( self.get_timegap() )]\n for item in self._tweets_data:\n delta = item[\"date\"] - self._start_time\n idx = delta.days * 24 + int(delta.seconds / 3600)\n split_data[idx].append(item)\n\n return split_data\n\n\n def _unify_timezone(self):\n pst_tz = pytz.timezone(\"US/Pacific\")\n \n for i in range(self._size):\n date = self._tweets_data[i][\"date\"]\n self._tweets_data[i][\"date\"] = datetime.datetime.fromtimestamp(date, pst_tz)\n\n\n def _calc_time_iterval(self):\n end_time, start_time = self._tweets_data[0][\"date\"], self._tweets_data[0][\"date\"]\n\n for item in self._tweets_data:\n if end_time < item[\"date\"]:\n end_time = item[\"date\"]\n if start_time > item[\"date\"]:\n start_time = item[\"date\"]\n\n return start_time, end_time\n\n\ndef main():\n DataLoader(\"gohawks\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"TooSchoolForCool/EE219-Larger-Scale-Data-Mining","sub_path":"Project-5/src/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20073895318","text":"import json\nimport os.path\nimport re\n\nimport mrjob.protocol\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\n\nIMAGE_ID_RE = re.compile(r\"[0-9]+\")\n\n\nclass MRDeriveImageDataset(MRJob):\n \"\"\"MRDeriveImageDataset\n this MRJob MapReduce class takes in the raw_data\n from crawling pornhub and generates an image specific\n dataset while still holding onto information about\n which albums the images were a part of\n \"\"\"\n OUTPUT_PROTOCOL = mrjob.protocol.JSONValueProtocol\n\n def mapper(self, _, line):\n \"\"\"mapper\n the map step splits images from their albums,\n storing the segment, album_id, and 
album_title\n for use later and yielding the image by itself\n with no key\n \"\"\"\n data = json.loads(line)\n ## yield each image after processing\n for album in data[\"albums\"]:\n album_id = int(os.path.basename(album[\"URI\"]))\n\n ## yield images from the albums\n for image in album[\"images\"]:\n image[\"segment\"] = album[\"segment\"]\n image[\"album_id\"] = album_id\n image[\"album_title\"] = album[\"title\"]\n image[\"image_id\"] = int(IMAGE_ID_RE.findall(os.path.basename(image[\"uri\"]))[0])\n yield None, image\n\n def reducer(self, _, image):\n \"\"\"reducer\n the reduce step just passes all map\n output to the OUTPUT as a json object\n \"\"\"\n for val in image:\n yield None, val\n\n\nif __name__ == \"__main__\":\n MRDeriveImageDataset.run()\n","repo_name":"cdipaolo/hub-db","sub_path":"data/process/images.py","file_name":"images.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"61"} +{"seq_id":"11654886580","text":"import asyncio\nimport sys\nfrom cefpython3 import cefpython\nfrom .handler_wrappers import LoadHandlerWrapper, RequestHandlerWrapper\nfrom .utils import are_urls_equal\nfrom .js_functions import js_get_attr, js_is_element, js_click, js_fill_input, \\\n js_get_text, js_get_html, js_get_location\nfrom .exceptions import OperationTimeout, ElementNotFound, JavaScriptError\n\nBROWSER_LOOP_DELAY = 0.1\n\n# result values\nSUCCESS = 'success'\nFAILED = 'failed'\n\n# timeout constants\nWAIT_TIMEOUT = 60\nINTERVAL_TIMEOUT = 0.25\n\ndef singletask(func):\n '''Decorator for returning a future object which should be resolved by\n when method is completed and is canceled every time when new method is\n called\n '''\n async def wrapper(*args, **kw):\n driver = args[0]\n await driver.lock.acquire()\n driver.reset_async_primitives()\n result = await func(*args, **kw)\n driver.lock.release()\n return result\n return wrapper\n\n\nclass BrowserDriver:\n def __init__(self, loop, cef=None):\n self.WAIT_TIMEOUT = 60\n self.INTERVAL_TIMEOUT = 0.25\n\n self.cef = cef if cef is not None else cefpython\n # If we received cef already, we assume that it's already initialized\n self._cef_initialized = cef is not None\n self.loop = loop\n self.lock = asyncio.Lock()\n self._init_browser()\n self.is_browser_running = False\n self.reset_async_primitives()\n\n def _init_browser(self):\n if not self._cef_initialized:\n sys.excepthook = self.cef.ExceptHook # To shutdown all CEF processes on error\n self.cef.Initialize({\n 'log_severity': self.cef.LOGSEVERITY_DISABLE,\n 'windowless_rendering_enabled': True,\n 'debug': False\n })\n self.browser = self.cef.CreateBrowserSync(browserSettings=dict(image_load_disabled=True))\n self.browser.SetClientHandler(LoadHandlerWrapper(self))\n self.browser.SetClientHandler(RequestHandlerWrapper(self))\n # create JS bindings\n self.js_bindings = self.cef.JavascriptBindings(bindToFrames=False, bindToPopups=False)\n self.js_bindings.SetFunction('py_data_callback', self._py_data_callback)\n self.js_bindings.SetFunction('py_handle_exception', self._py_handle_exception)\n self.browser.SetJavascriptBindings(self.js_bindings)\n\n def _py_data_callback(self, js_data):\n '''Handle data set from JavaScript code\n '''\n self._future.set_result(js_data)\n\n def _py_handle_exception(self, js_error):\n '''Handle JavaScript exception caught while executing code\n '''\n self._future.set_exception(JavaScriptError(js_error))\n\n def run(self):\n self.loop.create_task(self.browser_message_loop())\n 
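# Descriptive note (added): browser_message_loop() is now scheduled on the asyncio loop and will pump CEF events via MessageLoopWork() every BROWSER_LOOP_DELAY seconds, so the driver can be flagged as running.\n        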
self.is_browser_running = True\n\n def shutdown(self):\n self.cef.shutdown()\n self.is_browser_running = False\n\n def reset_async_primitives(self):\n '''Reset async primitives Futur and Queue for current task context\n '''\n self._future = asyncio.Future()\n self._queue = asyncio.Queue()\n\n async def browser_message_loop(self):\n '''Run browser message loop within asyncio loop\n '''\n while True:\n self.cef.MessageLoopWork()\n await asyncio.sleep(BROWSER_LOOP_DELAY)\n\n # --------------------------------------------------\n # browser event handlers\n # --------------------------------------------------\n\n def on_load_end(self, browser, frame, http_code):\n self._queue.put_nowait(dict(type='url', data=frame.GetUrl()))\n\n def on_resource_redirect(self, browser, frame, old_url, new_url_out, request, response):\n self._queue.put_nowait(dict(\n type='redirect',\n data=dict(\n old_url=old_url,\n new_url=new_url_out[0]\n )\n ))\n\n # --------------------------------------------------\n # browser commands below\n # --------------------------------------------------\n\n @singletask\n async def open_url(self, url=''):\n '''Open url in main frame\n '''\n frame = self.browser.GetMainFrame()\n frame.LoadUrl(url)\n\n @singletask\n async def get_attr(self, selector='', attr='', forall=False, safe=False):\n '''Get attribute of specified by selector element.\n If safe parameter is provided it won't raise an error in case when\n element is not found.\n Return a value of attribute or a list of values if forall is True\n '''\n js_get_attr(self.browser, selector=selector, attr=attr, forall=forall)\n result = await self._future\n if not result and not safe:\n raise ElementNotFound(selector)\n return result\n\n @singletask\n async def wait_for(self, selector=None, url=None, timeout=WAIT_TIMEOUT, safe=False):\n '''Wait for url to be loaded or element reachable by selector.\n Return 1 for url if success and count of elements found with provided\n selector.\n '''\n max_attempts = int(timeout / INTERVAL_TIMEOUT)\n attempts = 0\n while True:\n attempts += 1\n if selector:\n js_is_element(self.browser, selector=selector)\n if url:\n js_get_location(self.browser)\n res = await self._future\n if selector:\n if res:\n return res\n if url:\n if are_urls_equal(res, url):\n return res\n if attempts > max_attempts:\n if not safe:\n raise OperationTimeout(\n 'Timeout exceeded while waiting for %s' %\n ('selector' if selector else 'url')\n )\n return 0\n # reset it after each attempt\n self.reset_async_primitives()\n await asyncio.sleep(self.INTERVAL_TIMEOUT)\n\n\n @singletask\n async def is_element(self, selector=''):\n '''Check whether element exists in current document.\n '''\n js_is_element(self.browser, selector=selector)\n return bool(await self._future)\n\n @singletask\n async def click(self, selector=''):\n '''Trigget click event on specific element reachable by selector\n '''\n js_is_element(self.browser, selector=selector)\n result = await self._future\n if (result):\n self.reset_async_primitives()\n js_click(self.browser, selector=selector)\n await self._future\n return True\n return False\n\n @singletask\n async def fill_input(self, selector='', value='', forall=False):\n '''Fill the input specified by selector\n '''\n js_is_element(self.browser, selector=selector)\n result = await self._future\n if (result):\n self.reset_async_primitives()\n js_fill_input(self.browser, selector=selector, value=value, forall=forall)\n await self._future\n return True\n return False\n\n @singletask\n async def get_text(self, 
selector='', forall=False, safe=False):\n '''Get text content of element specified by selector\n '''\n js_get_text(self.browser, selector=selector, forall=forall)\n result = await self._future\n if result is None or result == []:\n if not safe:\n raise ElementNotFound(selector)\n return '' if result is None else result\n\n @singletask\n async def get_html(self, selector='', forall=False, safe=False):\n '''Get inner HTML of element specified by selector\n '''\n js_get_html(self.browser, selector=selector, forall=forall)\n result = await self._future\n if result is None or result == []:\n if not safe:\n raise ElementNotFound(selector)\n return '' if result is None else result\n\n\n @singletask\n async def expect_redirect(self, url='', timeout=WAIT_TIMEOUT, safe=False):\n '''Wait for redirect\n '''\n frame = self.browser.GetMainFrame()\n frame.LoadUrl(url)\n while True:\n try:\n msg = await asyncio.wait_for(self._queue.get(), timeout)\n except asyncio.TimeoutError as e:\n if not safe:\n raise e\n return\n if msg['type'] == 'redirect':\n return msg['data']['new_url']\n","repo_name":"nskrypnik/jweb_driver","sub_path":"jweb_driver/browser_driver.py","file_name":"browser_driver.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"24196879833","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 1 13:00:10 2021\n\n@author: Sam\n\nFunction: transfor 'yolo format' to 'rectangle format' for calculate mAP\n\nreference: https://blog.csdn.net/weixin_43508499/article/details/118600392\n\"\"\"\nimport glob\nimport os\nimport cv2\n\n# classna,e\ncls_file = './dog_cat_monkey/obj_names.txt'\nwith open(cls_file, 'rt') as f:\n classes = f.read().rstrip('\\n').split('\\n')\n\ndef get_basename(filepath):\n '''\n filename and extension name\n filepath = '/home/ubuntu/python/example.py'\n return basename = example.py\n '''\n return os.path.basename(filepath)\n\ndef get_filename_only(filepath):\n '''filepath = '/home/ubuntu/python/example.py'\n return filename = example\n '''\n basename = os.path.basename(filepath)\n return os.path.splitext(basename)[0] \n\ndef created_directory(directory):\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\ndef isfile(f):\n '''Check if the file exists\n input filepath\n return True/False\n '''\n if os.path.isfile(f):\n return True\n else:\n return False\n\ndef yolo_to_retangle(temp, w, h):\n '''\n imput yolo format and img' weight and height\n return retangle format x1, x2, y1, y2\n <0-left> <1-top> <2-right> <3-bottom> []\n '''\n x_, y_, w_, h_=eval(temp[1]), eval(temp[2]), eval(temp[3]), eval(temp[4])\n x1 = w * x_ - 0.5 * w * w_\n x2 = w * x_ + 0.5 * w * w_\n y1 = h * y_ - 0.5 * h * h_\n y2 = h * y_ + 0.5 * h * h_\n return x1, y1, x2, y2\n\ndef single(f, out_path):\n print(f)\n img = cv2.imread(f)\n h, w, channels = img.shape\n# print(h, w, channels)\n f = f.replace(\"images\", \"yolo_txt\")\n f_yolo = f.replace(\".jpg\", \".txt\")\n# print('f_yolo:{}'.format(f_yolo))\n with open(f_yolo, 'r') as f:\n lines = f.readlines()\n# print(lines)\n\n # Write retcangle txt \n basename = get_basename(f_yolo)\n# print(f_name)\n retangle_file = os.path.join(out_path, basename)\n print(retangle_file)\n file = open(retangle_file,'w')\n for line in lines:\n temp = line.split()\n label = classes[int(temp[0])]\n # ['1', , '0.43906', '0.52083', '0.34687', '0.15']\n x1, y1, x2, y2 = yolo_to_retangle(temp, w, h)\n x1, y1, x2, y2 = int(x1), int(x2), 
int(y2), int(y2)\n print('{} {:.6f} {} {} {}'.format(label, x1, y1, x2, y2))\n file.write('{} {:.6f} {} {} {}\\n'.format(label, x1, y1, x2, y2))\n\n# #画图验证,注意画图坐标要转换成int格式\n# cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0))\n file.close()\n# print('write to retange.txt')\n\n\nif __name__ == '__main__':\n imgPath = './dog_cat_monkey/data/images'\n output_dir = './dog_cat_monkey/data/rectangle_txt'\n created_directory(output_dir)\n\n\n# =============================================================================\n# # single process\n# =============================================================================\n# f = './dog_cat_monkey/data/images/cats_016.jpg'\n# if isfile(f):\n# results = single(f, output_dir)\n\n# =============================================================================\n# # # multiple\n# =============================================================================\n for f in glob.glob(os.path.join(imgPath, \"*.jpg\")):\n single(f, output_dir)\n","repo_name":"samsu2018/mAP","sub_path":"yolo2rectang.py","file_name":"yolo2rectang.py","file_ext":"py","file_size_in_byte":3408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"38168352347","text":"#!\"C:/Python34/python.exe\"\n\nimport mysql.connector\nfrom database.db_config import config\nimport mlog\n\nTAG=\"Class Account\"\n\nclass Account():\n login_name=\"\"\n login_key=\"\"\n acc_number=0\n name=\"\"\n bank_name=\"\"\n ifsc_code=\"\"\n branch_name=\"\"\n balance=0\n\n #Constructor. Usage: obj_acc = Account()\n def __init__(self):\n mlog.debug(TAG,\"Constructor()\")\n\n def set_account_details(self,acc_number, acc_name,bank,branch,ifsc,balance_amt=0):\n self.acc_number = acc_number\n self.name = acc_name\n self.bank_name = bank\n self.branch_name = branch\n self.ifsc_code = ifsc\n self.__balance_amt = int(balance_amt)\n\n def set_user_details(self,acc_number,login_name,login_key):\n self.acc_number = acc_number\n self.login_name = login_name\n self.login_key = login_key\n\n def insert_to_account(self):\n try:\n conn = mysql.connector.connect(**config)\n cursor = conn.cursor()\n except Exception as e:\n mlog.error(TAG,\"Unable to conenct to MyBanking Database.\")\n print('''\n

Unable to connect to MyBanking Database.\n\n BACK\n
\n ''')\n print(helperHTML.get_html_end_preset())\n sys.exit()\n\n insert_account = \"INSERT INTO ACCOUNT(ACC_NUMBER, NAME, BANK_NAME, BRANCH_NAME, IFSC_CODE, BALANCE) \"\n insert_account += \"VALUES (\" + str(self.acc_number) + \", '\" + str(self.name) + \"' , '\" + str(self.bank_name) + \"' , '\" + str(self.branch_name) + \"' ,\"\n insert_account += \"'\" + str(self.ifsc_code) + \"' , \"+ str(self.__balance_amt) + \");\"\n mlog.debug(TAG, insert_account)\n insert_status = False\n error = None\n try:\n cursor.execute(insert_account)\n conn.commit()\n except Exception as e:\n error = str(e)\n mlog.error(TAG,\"Error: \" + error)\n insert_status = True\n\n cursor.close()\n conn.close()\n\n return insert_status, error\n\n def insert_to_user(self):\n try:\n conn = mysql.connector.connect(**config)\n cursor = conn.cursor()\n except Exception as e:\n mlog.error(TAG,\"Unable to conenct to MyBanking Database.\")\n print('''\n

Unable to connect to MyBanking Database.\n\n BACK\n
\n ''')\n print(helperHTML.get_html_end_preset())\n sys.exit()\n\n insert_user = \"INSERT INTO USER (ACC_NUMBER, LOGIN_NAME, LOGIN_KEY) VALUES ( \" + str(self.acc_number)\n insert_user += \", '\" + str(self.login_name) + \"' , '\" + str(self.login_key) + \"' );\"\n mlog.debug(TAG, insert_user)\n insert_status = False\n error = None\n try:\n cursor.execute(insert_user)\n conn.commit()\n except Exception as e:\n error = str(e)\n mlog.error(TAG,\"Error: \" + error)\n insert_status = True\n\n cursor.close()\n conn.close()\n\n return insert_status, error\n\n def delete_account(self):\n try:\n conn = mysql.connector.connect(**config)\n cursor = conn.cursor()\n except Exception as e:\n mlog.error(TAG,\"Unable to conenct to MyBanking Database.\")\n print('''\n

Unable to connect to MyBanking Database.\n\n BACK\n
\n ''')\n print(helperHTML.get_html_end_preset())\n sys.exit()\n\n delete_acc = \"DELETE FROM ACCOUNT WHERE ACC_NUMBER = \" + str(self.acc_number)\n error = None\n try:\n cursor.execute(delete_acc)\n conn.commit()\n except Exception as e:\n error = str(e)\n mlog.error(TAG,\"Error: \" + error)\n\n cursor.close()\n conn.close()\n","repo_name":"kalathiit/myBanking","sub_path":"class_account.py","file_name":"class_account.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74642573314","text":"#!coding:utf8\n\n\ndef ReadBytes(fin, num):\n\t'''\n\t:param fin: 输入的文件\n\t:param num: 读取的个数\n\t:return: 读取到的数据(字符串)\n\t'''\n\ttempList = []\n\tfor i in range(0, num):\n\t\ttempList.append(fin.read(1).encode('hex'))\n\ttempList.reverse()\n\t#print(''.join(tempList))\n\treturn ''.join(tempList).upper()","repo_name":"youngqqcn/ChinaGMTool","sub_path":"lib/readbytes.py","file_name":"readbytes.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71109957633","text":"from AthenaCommon.Logging import logging\nlog = logging.getLogger(\"TriggerMenuMT.HLT.Jet.JetChainSequences\")\n\nfrom ..Config.MenuComponents import MenuSequenceCA, SelectionCA, InEventRecoCA\n\nfrom AthenaConfiguration.ComponentFactory import CompFactory\n\ndef jetEJsMenuSequence(flags, jetsIn, name):\n \n from TrigHLTJetHypo.TrigJetHypoToolConfig import trigJetEJsHypoToolFromDict\n\n # Get track sequence name\n from TrigInDetConfig.ConfigSettings import getInDetTrigConfig\n IDTrigConfig = getInDetTrigConfig( 'jet' )\n sequenceOut = IDTrigConfig.tracks_FTF()\n vertices = IDTrigConfig.vertex_jet\n \n reco = InEventRecoCA(\n f\"EmergingJets_HypoOnlyStep_{jetsIn}Reco\",\n inputMaker=CompFactory.InputMakerForRoI(\n \"IM_EmergingJets_HypoOnlyStep\",\n RoITool = CompFactory.ViewCreatorInitialROITool(),\n mergeUsingFeature = True\n )\n )\n\n selAcc = SelectionCA(f\"EmergingJets_HypoOnlyStep_{jetsIn}\")\n selAcc.mergeReco(reco)\n\n selAcc.addHypoAlgo(\n CompFactory.TrigJetEJsHypoAlg(\n \"L2EmergingJets\",\n Tracks = sequenceOut,\n PV = vertices\n )\n )\n\n return MenuSequenceCA(flags, selAcc, HypoToolGen=trigJetEJsHypoToolFromDict)\n\n \ndef jetCRMenuSequence(flags, jetsIn, name):\n\n from TrigHLTJetHypo.TrigJetHypoToolConfig import trigJetCRHypoToolFromDict\n\n # Get track sequence name\n from TrigInDetConfig.ConfigSettings import getInDetTrigConfig\n from ..CommonSequences.FullScanDefs import fs_cells, trkFSRoI\n IDTrigConfig = getInDetTrigConfig( 'jet' )\n sequenceOut = IDTrigConfig.tracks_FTF()\n cellsin=fs_cells\n\n from .JetMenuSequencesConfig import getTrackingInputMaker\n from .JetTrackingConfig import JetFSTrackingCfg\n trk_acc = JetFSTrackingCfg(flags, trkopt='ftf', RoIs=trkFSRoI)\n\n reco = InEventRecoCA(f\"EmergingJets_HypoOnlyStep_{jetsIn}Reco\", inputMaker=getTrackingInputMaker('ftf'))\n reco.mergeReco(trk_acc)\n\n selAcc = SelectionCA(f\"EmergingJets_HypoOnlyStep_{jetsIn}\")\n selAcc.mergeReco(reco)\n selAcc.addHypoAlgo(\n CompFactory.TrigJetCRHypoAlg(\n \"L2CalRatio\",\n Tracks = sequenceOut,\n Cells = cellsin\n )\n )\n\n return MenuSequenceCA(flags, selAcc, 
HypoToolGen=trigJetCRHypoToolFromDict)\n","repo_name":"Yusuf-Manjra/athena","sub_path":"Trigger/TriggerCommon/TriggerMenuMT/python/HLT/Jet/ExoticJetSequencesConfig.py","file_name":"ExoticJetSequencesConfig.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71490268673","text":"\"\"\"\nhttps://www.hackerrank.com/challenges/plus-minus/problem\n\"\"\"\n#!/bin/python3\n\nimport os\nimport sys\n\n#\n# Complete the plusMinus function below.\n#\ndef plusMinus(arr):\n #\n # Write your code here.\n #\n postive_fraction,negative_fraction,zero_fraction=0,0,0\n for v in arr:\n if v>0:\n postive_fraction+=1\n elif v<0:\n negative_fraction+=1\n else:\n zero_fraction+=1 \n print(\"{:.6f}\\n{:.6f}\\n{:.6f}\".format(postive_fraction/len(arr),negative_fraction/len(arr),zero_fraction/len(arr)))\n # print(\"{:.6f}\".format(postive_fraction/len(arr)))\n # print(\"{:.6f}\".format(negative_fraction/len(arr)))\n # print(\"{:.6f}\".format(zero_fraction/len(arr)))\n\nif __name__ == '__main__':\n n = int(input())\n\n arr = list(map(int, input().rstrip().split()))\n\n plusMinus(arr)\n","repo_name":"Devinwon/master","sub_path":"coding-exercise/hankerRank/algorithm/warmup/plus-minus.py","file_name":"plus-minus.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33449250378","text":"# O(N*K) -- where N is the lenght of the list and K is the places\nclass Solution:\n def rotateRight(self, head: Optional[ListNode], k: int) -> Optional[ListNode]:\n #edge case empty list and list with only one element\n if not head or not head.next :\n return head\n \n length = 0\n current = head\n while current:\n length += 1\n current = current.next\n \n if k > length:\n \n num_rotation = k % length\n else:\n num_rotation = k\n \n \n \n \n #going for K times\n for ind in range(num_rotation):\n prev = head\n current = head.next\n \n #getting the last element in the list\n while current and current.next:\n prev = prev.next\n current = current.next\n prev.next = current.next\n temp = head\n head = current\n current.next = temp\n return head\n \n \n \n \n ","repo_name":"yonasengdu/Compitative-programming","sub_path":"0061-rotate-list/0061-rotate-list.py","file_name":"0061-rotate-list.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13589599143","text":"import argparse\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional\n\nimport lightning.pytorch as pl\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport wandb\nfrom lightning import Fabric\nfrom torch import Tensor\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom data import loaders_from_config\nfrom lateral_connections.feature_extractor.straight_line_pl_modules import FixedFilterFeatureExtractor\nfrom lateral_connections.s2_rbm import L2RBM\nfrom lateral_connections.s1_lateral_connections import LateralNetwork\nfrom tools import loggers_from_conf\nfrom tools.store_load_run import load_run, save_run\nfrom utils import get_config, print_start, print_warn\n\n\ndef parse_args(parser: Optional[argparse.ArgumentParser] = None):\n \"\"\"\n Parse arguments from command line.\n :param parser: Optional ArgumentParser instance.\n :return: Parsed 
arguments.\n \"\"\"\n if parser is None:\n parser = argparse.ArgumentParser(description=\"Lateral Connections Stage 1\")\n parser.add_argument(\"config\",\n type=str,\n help=\"Path to the config file\",\n )\n parser.add_argument(\"--batch-size\",\n type=int,\n # default=64,\n metavar=\"N\",\n dest=\"dataset:batch_size\",\n help=\"input batch size for training (default: 64)\"\n )\n parser.add_argument(\"--epochs\",\n type=int,\n # default=20,\n metavar=\"N\",\n dest=\"run:n_epochs\",\n help=\"number of epochs to train (default: 10)\"\n )\n parser.add_argument(\"--lr\",\n type=float,\n # default=0.001,\n metavar=\"LR\",\n dest=\"optimizers:opt1:params:lr\",\n help=\"learning rate (default: 0.001)\"\n )\n parser.add_argument('--wandb',\n action='store_true',\n default=False,\n dest='logging:wandb:active',\n help='Log to wandb'\n )\n parser.add_argument('--plot',\n action='store_true',\n default=False,\n dest='run:plots:enable',\n help='Plot results'\n )\n parser.add_argument('--plot_dir',\n type=str,\n dest='run:plots:store_path',\n help='Store the plotted results in the given path'\n )\n parser.add_argument('--store',\n type=str,\n dest='run:store_state_path',\n help='Path where the model will be stored'\n )\n parser.add_argument('--load',\n type=str,\n dest='run:load_state_path',\n help='Path from where the model will be loaded'\n )\n\n args = parser.parse_args()\n return args\n\n\ndef configure() -> Dict[str, Optional[Any]]:\n \"\"\"\n Load the config based on the given console args.\n :return: Configuration dict.\n \"\"\"\n args = parse_args()\n config = get_config(args.config, args)\n torch.backends.cudnn.deterministic = True\n if not torch.cuda.is_available():\n print_warn(\"CUDA is not available.\", title=\"Slow training expected.\")\n return config\n\n\ndef setup_fabric(config: Dict[str, Optional[Any]]) -> Fabric:\n \"\"\"\n Setup the fabric instance.\n :param config: Configuration dict\n :return: Fabric instance.\n \"\"\"\n loggers = loggers_from_conf(config)\n # torch.backends.cudnn.deterministic = True\n fabric = Fabric(accelerator=\"auto\", devices=1, loggers=loggers, callbacks=[])\n fabric.launch()\n fabric.seed_everything(1)\n return fabric\n\n\ndef setup_dataloader(config: Dict[str, Optional[Any]], fabric: Fabric) -> (DataLoader, DataLoader):\n \"\"\"\n Setup the dataloaders for training and testing.\n :param config: Configuration dict\n :param fabric: Fabric instance\n :return: Returns the training and testing dataloader\n \"\"\"\n train_loader, _, test_loader = loaders_from_config(config)\n train_loader = fabric.setup_dataloaders(train_loader)\n test_loader = fabric.setup_dataloaders(test_loader)\n return train_loader, test_loader\n\n\ndef setup_feature_extractor(config: Dict[str, Optional[Any]], fabric: Fabric) -> pl.LightningModule:\n \"\"\"\n Setup the feature extractor model that is used to extract features from images before they are fed into the model\n leveraging lateral connections.\n :param config: Configuration dict\n :param fabric: Fabric instance\n :return: Feature extractor model.\n \"\"\"\n feature_extractor = FixedFilterFeatureExtractor(config, fabric)\n feature_extractor = fabric.setup(feature_extractor)\n return feature_extractor\n\n\ndef cycle(\n config: Dict[str, Optional[Any]],\n feature_extractor: pl.LightningModule,\n lateral_network: LateralNetwork,\n l2: L2RBM,\n batch: Tensor,\n batch_idx: int,\n epoch: int,\n store_tensors: Optional[bool] = False,\n mode: Optional[str] = \"train\",\n fabric: Optional[Fabric] = None,\n l2_opt: Optional[Optimizer] = 
None,\n):\n \"\"\"\n Perform a single cycle of the model.\n :param config: Configuration dict\n :param feature_extractor: The feature extractor model to extract features from a given image.\n :param lateral_network: The network building sub-networks by using lateral connections\n :param l2: The L2RBM model\n :param batch: The images to process.\n :param batch_idx: The index of the batch.\n :param epoch: Current epoch\n :param store_tensors: Whether to store the tensors and return them.\n :param mode: The mode of the cycle, either train or eval.\n :param fabric: The fabric instance.\n :param l2_opt: The optimizer for the L2-model.\n :return: The features extracted from the image, the binarized features fed into the network with lateral\n connections, the features after lateral connections (binary) and the features after lateral connections as float\n \"\"\"\n assert mode in [\"train\", \"eval\"], \"Mode must be either train or eval\"\n assert mode == \"train\" and fabric is not None or mode == \"eval\", \"Fabric must be given in train mode\"\n assert mode == \"train\" and fabric is not None or mode == \"eval\", \"Optimizer must be given in train mode\"\n\n with torch.no_grad():\n features = feature_extractor(batch)\n\n lateral_network.new_sample()\n z = None\n\n input_features, lateral_features, lateral_features_f, l2_features, l2h_features = [], [], [], [], []\n for view_idx in range(features.shape[1]):\n x_view_features = features[:, view_idx, ...]\n x_view_features = feature_extractor.binarize_features(x_view_features)\n\n # Add noise to the input features -> should be removed by net fragments\n # x_view_features = np.array(x_view_features.detach().cpu())\n # x_view_features = x_view_features + np.random.choice(2, x_view_features.shape, p=[1 - 0.005, 0.005])\n # x_view_features = torch.from_numpy(x_view_features).cuda().float()\n # x_view_features = feature_extractor.binarize_features(x_view_features)\n\n if store_tensors:\n input_features.append(x_view_features)\n\n if z is None:\n z = torch.zeros((x_view_features.shape[0], lateral_network.model.out_channels, x_view_features.shape[2],\n x_view_features.shape[3]), device=batch.device)\n\n features_lat, features_lat_float, features_l2, features_l2_h = [], [], [], []\n for t in range(config[\"lateral_model\"][\"max_timesteps\"]):\n lateral_network.model.update_ts(t)\n x_in = torch.cat([x_view_features, z], dim=1)\n z_float, z = lateral_network(x_in)\n\n z2, z2_feedback, h, loss = l2.eval_step(z)\n\n if epoch > 10:\n mask_active = (z > 0) | (z2_feedback > 0)\n if F.mse_loss(z[mask_active], z2_feedback[mask_active]) < .1:\n z = z2_feedback\n\n features_lat.append(z)\n if store_tensors:\n features_lat_float.append(z_float)\n features_l2.append(z2_feedback)\n features_l2_h.append(h)\n\n features_lat = torch.stack(features_lat, dim=1)\n features_lat_median = torch.median(features_lat, dim=1)[0]\n if store_tensors:\n features_lat_float = torch.stack(features_lat_float, dim=1)\n features_l2 = torch.stack(features_l2, dim=1)\n features_l2_h = torch.stack(features_l2_h, dim=1)\n\n if mode == \"train\": # TODO: Train at the end after all timesteps (use median activation per cell),\n # also update L1 after training\n # Train L1\n x_rearranged = lateral_network.model.l1.rearrange_input(\n torch.cat([x_view_features, features_lat_median], dim=1))\n lateral_network.model.l1.hebbian_update(x_rearranged, features_lat_median)\n\n # Train L2\n l2_opt.zero_grad()\n z2, z2_feedback, h, loss = l2.train_step(features_lat_median)\n fabric.backward(loss)\n 
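# Descriptive note (added): fabric.backward() has accumulated the gradients of the RBM loss; the optimizer step below applies them to the L2 parameters.\n            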
l2_opt.step()\n\n if store_tensors:\n features_lat_float_median = torch.median(features_lat_float, dim=1)[0]\n features_l2_median = torch.median(features_l2, dim=1)[0]\n l2h_features_median = torch.median(features_l2_h, dim=1)[0]\n features_lat = torch.cat([features_lat, features_lat_median.unsqueeze(1)], dim=1)\n features_lat_float = torch.cat([features_lat_float, features_lat_float_median.unsqueeze(1)], dim=1)\n features_l2 = torch.cat([features_l2, features_l2_median.unsqueeze(1)], dim=1)\n features_l2_h = torch.cat([features_l2_h, l2h_features_median.unsqueeze(1)], dim=1)\n lateral_features.append(features_lat)\n lateral_features_f.append(features_lat_float)\n l2_features.append(features_l2)\n l2h_features.append(features_l2_h)\n\n if store_tensors:\n return features, torch.stack(input_features, dim=1), torch.stack(lateral_features, dim=1), torch.stack(\n lateral_features_f, dim=1), torch.stack(l2_features, dim=1), torch.stack(l2h_features, dim=1)\n\n\ndef single_train_epoch(\n config: Dict[str, Optional[Any]],\n feature_extractor: pl.LightningModule,\n lateral_network: LateralNetwork,\n l2: L2RBM,\n train_loader: DataLoader,\n epoch: int,\n fabric: Fabric,\n l2_opt: Optimizer,\n):\n \"\"\"\n Train the model for a single epoch.\n :param config: Configuration dict.\n :param feature_extractor: Feature extractor model.\n :param lateral_network: Laternal network model.\n :param l2: The L2RBM model\n :param train_loader: Test set dataloader.\n :param epoch: Current epoch.\n :param fabric: The fabric instance.\n :param l2_opt: The optimizer for the L2-model.\n \"\"\"\n feature_extractor.eval()\n lateral_network.eval()\n l2.train()\n for i, batch in tqdm(enumerate(train_loader),\n total=len(train_loader),\n colour=\"GREEN\",\n desc=f\"Train Epoch {epoch}/{config['run']['n_epochs']}\"):\n cycle(config, feature_extractor, lateral_network, l2, batch[0], i, epoch=epoch, store_tensors=False,\n mode=\"train\",\n fabric=fabric, l2_opt=l2_opt)\n\n\ndef single_eval_epoch(\n config: Dict[str, Optional[Any]],\n feature_extractor: pl.LightningModule,\n lateral_network: LateralNetwork,\n l2: L2RBM,\n test_loader: DataLoader,\n epoch: int,\n):\n \"\"\"\n Evaluate the model for a single epoch.\n :param config: Configuration dict.\n :param feature_extractor: Feature extractor model.\n :param lateral_network: Laternal network model.\n :param l2: The L2RBM model\n :param test_loader: Test set dataloader.\n :param epoch: Current epoch.\n \"\"\"\n feature_extractor.eval()\n lateral_network.eval()\n l2.eval()\n plt_img, plt_features, plt_input_features, plt_activations, plt_activations_f, plt_activations_l2 = [], [], [], \\\n [], [], []\n for i, batch in tqdm(enumerate(test_loader),\n total=len(test_loader),\n colour=\"GREEN\",\n desc=f\"Testing Epoch {epoch}/{config['run']['n_epochs']}\"):\n with torch.no_grad():\n features, input_features, lateral_features, lateral_features_f, l2_features, l2_h_features = cycle(config,\n feature_extractor,\n lateral_network,\n l2,\n batch[0],\n i,\n epoch=epoch,\n store_tensors=True,\n mode=\"eval\")\n plt_img.append(batch[0])\n plt_features.append(features)\n plt_input_features.append(input_features)\n plt_activations.append(lateral_features)\n plt_activations_f.append(lateral_features_f)\n plt_activations_l2.append(l2_features)\n\n plot = config['run']['plots']['enable'] and \\\n (not config['run']['plots']['only_last_epoch'] or epoch == config['run']['n_epochs'])\n wandb_b = config['logging']['wandb']['active']\n store_plots = config['run']['plots'].get('store_path', 
False)\n\n assert not wandb_b or wandb_b and store_plots, \"Wandb logging requires storing the plots.\"\n\n if plot or wandb_b or store_plots:\n if epoch == 0:\n feature_extractor.plot_model_weights(show_plot=plot)\n#\n plots_fp = lateral_network.plot_samples(plt_img,\n plt_features,\n plt_input_features,\n plt_activations,\n plt_activations_f,\n plot_input_features=epoch == 0,\n show_plot=plot)\n weights_fp = lateral_network.plot_model_weights(show_plot=plot)\n plots_l2_fp = l2.plot_samples(plt_img, plt_activations_l2, show_plot=plot)\n if epoch == config['run']['n_epochs']:\n videos_fp = lateral_network.create_activations_video(plt_img, plt_input_features, plt_activations)\n\n if wandb_b:\n logs = {str(pfp.name[:-4]): wandb.Image(str(pfp)) for pfp in plots_fp}\n logs |= {str(wfp.name[:-4]): wandb.Image(str(wfp)) for wfp in weights_fp}\n logs |= {str(wfp.name[:-4]): wandb.Image(str(wfp)) for wfp in plots_l2_fp}\n if epoch == config['run']['n_epochs']:\n logs |= {str(vfp.name[:-4]): wandb.Video(str(vfp)) for vfp in videos_fp}\n wandb.log(logs | {\"epoch\": epoch, \"trainer/global_step\": epoch})\n\n\ndef train(\n config: Dict[str, Optional[Any]],\n feature_extractor: pl.LightningModule,\n lateral_network: LateralNetwork,\n l2: L2RBM,\n train_loader: DataLoader,\n test_loader: DataLoader,\n fabric: Fabric,\n l2_opt: Optimizer,\n l2_sched: Optional[ReduceLROnPlateau] = None\n):\n \"\"\"\n Train the model.\n :param config: Configuration dict\n :param feature_extractor: Feature extractor module\n :param lateral_network: Lateral network module\n :param l2: L2RBM module\n :param train_loader: Training dataloader\n :param test_loader: Testing dataloader\n :param fabric: The fabric instance.\n :param l2_opt: The optimizer for the L2-model.\n :param l2_sched: The lr scheduler for the L2-model.\n \"\"\"\n start_epoch = config['run']['current_epoch']\n\n if config['logging']['wandb']['active'] or config['run']['plots']['enable']:\n single_eval_epoch(config, feature_extractor, lateral_network, l2, test_loader, 0)\n lateral_network.on_epoch_end() # print logs\n\n for epoch in range(start_epoch, config['run']['n_epochs']):\n single_train_epoch(config, feature_extractor, lateral_network, l2, train_loader, epoch + 1, fabric, l2_opt)\n single_eval_epoch(config, feature_extractor, lateral_network, l2, test_loader, epoch + 1)\n lateral_network.on_epoch_end()\n l2_logs = l2.on_epoch_end()\n if l2_sched is not None:\n l2_sched.step(l2_logs[\"l2/val/loss\"])\n config['run']['current_epoch'] = epoch + 1\n\n\ndef setup_lateral_network(config, fabric) -> LateralNetwork:\n \"\"\"\n Setup the model using lateral connections.\n :param config: Configuration dict\n :param fabric: Fabric instance\n :return: Model using lateral connections.\n \"\"\"\n return fabric.setup(LateralNetwork(config, fabric))\n\n\ndef setup_l2(config, fabric) -> L2RBM:\n \"\"\"\n Setup the model L2.\n :param config: Configuration dict\n :param fabric: Fabric instance\n :return: L2 model.\n \"\"\"\n return fabric.setup(L2RBM(config, fabric))\n\n\ndef main():\n \"\"\"\n Run the model: Create modules, extract features from images and run the model leveraging lateral connections.\n \"\"\"\n print_start(\"Starting python script 'main_lateral_connections.py'...\",\n title=\"Training S1: Lateral Connections Toy Example\")\n config = configure()\n fabric = setup_fabric(config)\n train_loader, test_loader = setup_dataloader(config, fabric)\n feature_extractor = setup_feature_extractor(config, fabric)\n lateral_network = setup_lateral_network(config, 
fabric)\n l2 = setup_l2(config, fabric)\n l2_opt, l2_sched = l2.configure_optimizers()\n\n if 'load_state_path' in config['run'] and config['run']['load_state_path'] != 'None':\n config, state = load_run(config, fabric)\n feature_extractor.load_state_dict(state['feature_extractor'])\n lateral_network.load_state_dict(state['lateral_network'])\n l2.load_state_dict(state['l2'])\n l2_opt.load_state_dict(state['l2_opt'])\n l2_sched.load_state_dict(state['l2_sched'])\n\n feature_extractor.eval() # does not have to be trained\n if 'store_path' in config['run']['plots'] and config['run']['plots']['store_path'] is not None and \\\n config['run']['plots']['store_path'] != 'None':\n fp = Path(config['run']['plots']['store_path'])\n if not fp.exists():\n fp.mkdir(parents=True, exist_ok=True)\n train(config, feature_extractor, lateral_network, l2, train_loader, test_loader, fabric, l2_opt, l2_sched)\n\n if 'store_state_path' in config['run'] and config['run']['store_state_path'] is not None and config['run'][\n 'store_state_path'] != 'None':\n save_run(config, fabric,\n components={'feature_extractor': feature_extractor, 'lateral_network': lateral_network, 'l2': l2,\n 'l2_opt': l2_opt, 'l2_sched': l2_sched.state_dict()})\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sagerpascal/lateral-connections","sub_path":"src/main_lateral_connections.py","file_name":"main_lateral_connections.py","file_ext":"py","file_size_in_byte":20032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15857153404","text":"import unittest\n\nfrom pyamaze import maze, agent, COLOR\n\n\ndef DFS(m):\n \"\"\"Creates a starting cell and an end cell and asks the maze obj to traverse the maze based on a\n preference of direction. 
Then will return the maze map to be loaded or saved.\"\"\"\n # define variables\n currentCell = (0, 0)\n start = (m.rows, m.cols)\n explored = [start]\n frontier = [start]\n dfsPath = {}\n\n if start is None:\n start = (m.rows, m.cols)\n\n # found finish line\n while len(frontier) > 0:\n currentCell = frontier.pop()\n if currentCell == (1, 1):\n break\n # when following direction, move based on if allowed to go in an allocated way\n # using 'ESNW' as the directions of East, then South, then North and West.\n # This can be changed to such as 'WNSE' to have the maze traversed around the right sides\n\n for direction in 'WNSE':\n if m.maze_map[currentCell][direction]:\n if direction == 'E':\n childCell = (currentCell[0], currentCell[1] + 1)\n elif direction == 'W':\n childCell = (currentCell[0], currentCell[1] - 1)\n elif direction == 'S':\n childCell = (currentCell[0] + 1, currentCell[1])\n elif direction == 'N':\n childCell = (currentCell[0] - 1, currentCell[1])\n if childCell in explored:\n continue\n explored.append(childCell)\n frontier.append(childCell)\n dfsPath[childCell] = currentCell\n fwdPath = {}\n cell = (1, 1)\n while cell != start:\n fwdPath[dfsPath[cell]] = cell\n cell = dfsPath[cell]\n return fwdPath\n\n\nif __name__ == '__main__':\n # run a standard 20, 20 maze or run the maze in the file\n loadMaze = True\n\n if not loadMaze:\n m = maze(20, 20)\n m.CreateMaze(saveMaze=True)\n path = DFS(m)\n a = agent(m, footprints=True)\n m.tracePath({a: path}, delay=100, showMarked=True, kill=True)\n\n m.run()\n if loadMaze:\n m = maze()\n m.CreateMaze(loadMaze='maze.csv')\n path = DFS(m)\n a = agent(m, footprints=True)\n m.tracePath({a: path}, delay=100, showMarked=True, kill=True)\n\n m.run()\n\n\nclass TestPositionsMaze(unittest.TestCase):\n def test_startPos(self):\n \"\"\"This test will ensure that the maze obj has started where it was expected to and compares the start variable\n with the start position of the object.\"\"\"\n m = maze(5, 5)\n start = (m.rows, m.cols)\n\n m.CreateMaze(saveMaze=True)\n path = DFS(m)\n a = agent(m, footprints=True)\n self.assertEqual(a.position, start)\n print('start ' f'{start}', 'position' f'{a.position}')\n # m.tracePath({a: path}, delay=100, showMarked=True, kill=True)\n # Test maze\n\n\n def test_endPos(self):\n \"\"\"This test like before however will now test for the end position of the maze and see if the obj\n has successfully traversed the maze. 
It will then return the end position of the maze and the maze obj.\"\"\"\n m = maze(5, 5)\n end = (1, 1)\n\n m.CreateMaze(saveMaze=True)\n path = DFS(m)\n a = agent(m, footprints=True)\n m.tracePath({a: path}, delay=100, showMarked=True, kill=False)\n m.run()\n self.assertEqual(a.position, end)\n print(m.path)\n print('end ' f'{end}', 'position' f'{a.position}')\n\n # Test maze","repo_name":"VoidPlayz510/PythonMazeScript","sub_path":"PythonMazeScripts/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41192644093","text":"import FWCore.ParameterSet.Config as cms\n\nvectorHitsBuilderValidation = cms.EDAnalyzer('VectorHitsBuilderValidation',\n src = cms.string('siPhase2Clusters'),\n links = cms.InputTag('simSiPixelDigis', 'Tracker'),\n VH_acc = cms.InputTag('siPhase2VectorHits', 'accepted'),\n VH_rej = cms.InputTag('siPhase2VectorHits', 'rejected'),\n CPE = cms.ESInputTag('phase2StripCPEESProducer', 'Phase2StripCPE'),\n trackingParticleSrc = cms.InputTag('mix', 'MergedTrackTruth'),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"RecoLocalTracker/SiPhase2VectorHitBuilder/vectorHitsBuilderValidation_cfi.py","file_name":"vectorHitsBuilderValidation_cfi.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31819207218","text":"tests = int(input())\n\nalphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\nfor case in range(tests):\n S = input()\n F = input()\n\n possible = list(F)\n possible_indices = [alphabets.index(x) for x in possible]\n\n total_transformations = 0\n\n\n def find_lowest_transformation(element):\n lowest = 100\n index_of_element = alphabets.index(element)\n for possible_index in possible_indices:\n diff = abs(possible_index - index_of_element)\n if diff > 13:\n diff = 26 - diff\n if lowest > diff:\n lowest = diff\n return lowest\n\n\n for letter in S:\n if letter not in possible:\n current_transormations = find_lowest_transformation(letter)\n total_transformations += current_transormations\n\n print(f\"Case #{case+1}: {total_transformations}\")\n","repo_name":"khushaal-nandwani/google-kickstart","sub_path":"Transform The String.py","file_name":"Transform The String.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11914423223","text":"import os\nimport pandas as pd\nimport csv\n\npath_to_sra = \"/gpfs/space/projects/genomic_references/OneK1K/sra\"\nmetadata = pd.read_csv(\"/gpfs/space/home/ingvar/Bioinformatics/OneK1K_SRR_metadata.txt\", sep = \"\\t\")\n\nfor root, dirs, files in os.walk(path_to_sra):\n\twith open('sra.tsv', 'w', newline=\"\", encoding=\"utf-8\") as tsvfile:\n\t\twriter = csv.writer(tsvfile, delimiter='\\t')\n\t\twriter.writerow([\"POOL_ID\",\"SRA_ID\", \"SRA_DIR\"])\n\t\tfor file in sorted(files):\n\t\t\tfilename_without_filetype = file[:-4]\n\t\t\taccession = metadata[metadata[\"run_accession\"] == filename_without_filetype][\"accession\"].values[0]\n\t\t\twriter.writerow([accession, filename_without_filetype, os.path.join(root, 
file)])\n\n","repo_name":"marilin99/sc_quality_analysis","sub_path":"scripts/sra2fastq_nextflow/create_sra_tsv.py","file_name":"create_sra_tsv.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31963918457","text":"# 594. 最长和谐子序列\nfrom collections import Counter\n\n\nclass Solution:\n def findLHS(self, nums: List[int]) -> int:\n\n res = 0\n hashmap = Counter(nums)\n # hashmap = {}\n # for i in nums:\n # if i not in hashmap:\n # hashmap[i] = 0\n # hashmap[i] += 1\n # hashmap = sorted(hashmap.items(), key= lambda x: x[1], reverse= True)\n # print(hashmap)\n\n for num in nums:\n if num + 1 in hashmap:\n res = max(res,hashmap[num] + hashmap[num+1])\n \n return res\n \n\n","repo_name":"mrmenand/Py_transaction","sub_path":"LeetCode/hashtable/594.最长和谐子序列.py","file_name":"594.最长和谐子序列.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28631487402","text":"import os\nimport argparse\nimport glob\nimport json\nimport codecs\nimport random\n\n# argument\nap = argparse.ArgumentParser()\n\nap.add_argument(\"data_file\", type=str, help=\"all data\")\nap.add_argument(\"train_file\", type=str, help=\"output training file\")\nap.add_argument(\"test_file\", type=str, help=\"output testing file\")\nap.add_argument(\"-t\", \"--test-num\", type=int, default=1000, help=\"# test frames\")\n\nargs = ap.parse_args()\n\nall_data = [] # (intent, tokens, labels)\nwith codecs.open(args.data_file, \"r\", \"utf-8\") as f_in:\n lines = f_in.readlines()\n for i in range(0, len(lines), 3):\n intent = lines[i].strip()\n word_line = lines[i+1].strip()\n label_line = lines[i+2].strip()\n\n #print intent, word_line, label_line\n all_data.append( (intent, word_line, label_line) )\n\n# shuffle\nrandom.shuffle(all_data)\n\n# split\ntest_data = all_data[:args.test_num]\ntrain_data = all_data[args.test_num:]\nwith codecs.open(args.train_file, \"w\", \"utf-8\") as f_out:\n for (intent, word_line, label_line) in train_data:\n f_out.write(intent + \"\\n\")\n f_out.write(word_line + \"\\n\")\n f_out.write(label_line + \"\\n\")\nwith codecs.open(args.test_file, \"w\", \"utf-8\") as f_out:\n for (intent, word_line, label_line) in test_data:\n f_out.write(intent + \"\\n\")\n f_out.write(word_line + \"\\n\")\n f_out.write(label_line + \"\\n\")\n","repo_name":"henryyang42/NTU-Course-Bot","sub_path":"LU_LSTM/shuf_split_data.py","file_name":"shuf_split_data.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"15067872499","text":"# WHILE (cat timp)\n# while conditie:\n# fa ceva\n\n# # While este o structura repetitiva prin care se executa o bucata de cod cat timp conditia pusa este adevarata\n#\n# i = 0\n# while i < 7:\n# print(i)\n# i += 1\n#\n# print(\"Am terminat!\")\n\n# # Use-case real: preluare user input si verificare sa fie corect\n# # Vrem sa luam varsta utilizatorului, care trebuie sa fie int, si sa fie intre 1 si 99\n# age = 0\n# correct_input = False\n# while not correct_input:\n# age = input(\"Introdu varsta\\n\")\n# if age.isdigit():\n# age = int(age)\n# if 1 < age < 99:\n# print(f\"Ai introdus {age}, care este o varsta corecta! Felicitari!\")\n# correct_input = True\n# else:\n# print(\"Nu ati selectat o varsta corecta, nu este in intervalul 1-99!\")\n# else:\n# print(\"Nu ai introdus un numar! 
Te rog mai incearca!\\n\")\n\n# Ghicire numar la care s-a gandit calculatorul (intre 1 si 10)\nnumar_calculator = 7\nnumar_ghicit = int(input(\"Ghiceste:\\n\"))\n\nwhile numar_ghicit != numar_calculator:\n if numar_calculator < numar_ghicit:\n print(\"Numarul la care m-am gandit eu este mai mic decat cel ghicit\")\n else:\n print(\"Numarul la care m-am gandit eu este mai mare decat cel ghicit\")\n numar_ghicit = int(input(\"Ghiceste din nou:\\n\"))\n\nprint(\"Felicitari, ai castigat, acesta era numarul ghicit!\")\n","repo_name":"Alx152/curs_TA18_ITF_Adela","sub_path":"curs4_cicluri_repetitive/while_loop.py","file_name":"while_loop.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43143620390","text":"'''The tricky part here is to keep the indices of the input string S intact so we can check the next source string at a specific index. If you modify (insert or replace) the string S or use a string builder to build a string Out, you’ll lose the integrity of the indices in S anytime the replacement (target) has a different length than the source substring.\n=> Split S into an array so we can always get the correct index of the first letter of each source string in the sources. Convert the next characters of the source substring to empty strings. Join the array and return.\n\nLogic:\n\nSplit input string S into an array of characters.\nIterate from left to right of indexes and sources, check if sources[i] is at index indexes[i] in input string S. If yes, replace char_arr[i] with targets[i] and change all char from i+1 to len(sources[i]) in char_arr to empty strings.\nJoin the character array.\nTime: O(n + m) where n = len(S) and m = len(indexes) = len(sources) = len(targets) because:\n\nSplit S: O(n)\nIterate through sources and target O(m). 
Replace: O(1)\nJoin O(n + m) because output = n + m\nTotal: O(n + m + n + m) = O(2(n + m)) remove constant = O(n + m)\nSpace: O(n) because:\n\nChar array needs O(n) space.\nPython\n'''\nclass Solution:\n \n def findReplaceString(self, S: str, indexes: List[int], sources: List[str], targets: List[str]) -> str:\n char_arr = list(S)\n for i in range(len(indexes)):\n idx = indexes[i]\n source_str = sources[i]\n replacement_str = targets[i]\n if self._is_substring_at(idx, source_str, S):\n self._replace_at(idx, source_str, replacement_str, char_arr)\n return \"\".join(char_arr)\n \n def _is_substring_at(self, idx, str1, str2):\n i = 0\n j = idx\n while i < len(str1) and j < len(str2):\n if str1[i] != str2[j]:\n return False\n i += 1\n j += 1\n return True\n \n def _replace_at(self, idx, source, replacement, arr):\n arr[idx] = replacement\n for i in range(idx + 1, idx + len(source)):\n arr[i] = \"\"","repo_name":"hardik302001/leetcode","sub_path":"problems/find_and_replace_in_string/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72600523393","text":"from collections import namedtuple\nfrom logging import getLogger\nfrom os.path import splitext\nfrom uuid import uuid1 as uuid\nfrom xml.dom.minidom import Document, parseString\n\nLOGGER = getLogger( __name__ )\n\nNamespace = namedtuple( 'Namespace', 'uri prefix' )\n\nTaggedImage = namedtuple( 'TaggedImage', 'path lat lon title author description')\n\n\nNAMESPACES = {\n\t'kml': Namespace( 'http://www.opengis.net/kml/2.2', '' ),\n\t'foaf': Namespace( 'http://xmlns.com/foaf/0.1/', 'foaf' ),\n\t'dc': Namespace( 'http://dublincore.org/documents/dcmi-namespace/', 'dc' ),\n\t'xml': Namespace( 'http://www.w3.org/XML/1998/namespace', 'xml' ), \n}\n\ndoc = None\n\ndef init( metadata ):\n\tglobal doc\n\tif metadata is None:\n\t\tdoc = Document()\n\t\troot = doc.createElementNS( NAMESPACES[ 'kml' ].uri, 'kml' )\n\t\troot.setAttribute( 'xmlns', NAMESPACES[ 'kml' ].uri )\n\t\troot.setAttribute( 'xmlns:' + NAMESPACES[ 'foaf' ].prefix, NAMESPACES[ 'foaf' ].uri )\n\t\troot.setAttribute( 'xmlns:' + NAMESPACES[ 'dc' ].prefix, NAMESPACES[ 'dc' ].uri )\n\t\troot.setAttribute( 'xmlns:' + NAMESPACES[ 'xml' ].prefix, NAMESPACES[ 'xml' ].uri )\n\t\tdoc.appendChild( root )\n\telse:\t\n\t\tdoc = parseString( metadata )\n\ndef metadata():\n\treturn doc.toxml( 'utf-8' )\n\ndef astuples():\n\tdef x( pm, tag ):\n\t\telem = pm.getElementsByTagName( tag )\n\t\tif not elem: return None\n\t\tfc = elem[ 0 ].firstChild;\n\t\tif not fc: return None\n\t\treturn fc.nodeValue\n\tres = []\n\tfor pm in doc.getElementsByTagName( 'Placemark' ):\n\t\tlat, lon = x( pm.getElementsByTagName( 'Point' )[ 0 ], 'coordinates' ).split( ',' )\n\t\tres.append( TaggedImage( \n\t\t\tpm.attributes.getNamedItem( 'xml:id' ).value, lat, lon,\n\t\t\tx( pm, 'name' ), x( pm, 'dc:creator' ), x( pm, 'description' )\n\t\t) )\n\treturn res\n\t\ndef append( img, placemark ):\n\t_, ext = splitext( img )\n\timg_id = uuid().hex + ext\n\tplacemark.setAttributeNS( NAMESPACES[ 'xml' ].uri, '{0}:{1}'.format( NAMESPACES[ 'xml' ].prefix, 'id' ), img_id )\n\tdoc.documentElement.appendChild( placemark )\n\treturn img_id\n\ndef element( tagName, namespace = 'kml', child = None ):\n\telement = doc.createElementNS( \n\t\tNAMESPACES[ namespace ].uri, \n\t\t'{0}:{1}'.format( NAMESPACES[ namespace ].prefix, tagName ) if NAMESPACES[ namespace ].prefix else tagName \n\t)\n\tif child: \n\t\tif 
isinstance( child, unicode ): element.appendChild( doc.createTextNode( child ) )\n\t\telse: element.appendChild( child )\n\treturn element\n\ndef placemark( lat, lon ):\n\treturn element( 'Placemark', \n\t\tchild = element( 'Point', \n\t\t\tchild = element( 'coordinates', child = u'{0},{1}'.format( lat, lon ) ) \n\t\t) \n\t)\n\ndef creator( creator ):\n\treturn element( 'creator', 'dc', creator )\n\ndef name( name ):\n\treturn element( 'name', child = name )\n\ndef description( description ):\n\treturn element( 'description', child = description )\n","repo_name":"Aladdin-Unimi/Learning-Week-2012-Software","sub_path":"lwapp/lwf/apps/img/kml.py","file_name":"kml.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73823296515","text":"#!/usr/bin/python\n\nDOCUMENTATION = '''\n---\nmodule: identities\n\nshort_description: Manage Identities in Keyfactor\n\nversion_added: \"2.11\"\n\ndescription:\n - Module manages Identities in Keyfactor.\n Users can either add a new Keyfactor Identity or delete an existing Keyfactor Identity.\n Currently supports checkmode\n\noptions:\n name:\n description:\n - This is the Identity name. (\\\\)\n required: true\n src:\n description:\n - Name of the Virtual Directory. Default: CMSAPI\n required: false\n state:\n description:\n required: true\n - Whether the State should be present or absent\n choices: [\"present\", \"absent\"]\n\nauthor:\n - David Fleming (@david_fleming)\n'''\n\nEXAMPLES = '''\n# Create Identity\n- name: Create Identity in Keyfactor\n keyfactor.platform.identities:\n name: \"KEYFACTOR\\\\Test\"\n state: 'present'\n'''\n\nRETURN = '''\nchanged:\n description: Whether or not a change was made\n type: bool\n returned: always\n'''\n\nfrom ansible_collections.keyfactor.platform.plugins.module_utils.core import AnsibleKeyfactorModule\n\ndef run_module():\n\n argument_spec = dict(\n src=dict(type='str', required=False, default=\"CMSAPI\")\n )\n\n # seed the result dict in the object\n result = dict(\n changed=False\n )\n\n module = AnsibleKeyfactorModule(\n argument_spec=argument_spec,\n supports_check_mode=True\n )\n\n # if the user is working with this module in only check mode we do not\n # want to make any changes to the environment, just return the current\n # state with no modifications\n if module.check_mode:\n result['changed'] = checkMode(module)\n module.exit_json(**result)\n\n if module.params['state'] == 'absent':\n result['changed'] = handleDelete(module)\n elif module.params['state'] == 'present':\n result['changed'] = handleAdd(module)\n\n module.exit_json(**result)\n\nimport json\n\n\ndef checkMode(module):\n current = handleGet(module)\n # Since we do not update any parameter for an identity, a simple state check is\n # enough for checkMode.\n if module.params['state'] == 'absent':\n if current:\n return True\n return False\n if module.params['state'] == 'present':\n if current:\n return False\n return True\n\ndef handleAdd(module):\n url = module.params.get('src')\n endpoint = url+'/Security/1/AddIdentity'\n payload = { \"Account\": module.params['name']}\n resp, info = module.handleRequest(\"POST\", endpoint, payload)\n if info['status'] == -1:\n module.fail_json(msg=info)\n try:\n content = resp.read()\n if (json.loads(content)['Valid']) == True:\n return True\n module.fail_json(msg='Failed Add.')\n except AttributeError:\n content = info.pop('body', '')\n message = (json.loads(content)['Message'])\n if message == 'Cannot create 
Identity because it already exists.':\n            return False\n        if message == 'Could not find user or Group.':\n            module.fail_json(msg=message)\n        module.fail_json(msg='Failed Add Error.')\n\n\ndef handleDelete(module):\n    url = module.params.get('src')\n    endpoint = url+'/Security/1/DeleteIdentity'\n    payload = { \"Account\": module.params['name']}\n    resp, info = module.handleRequest(\"POST\", endpoint, payload)\n    try:\n        content = resp.read()\n        if json.loads(content)['Message'] == 'ADIdentity deleted successfully':\n            return True\n        module.fail_json(msg='Failed.')\n    except AttributeError:\n        content = info.pop('body', '')\n        message = json.loads(content)['Message']\n        if message == 'Can not delete Identity because it does not exist.':\n            return False\n        module.fail_json(msg='Failed.')\n\ndef handleGet(module):\n    url = module.params.get('src')\n    endpoint = url+'/Security/1/GetIdentities'\n    resp, info = module.handleRequest(\"GET\", endpoint)\n    try:\n        content = resp.read()\n        contentSet = json.loads(content)\n        collection = [collection_content for collection_content in contentSet if collection_content['AccountName'].lower() == module.params['name'].lower()]\n        if collection:\n            return next(iter(collection))\n        # The identity was not found in the returned set.\n        return {}\n    except AttributeError:\n        content = info.pop('body', '')\n        message = json.loads(content)['Message']\n        if message == 'Identity with Name \\'' + module.params['name'] + '\\' does not exist.':\n            return {}\n        module.fail_json(msg=message)\n\ndef main():\n    run_module()\n\nif __name__ == '__main__':\n    main()","repo_name":"Keyfactor/ansible-collection-keyfactor-platform","sub_path":"plugins/modules/identities.py","file_name":"identities.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"6465723192","text":"import random as rand\n\n# Build a list of 11 distinct random integers in [0, 100].\na = []\nwhile len(a) <= 10:\n    b = rand.randint(0, 100)\n    if b in a:\n        print(b, 'is already in array.')\n    else:\n        a.append(b)\n        print(b, 'is added to array')\nprint(a)\nprint('SORTING')\n# Bubble sort: on each pass, swap adjacent out-of-order pairs.\n# Eleven passes are enough to sort eleven elements.\nfor _ in range(11):\n    c = 0\n    d = 1\n    while d <= 10:\n        if a[c] > a[d]:\n            # pop/insert on adjacent indices acts as a swap\n            f = a.pop(c)\n            a.insert(d, f)\n        d = d + 1\n        c = c + 1\n    print(a)\nprint('FINAL SORTED')\nprint(a)\n","repo_name":"TABerz90/Class-Work","sub_path":"Bubble Sorting.py","file_name":"Bubble Sorting.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44265608570","text":"from fastapi import APIRouter, Depends\n\nfrom helpers.get_time import get_current_time\nfrom helpers.crud import get_user_by_login\nfrom helpers.dependencies import get_db, get_user\n\naction_router = APIRouter(\n    prefix=\"/action\"\n)\n\n\n@action_router.post(\"/decrement\")\ndef decrement(\n    user=Depends(get_user),\n    db=Depends(get_db)\n):\n    user = get_user_by_login(db, user.login)\n    user.counter -= 1\n    db.commit()\n    return \"OK\"\n\n\n@action_router.post(\"/increment\")\ndef increment(\n    user=Depends(get_user),\n    db=Depends(get_db)\n):\n    user = get_user_by_login(db, user.login)\n    user.counter += 1\n    db.commit()\n    return \"OK\"\n\n\n@action_router.get(\"/counter\")\ndef get_counter(\n    user=Depends(get_user),\n    db=Depends(get_db)\n):\n    user = get_user_by_login(db, user.login)\n    return user.counter\n\n\n@action_router.patch(\"/new_action\")\ndef update_action(\n    user=Depends(get_user),\n    db=Depends(get_db)\n):\n    user = get_user_by_login(db, 
user.login)\n user.last_action_time = get_current_time()\n db.commit()\n return \"OK\"\n","repo_name":"Melekh11/business-club","sub_path":"backend/routes/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30878479247","text":"import random as rd\n\ndef bottom_to_top(deck):\n bottom=deck[len(deck)-1]\n count=0\n while deck[0]!=bottom:\n idx=rd.randrange(len(deck))\n tmp=deck[0]\n for i in range(idx):\n deck[i]=deck[i+1]\n deck[idx]=tmp\n count+=1\n return count\n\ndeck=[x for x in range(52)]\nrd.shuffle(deck)\n\ntotal=0\nfor _ in range(1000):\n total+=bottom_to_top(deck)\n\nprint(int(total/1000))\n","repo_name":"jd1618/Quant-Projects","sub_path":"Probability/bottom_to_top.py","file_name":"bottom_to_top.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25773480636","text":"import re\r\nimport json\r\nimport glob\r\nimport inspect\r\nimport typing\r\n\r\n# Source language used by the calling script for translatable strings. This should always be set.\r\n__source_lang = None\r\n\r\n# Target language to translate to. If this is the same as source_lang or is None,\r\n# the module will function as a passthrough and not translate anything.\r\n__target_lang = None\r\n\r\n# A dictionary to store loaded translation data. It will be populated by __load_translations(),\r\n# which is called by various functions in this module as needed. Data structure is as follows:\r\n# {\"Source phrase in source language\" : \"Translated version of source phrase\"}\r\n__translations: typing.Dict[str, str] = {}\r\n\r\n# A function to load a translation file from the source directory. A target_lang\r\n# must always be passed, as it will define the name of the file to be searched\r\n# for by this function--The full name of the file being target_lang + \".translation\"\r\ndef __load_translations(target_lang):\r\n \r\n # In this function we'll need access to the global var \"__translations\".\r\n global __translations\r\n \r\n # That being done, attempt to find the specified file.\r\n try:\r\n \r\n # This context handler tries to open the specified file, target_lang + \".translation\",\r\n # in the source directory. If successful, it loads the json in that file and stores\r\n # the resulting dict of translation data to the global var \"__translations\".\r\n with open(target_lang+\".translation\", \"r\", encoding='utf-8') as read_file:\r\n __translations = json.load(read_file)\r\n \r\n # If a matching filename is not found for the given target_lang,\r\n except FileNotFoundError:\r\n # Reraise an error denoting that the requested file is missing.\r\n raise FileNotFoundError(\"No translation file found for target language '\" + target_lang + \"'.\")\r\n \r\n # If an error occurs parsing the json in the file,\r\n except json.decoder.JSONDecodeError as err:\r\n # Reraise an error denoting the problem and its location within the file.\r\n raise json.decoder.JSONDecodeError(\"\\nCouldn't parse JSON in file '\" + target_lang + \".translation'. 
\"\r\n + \"Check to make sure it is not malformed.\\n\"\r\n +\"Location of error in JSON data file\", err.doc, err.pos)\r\n\r\n\r\n# The function used by the calling script to initialize the translation functions.\r\n# It will be passed a source_lang and optionally a target_lang, both strings.\r\n# If target_lang is None, the translator will be setup for passthrough of the source lang.\r\n\r\n# This function returns a reference to the function __gettext(), which does the actual translating;\r\n# thus when this translator() function is called, its return should be stored to \"_\" in\r\n# the calling script so translation can be accessed by the call _('Text to be translated.')\r\n\r\ndef get_lang_readable_name(lang):\r\n # Note: for english, which we don't expect there to\r\n # usually be a translation file for, bypass:\r\n if lang == 'en':\r\n return \"English\"\r\n \r\n # lang is a required field and must be a str of len 2.\r\n if type(lang) is not str or len(lang) != 2:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"source_lang must be a two character string, such as 'en' or 'pl'.\")\r\n \r\n # Otherwise, if the passed lang is valid,\r\n else:\r\n # Backup any current data in __translations so we can\r\n # restore it at the end of this function.\r\n global __translations\r\n temptrans = __translations\r\n \r\n # Load translation data for lang:\r\n __load_translations(lang)\r\n \r\n # Get the readable name of the language:\r\n output = __translations[\"__target_lang_readable\"]\r\n \r\n # restore the temporary translation data from earlier to its known good state:\r\n __translations = temptrans\r\n \r\n # Return\r\n return output\r\n \r\n\r\ndef translator(source_lang, target_lang=None):\r\n \r\n # Source lang is a required field and must be a str of len 2.\r\n if type(source_lang) is not str or len(source_lang) != 2:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"source_lang must be a two character string, such as 'en' or 'pl'.\")\r\n \r\n # Otherwise, if the passed source_lang is valid,\r\n else:\r\n # Store it to the global variable \"__source_lang\".\r\n # It is also stored here as lowercase to standardize\r\n # case for later matching.\r\n global __source_lang\r\n __source_lang = source_lang.lower()\r\n \r\n # Target language MAY be defined. If it is None, the translation function __gettext()\r\n # will act as a passthrough, returning the original source language text.\r\n \r\n # First, validate the target_lang as we did for the source_lang\r\n # (the rules are the same, except this time it can also be None).\r\n \r\n # Setup the global variable \"__target_lang\" to receive data,\r\n # if we should find valid data.\r\n global __target_lang\r\n \r\n # If no target_lang was passed,\r\n if target_lang is None:\r\n # set global \"__target_name\" to None\r\n __target_lang = None\r\n \r\n # If a valid target_lang string was passed,\r\n elif type(target_lang) == str and len(target_lang) == 2:\r\n # Store it to global \"__target_name\"\r\n # as a lowercase string, as before.\r\n __target_lang = target_lang.lower()\r\n \r\n else:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"target_lang must be either a two character string, or the value None.\")\r\n \r\n # Validation of parameters done. \r\n \r\n # Now, if we have a target language to translate to,\r\n if __target_lang != None and __target_lang != __source_lang:\r\n \r\n # attempt to load the translation file. 
this function will\r\n        # raise an error if the specified file is not found.\r\n        __load_translations(__target_lang)\r\n    \r\n    # If we don't have a target language to translate to,\r\n    else:\r\n        # Reset the global translations dict to empty.\r\n        global __translations\r\n        __translations = {}\r\n    \r\n    # Finally, return the function __gettext. It should be assigned in the calling script to \"_\" so that\r\n    # it can be called from anywhere using \"_()\"\r\n    return __gettext\r\n\r\n# A function to translate text. When the user runs the translator() function,\r\n# a reference to this function is returned such that the user may assign that\r\n# reference to a variable and call it as needed, such as _(\"text to be translated\")\r\n# if the recommended variable of \"_\" is used.\r\n\r\n# The sole parameter, source_phrase, is the incoming string to be translated\r\n# from __source_lang to __target_lang. It will be looked up in the dict and\r\n# translated on demand, or if no match is found a KeyError will be thrown.\r\ndef __gettext(source_phrase):\r\n    \r\n    # First, confirm that we have a valid __source_lang.\r\n    if type(__source_lang) != str or len(__source_lang) != 2:\r\n        \r\n        # If not, raise an error.\r\n        raise RuntimeError(\"The source language of your script has not been set to valid data. \"\r\n                           + \"Please make sure it is set in your calling script by first calling \"\r\n                           + \"the function translator() with a valid source_lang parameter.\")\r\n    \r\n    # Next, validate the __target_lang as well.\r\n    if type(__target_lang) not in [str, type(None)] or \\\r\n       __target_lang is not None and len(str(__target_lang)) != 2:\r\n        \r\n        # If it is set to invalid data, raise an error stating such.\r\n        raise RuntimeError(\"The target language of your script has been set to a bad value. 
\"\r\n + \"Please make sure it is set in your calling script by first calling \"\r\n + \"the function translator() with a valid target_lang parameter, or \"\r\n + \"otherwise setting the target_lang to None.\")\r\n \r\n # Otherwise, if the data is valid,\r\n else:\r\n \r\n # If setup for passthrough,\r\n if __target_lang == None or __target_lang == __source_lang:\r\n \r\n # Return the string as-is, without translation.\r\n return source_phrase\r\n \r\n # If setup to translate,\r\n else:\r\n \r\n # Check the translation dict for the source_phrase as a key.\r\n if source_phrase in __translations.keys():\r\n \r\n # If found, return the translated phrase.\r\n return __translations[source_phrase]\r\n \r\n # If not found in the dict,\r\n else:\r\n \r\n # Raise an error stating such.\r\n raise KeyError(\"Source phrase '\" + source_phrase\r\n + \"' was not found in the translation file for target_lang '\"\r\n + __target_lang + \"'.\")\r\n\r\n\r\n# Function to return what language packs are installed.\r\n# These will be files in the source directory with the extension '.translation'.\r\ndef getlangs(source_lang):\r\n \r\n # source_lang should be a string and always should be provided to this function.\r\n if type(source_lang) is not str or len(source_lang) != 2:\r\n raise ValueError(\"source_lang must be a string of exactly two characters in length.\")\r\n \r\n # We always support the source language used in the script itself.\r\n # Convert it to lowercase, and store it at the top of the list.\r\n source_lang = source_lang.lower()\r\n langs = [source_lang]\r\n \r\n # Add to this list another list of files with the .translation extension\r\n # that were found in the source directory.\r\n langs = langs + glob.glob(\"*.translation\")\r\n \r\n # Trim all language strings to 2 characters.\r\n langs = [lang[:2] for lang in langs]\r\n \r\n # save the currently loaded translation data, for restoration later. \r\n global __translations\r\n temptrans = __translations\r\n \r\n # Iterate over all found langs to validate their data\r\n for lang in langs:\r\n \r\n # Bypass checks for the source language, as it won't need any translation data loaded.\r\n if lang == source_lang:\r\n continue\r\n \r\n # For other language packs found in the source directory:\r\n try:\r\n \r\n # try to load the translation file for the current lang.\r\n __load_translations(lang)\r\n \r\n # if that load was successful, confirm its \"__source_lang\" key matches our source_lang.\r\n if __translations[\"__source_lang\"].lower() != source_lang:\r\n \r\n # If the above validation failed, raise an error.\r\n raise RuntimeError(\"The translation file for '\" + lang + \"' defines a source language of '\"\r\n + __translations[\"__source_lang\"].lower() + \"' which does not match the \"\r\n + \"program's specified source language of '\" + source_lang + \"'.\")\r\n \r\n # If the load of translation data failed or was invalid:\r\n except RuntimeError as e:\r\n \r\n # Catch and reraise the above mismatched language error if it is what's being excepted:\r\n if \"does not match\" in str(e):\r\n raise e\r\n \r\n # If any other error was raised, reraise as a general loading error.\r\n else:\r\n raise RuntimeError(\"Could not load the translation file for '\" + lang\r\n + \"'. 
Please check the file for errors.\")\r\n \r\n # And no matter what happens above, always:\r\n finally:\r\n \r\n # restore the temporary translation data from earlier to its known good state.\r\n __translations = temptrans\r\n \r\n # Lastly, if all langs loaded successfully, return the list of available langs.\r\n return langs\r\n\r\n# Function to generate a skeleton template for translation files.\r\ndef get_data_template(source_lang, target_lang):\r\n # Source lang is a required field and must be a str of len 2.\r\n if type(source_lang) is not str or len(source_lang) != 2:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"source_lang must be a two character string, such as 'en' or 'pl'.\")\r\n \r\n # Otherwise, if the passed source_lang is valid,\r\n else:\r\n # Store it to the local variable \"__source_lang\".\r\n # It is also stored here as lowercase to standardize\r\n # case for later matching.\r\n source_lang = source_lang.lower()\r\n \r\n # Next, validate the target_lang as we did for the source_lang.\r\n \r\n # If a valid target_lang string was passed,\r\n if type(target_lang) == str and len(target_lang) == 2:\r\n # Store it to local \"target_name\"\r\n # as a lowercase string, as before.\r\n target_lang = target_lang.lower()\r\n \r\n else:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"target_lang must be a two character string, such as 'en' or 'pl'.\")\r\n \r\n # Validation of parameters done.\r\n \r\n # Get the source file for the calling script:\r\n caller = inspect.stack()[1].filename\r\n \r\n # Populate required special fields into a dict named data,\r\n # to which we will also add found translatable strings.\r\n data = {\r\n \"__source_lang\": source_lang,\r\n \"__target_lang\": target_lang,\r\n \"__target_lang_readable\": \"\"\r\n }\r\n \r\n # Get the text of the calling file:\r\n with open(caller, 'r') as file:\r\n script = file.read()\r\n \r\n # Create an iterator to find all matches in the code for the\r\n # following regex which will match strings formatted as _(\"Translatable\")\r\n matches = re.finditer(r'^.*_\\([\"\\'](.+?)[\"\\']\\).*$', script, re.M)\r\n \r\n # Iterate matches,\r\n for match in matches:\r\n \r\n # and if the line is not a comment,\r\n if re.search(r'^.*#', match.group(0), re.M) is None:\r\n \r\n # add the match group to the data dict.\r\n data.update({match.group(1) : \"\"})\r\n \r\n return json.dumps(data, indent=4)\r\n\r\n# Function to cleanup a translation file,\r\n# identifying unused translation data.\r\ndef cleanup_translation_data(target_lang):\r\n # lang is a required field and must be a str of len 2.\r\n if type(target_lang) is not str or len(target_lang) != 2:\r\n # If it is not valid, raise a ValueError.\r\n raise ValueError(\"target_lang must be a two character string, such as 'en' or 'pl'.\")\r\n \r\n # Get the filename of the source (calling) script:\r\n caller = inspect.stack()[1].filename\r\n \r\n # Get the text of that file:\r\n with open(caller, \"r\") as file:\r\n script = file.read()\r\n \r\n scriptdata = []\r\n \r\n # Create an iterator to find all matches in the code for the\r\n # following regex which will match strings formatted as _(\"Translatable\")\r\n matches = re.finditer(r'^.*_\\([\"\\'](.+?)[\"\\']\\).*$', script, re.M)\r\n \r\n # Iterate matches,\r\n for match in matches:\r\n \r\n # and if the line is not a comment,\r\n if re.search(r'^.*#', match.group(0), re.M) is None:\r\n \r\n # add the match group to the scriptdata dict.\r\n scriptdata = scriptdata + [match.group(1)]\r\n 
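# scriptdata now lists every translatable phrase found in the calling script.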
\r\n    # Before messing with translation data, make a backup:\r\n    global __translations\r\n    transbackup = __translations\r\n    \r\n    # Next, try to load translation data for target_lang\r\n    __load_translations(target_lang)\r\n    \r\n    translationfiledata = list(__translations.keys())\r\n    \r\n    # Create a report to hold found errors:\r\n    report = []\r\n    \r\n    # Iterate over data from translation file\r\n    for phrase in translationfiledata:\r\n        # Ignore special fields\r\n        if phrase[:2] == \"__\":\r\n            continue\r\n        \r\n        # Check if the phrase exists in the source code\r\n        # as a translatable string\r\n        if phrase not in scriptdata:\r\n            \r\n            # If it doesn't, add it to the report.\r\n            report = report + [\"'\" + phrase + \"' was not found in the source code.\"]\r\n    \r\n    # Restore the translation data backup to its state from\r\n    # before this function was run:\r\n    __translations = transbackup\r\n    \r\n    return \"\\n\".join(report)\r\n\r\n# Function to check that all translatable strings in the\r\n# source file have a translation for a given language.\r\ndef validate_translation_data(source_lang, target_lang):\r\n\r\n    # Source lang is a required field and must be a str of len 2.\r\n    if type(source_lang) is not str or len(source_lang) != 2:\r\n        # If it is not valid, raise a ValueError.\r\n        raise ValueError(\"source_lang must be a two character string, such as 'en' or 'pl'.\")\r\n    \r\n    # Otherwise, if the passed source_lang is valid,\r\n    else:\r\n        # Store it back to the local variable \"source_lang\".\r\n        # It is also stored here as lowercase to standardize\r\n        # case for later matching.\r\n        source_lang = source_lang.lower()\r\n    \r\n    # Next, validate the target_lang as we did for the source_lang.\r\n    \r\n    # If a valid target_lang string was passed,\r\n    if type(target_lang) == str and len(target_lang) == 2:\r\n        # Store it back to the local variable \"target_lang\"\r\n        # as a lowercase string, as before.\r\n        target_lang = target_lang.lower()\r\n    \r\n    else:\r\n        # If it is not valid, raise a ValueError.\r\n        raise ValueError(\"target_lang must be a two character string, such as 'en' or 'pl'.\")\r\n    \r\n    # Validation of parameters done.\r\n    \r\n    # Get the filename of the source (calling) script:\r\n    caller = inspect.stack()[1].filename\r\n    \r\n    # Get the text of that file:\r\n    with open(caller, \"r\") as file:\r\n        script = file.read()\r\n    \r\n    # Initialize a data dict with the required \"__source_lang\" and \"__target_lang\" fields:\r\n    scriptdata = {\r\n        \"__source_lang\": source_lang,\r\n        \"__target_lang\": target_lang,\r\n        \"__target_lang_readable\": \"\"\r\n    }\r\n    \r\n    # Create an iterator to find all matches in the code for the\r\n    # following regex which will match strings formatted as _(\"Translatable\")\r\n    matches = re.finditer(r'^.*_\\([\"\\'](.+?)[\"\\']\\).*$', script, re.M)\r\n    \r\n    # Iterate matches,\r\n    for match in matches:\r\n        \r\n        # and if the line is not a comment,\r\n        if re.search(r'^.*#', match.group(0), re.M) is None:\r\n            \r\n            # add the match group to the scriptdata dict.\r\n            scriptdata.update({match.group(1) : \"\"})\r\n    \r\n    # Get subset of special keys in dict:\r\n    specialkeys = dict({(key, value) for key, value in scriptdata.items() if key[:2] == \"__\"})\r\n    \r\n    # Before messing with translation data, make a backup:\r\n    global __translations\r\n    transbackup = __translations\r\n    \r\n    # Next, try to load translation data for target_lang\r\n    __load_translations(target_lang)\r\n    \r\n    # Create variables to hold reports of any missing strings\r\n    # and incomplete/invalid translations found in the file:\r\n    missings: typing.List[str] = []\r\n    incompletes: typing.List[str] = []\r\n    \r\n    # For every translatable key in the source file:\r\n    for key in 
scriptdata.keys():\r\n \r\n # If that key does not exist in the translation file as well,\r\n if key not in __translations.keys():\r\n \r\n # Add it to the missings report.\r\n missings = missings + [' \"' + key + '\": None,\\n']\r\n \r\n # Otherwise, if the key exists on file but doesn't have a valid translation:\r\n elif type(__translations[key]) != str or len(__translations[key].strip()) < 1:\r\n \r\n # Add it to the incompletes report.\r\n incompletes = incompletes + [\"(!) Source phrase '\" + key\r\n + \"' exists in file but has no valid translation to return.\"]\r\n \r\n # Build the final report of errors, if errors were found:\r\n if len(incompletes) > 0 or len(missings) > 0:\r\n \r\n # Add incompletes to the report.\r\n report = incompletes\r\n \r\n # If there are missings to add,\r\n if len(missings) > 0:\r\n \r\n # Add a header line for missings,\r\n report = report + [\"(!) Source phrase for the following keys does not exist on file:\"]\r\n \r\n # Then add the missings to the report.\r\n report = report + missings\r\n \r\n # Combine the report (currently a list of strings)\r\n # into one large string with newline splits.\r\n report = \"\\n\".join(report)\r\n \r\n # As cleanup, if missings were added, trim the last two\r\n # characters from the report; these will be \",\\n\" and\r\n # should be discarded for readability.\r\n if len(missings) > 0:\r\n report = report[:-2]\r\n \r\n # Finally, with errors found and the report built,\r\n # Raise it as a warning.\r\n raise RuntimeWarning(\"Errors found in translation data:\\n\" + report)\r\n \r\n \r\n # Before returning from the function,\r\n # restore the translation data backup to its\r\n # state before the function was run:\r\n __translations = transbackup\r\n \r\n # Finally, if we made it this far without errors, return a string\r\n # describing what data was found in the file.\r\n return \"Found \" + str(len(scriptdata) - len(specialkeys)) \\\r\n + \" unique translatable strings in file, plus \" \\\r\n + str(len(specialkeys)) + \" special keys. 
All translation data OK.\"\r\n\r\n\r\n","repo_name":"adyrosebrigg/localize.py","sub_path":"localize.py","file_name":"localize.py","file_ext":"py","file_size_in_byte":21609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23570776951","text":"T=int(input())\r\nfor I in range(T):\r\n\ttemp=input().split()\r\n\tN=int(temp[0])\r\n\tK=int(temp[1])\r\n\t\r\n\troom=[False for i in range(N+1)]\r\n\troom[N]=True\r\n\tfor i in range(K):\r\n\t\tLast=-1\r\n\t\tMin=0\r\n\t\tMax=0\r\n\t\tans=-1\r\n\t\t\r\n\t\tfor j in range(N):\r\n\t\t\tLs=j-Last-1\r\n\t\t\tif room[j]:\r\n\t\t\t\tLast=j\r\n\t\t\t\tcontinue\r\n\t\t\tRs=room[j+1:].index(True)\r\n\t\t\tif min(Ls,Rs)>Min:\r\n\t\t\t\tMin=min(Ls,Rs)\r\n\t\t\t\tMax=max(Ls,Rs)\r\n\t\t\t\tans=j\r\n\t\t\telif min(Ls,Rs)==Min:\r\n\t\t\t\tif max(Ls,Rs)>Max:\r\n\t\t\t\t\tMax=max(Ls,Rs)\r\n\t\t\t\t\tans=j\r\n\t\troom[ans]=True\r\n\t\t#print(room)\r\n\t\t\r\n\t\tif i==K-1:\r\n\t\t\tprint(\"Case #\"+str(I+1)+\": \"+str(Max)+\" \"+str(Min))\r\n\t\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2587.py","file_name":"2587.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23989182121","text":"import os\nfrom settings.common import *\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\nALLOWED_HOSTS = []\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\n# pylint: disable=undefined-variable\nSECRET_KEY = '##DJANGO_SECRET##'\n\n# pylint: disable=undefined-variable\nDATABASES['default'] = {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'dbIntranet.sqlite3')\n}\n","repo_name":"wen96/django-boilerplate","sub_path":"my_project/settings/testing_ci.py","file_name":"testing_ci.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75242095874","text":"#Project Euler Problem 23\n#Find the sum of all numbers that cant be expressed as the sum of two abundant numbers\n#Abundant numbers: numbers for which the sum of their proper divisors is greater than the number itself\n\nimport os\nimport sys\nif(len(sys.argv) > 1):\n os.chdir(sys.argv[1])\nsys.path.append(\"../../Utils\")\nsys.path.append(\"Utils\")\n\nfrom itertools import combinations as combination\nimport prime_utils as p\nimport combinatorics_utils as c_utils\nutil = p.Prime_Utils()\n\nthreshold = 28134\n\nproper_divisor_sum = lambda n : sum(util.get_divisor_list(n)[:-1])\nabundant_numbers = [x for x in xrange(1,threshold) if x < proper_divisor_sum(x)]\nabundant_set = set(abundant_numbers)\n\ndef is_abundant_sum(n,abundant_numbers):\n i = 0\n while(abundant_numbers[i] <= n / 2):\n if(n - abundant_numbers[i] in abundant_set):\n return True\n i += 1\n return False\n\nprint(sum(x for x in xrange(1,threshold) if not is_abundant_sum(x,abundant_numbers)))\n","repo_name":"joewledger/ProjectEuler","sub_path":"Problems/Euler23/Euler23.py","file_name":"Euler23.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2040565588","text":"#\n# @lc app=leetcode id=138 lang=python\n#\n# [138] Copy List with Random 
Pointer\n#\n\n# @lc code=start\n# Definition for a Node.\nclass Node:\n def __init__(self, x, next=None, random=None):\n self.val = int(x)\n self.next = next\n self.random = random\n\n\nclass Solution(object):\n def copyRandomList(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n\n if not head:\n return None\n\n index = 0\n nodes = []\n curr = head\n while curr:\n nodes.append(Node(curr.val))\n curr.val = (curr.val, index)\n curr = curr.next\n index += 1\n\n index = 0\n curr = head\n while curr:\n if curr.random:\n nodes[index].random = nodes[curr.random.val[1]]\n\n nodes[index].next = nodes[index + 1] if index + 1 < len(nodes) else None\n \n index += 1\n curr = curr.next\n\n return nodes[0]\n\n \n# @lc code=end\n\n","repo_name":"aryanjain28/DSA","sub_path":"revision_150/138.copy-list-with-random-pointer.py","file_name":"138.copy-list-with-random-pointer.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22376629461","text":"import re\nimport unittest\n\n\ndef f(filename):\n with open(filename) as f:\n lines = [re.split(\", y=|\\\\.\\\\.\", x.strip()[15:]) for x in f.readlines()][0] # read the lines\n min_x, max_x, min_y, max_y = int(lines[0]), int(lines[1]), int(lines[2]), int(lines[3])\n\n def pass_by(v_x, v_y):\n cur_x, cur_y, h_max = 0, 0, 0\n while cur_x <= max_x and cur_y >= min_y: # time is running\n cur_x += v_x # update the x position\n cur_y += v_y # update the y position\n h_max = max(h_max, cur_y)\n if min_x <= cur_x <= max_x and min_y <= cur_y <= max_y:\n return (True, h_max)\n v_x = max(v_x - 1, 0)\n v_y -= 1\n return (False, h_max)\n\n x, y, h = 0, 0, 0\n direction = \"b\"\n while x <= max_x and y >= min_y:\n flag, h_max = pass_by(x, y)\n if flag:\n h = max(h_max, h)\n if direction == \"a\":\n y -= 1\n x += 1\n if y < 0:\n y = 0\n direction = \"b\"\n elif direction == \"b\":\n y += 1\n x -= 1\n if x < 0:\n x = 0\n direction = \"a\"\n\n # y_max = 0\n # v_x_opt = 0\n # v_y_opt = 0\n # for x in range(min_x, max_x + 1):\n # for y in range(min_y, max_y + 1):\n # for t in range(1, 1000):\n # if y == -9 and t == 9 and x == 21:\n # print(\"Here\")\n # v1 = x / t + -1 * (t - 1) # 0.5 *\n # v2 = y / t - 1 * (t - 1) # 0.5 *\n # if v2 * v2 / 2 > y_max:\n # y_max = v2 * v2 / 2\n # v_x_opt = v1\n # v_y_opt = v2\n\n return h\n\n\nclass Test(unittest.TestCase):\n def test(self):\n self.assertEqual(f('data/17-test.txt'), 45)\n self.assertEqual(f('data/17-input.txt'), 2628)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"w4bo/coding","sub_path":"adventofcode/2021/17/test_part1.py","file_name":"test_part1.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8614909576","text":"import frappe\nfrom frappe.modules.utils import sync_customizations\n\n\ndef execute():\n\tsync_customizations(\"bloomstack_core\")\n\ttimesheets = frappe.get_all(\"Timesheet Detail\" , fields=[\"name\", \"task\"])\n\n\tfor timesheet in timesheets:\n\t\ttask_name = frappe.db.get_value(\"Task\", timesheet.task, \"subject\")\n\t\tfrappe.db.set_value(\"Timesheet Detail\", timesheet.name, \"task_name\", task_name, 
update_modified=False)","repo_name":"Bloomstack/bloomstack_core","sub_path":"bloomstack_core/patches/v0_0_1/set_task_name_in_timesheet.py","file_name":"set_task_name_in_timesheet.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"14840559492","text":"from aiogram.types import ReplyKeyboardMarkup\n\nback_message = '👈 Назад'\n\n\ndef back_markup():\n    markup = ReplyKeyboardMarkup(resize_keyboard=True, selective=True)\n    markup.add(back_message)\n\n    return markup\n","repo_name":"Mekan777-alt/Telegram_chat_bot","sub_path":"keyboards/buttons.py","file_name":"buttons.py","file_ext":"py","file_size_in_byte":226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5729316921","text":"# -*- coding: utf-8 -*-\n# **************************************************************************\n# *\n# * Authors:     Emmanuel Moebel (emmanuel.moebel@inria.fr)\n# *\n# * Inria - Centre de Rennes Bretagne Atlantique, France\n# *\n# * This program is free software; you can redistribute it and/or modify\n# * it under the terms of the GNU General Public License as published by\n# * the Free Software Foundation; either version 2 of the License, or\n# * (at your option) any later version.\n# *\n# * This program is distributed in the hope that it will be useful,\n# * but WITHOUT ANY WARRANTY; without even the implied warranty of\n# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# * GNU General Public License for more details.\n# *\n# * You should have received a copy of the GNU General Public License\n# * along with this program; if not, write to the Free Software\n# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA\n# * 02111-1307  USA\n# *\n# * All comments concerning this program package may be sent to the\n# * e-mail address 'you@yourinstitution.email'\n# *\n# **************************************************************************\nfrom enum import Enum\nfrom pyworkflow.object import String\nfrom pyworkflow.protocol import params, PointerParam, STEPS_PARALLEL\nfrom pyworkflow.utils import removeBaseExt\nfrom pyworkflow.utils.properties import Message\nfrom tomo.constants import BOTTOM_LEFT_CORNER\nfrom tomo.objects import Coordinate3D, SetOfTomograms, SetOfCoordinates3D\nfrom tomo.protocols import ProtTomoPicking\nfrom deepfinder import Plugin\nfrom deepfinder.constants import *\nimport deepfinder.convert as cv\nfrom deepfinder.protocols import ProtDeepFinderBase\nimport os\nfrom tomo.utils import getObjFromRelation\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass DFClusterOutputs(Enum):\n    coordinates = SetOfCoordinates3D\n\n\nclass DeepFinderCluster(ProtTomoPicking, ProtDeepFinderBase):\n    \"\"\"This protocol analyses segmentation maps and outputs particle coordinates and class.\"\"\"\n\n    _label = 'cluster'\n    _possibleOutputs = DFClusterOutputs\n\n    def __init__(self, **args):\n        super().__init__(**args)\n        self.stepsExecutionMode = STEPS_PARALLEL\n        self.clusteringSummary = String()\n\n    # --------------------------- DEFINE param functions ----------------------\n    def _defineParams(self, form):\n        form.addSection(label=Message.LABEL_INPUT)\n\n        form.addParam('inputSegmentations', PointerParam,\n                      pointerClass='SetOfTomoMasks',\n                      label=\"Segmentation maps\",\n                      important=True,\n                      help='Please select the segmentation maps you would like to analyze.')\n        form.addParam('cradius', params.IntParam,\n                      default=5,\n                      
label='Clustering radius',\n important=True,\n help='Should correspond to average radius of target objects (in voxels)')\n form.addParallelSection(threads=4, mpi=1)\n\n # --------------------------- INSERT steps functions ----------------------\n def _insertAllSteps(self):\n tomoMasks = [tomoMask.clone() for tomoMask in self.inputSegmentations.get()]\n for ind, tomoMask in enumerate(tomoMasks):\n pid = self._insertFunctionStep(self.launchClusteringStep, tomoMask, prerequisites=[])\n self._insertFunctionStep(self.createOutputStep, tomoMask, ind, prerequisites=pid)\n\n # --------------------------- STEPS functions -----------------------------\n def launchClusteringStep(self, segm):\n logger.info(f'Clustering step of ---> {segm.getTsId()}')\n fname_objl = 'objl_' + removeBaseExt(segm.getFileName()) + '.xml'\n fname_objl = os.path.abspath(os.path.join(self._getExtraPath(), fname_objl))\n\n # Launch DeepFinder executable:\n deepfinder_args = '-l ' + segm.getFileName()\n deepfinder_args += ' -r ' + str(self.cradius)\n deepfinder_args += ' -o ' + fname_objl\n\n Plugin.runDeepFinder(self, 'cluster', deepfinder_args)\n\n def createOutputStep(self, segm, segmInd):\n logger.info(f'Generating the output of ---> {segm.getTsId()}')\n boxSize = 2 * self.cradius.get()\n # Convert DeepFinder annotation output to Scipion SetOfCoordinates3D\n coord3DSet = getattr(self, self._possibleOutputs.coordinates.name, None)\n if not coord3DSet:\n setSegmentations = self.inputSegmentations.get()\n tomograms = getObjFromRelation(setSegmentations, self, SetOfTomograms)\n coord3DSet = SetOfCoordinates3D.create(self.getPath(), template='coordinates%s.sqlite')\n coord3DSet.setName('Detected objects')\n coord3DSet.setPrecedents(tomograms)\n coord3DSet.setSamplingRate(setSegmentations.getSamplingRate())\n coord3DSet.setBoxSize(boxSize)\n\n clusteringSummary = ''\n # Get objl filename:\n fname_segm = os.path.splitext(segm.getFileName())\n fname_segm = os.path.basename(fname_segm[0])\n fname_objl = 'objl_' + fname_segm + '.xml'\n\n # Read objl:\n objl_tomo = cv.objl_read(os.path.abspath(os.path.join(self._getExtraPath(), fname_objl)))\n\n # Generate string for protocol summary:\n msg = 'Segmentation ' + str(segmInd + 1) + ': a total of ' + str(\n len(objl_tomo)) + ' objects has been found.'\n clusteringSummary += msg\n lbl_list = cv.objl_get_labels(objl_tomo)\n for lbl in lbl_list:\n objl_class = cv.objl_get_class(objl_tomo, lbl)\n msg = '\\nClass ' + str(lbl) + ': ' + str(len(objl_class)) + ' objects'\n clusteringSummary += msg\n clusteringSummary += '\\n'\n\n # Get tomo corresponding to current tomomask:\n tomo = segm.getTomogram()\n tomoId = segm.getTsId()\n\n for idx in range(len(objl_tomo)):\n x = objl_tomo[idx][DF_COORD_X]\n y = objl_tomo[idx][DF_COORD_Y]\n z = objl_tomo[idx][DF_COORD_Z]\n lbl = objl_tomo[idx][DF_LABEL]\n score = objl_tomo[idx][DF_SCORE]\n\n coord = Coordinate3D()\n coord.setVolume(tomo)\n coord.setPosition(x, y, z, BOTTOM_LEFT_CORNER)\n coord.setTomoId(tomoId)\n coord.setVolId(segmInd + 1)\n coord.setGroupId(lbl)\n coord.setScore(score)\n\n coord3DSet.append(coord)\n\n self._defineOutputs(**{self._possibleOutputs.coordinates.name: coord3DSet})\n self._defineSourceRelation(self.inputSegmentations, coord3DSet)\n\n self.clusteringSummary.set(clusteringSummary)\n self._store(self.clusteringSummary)\n\n # --------------------------- DEFINE info functions ---------------------- # TODO\n def _summary(self):\n \"\"\" Summarize what the protocol has done\"\"\"\n summary = []\n if self.isFinished():\n if 
self.clusteringSummary.get():\n                summary.append(self.clusteringSummary.get())\n\n        # if self._noAnnotations.get():\n        #     summary.append('NO OBJECTS WERE TAKEN.')\n\n        return summary\n","repo_name":"scipion-em/scipion-em-deepfinder","sub_path":"deepfinder/protocols/protocol_cluster.py","file_name":"protocol_cluster.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"37590793271","text":"# Find the store where the next purchase can be completed fastest\ndef min_store(t, s_i, ab, N): # current time, current store, store list, number of stores\n    if N == 1:\n        min_t = ab[0][0] * t + ab[0][1] + 1\n        buy_j = 1\n    elif s_i + 1 == N:\n        min_t = ab[s_i - 1][0] * t + ab[s_i - 1][1] + 1 # purchase time at the store one step away from s_i\n        buy_j = s_i-1\n    else:\n        min_t = ab[s_i+1][0] * t + ab[s_i+1][1] + 1 # purchase time at the store one step away from s_i\n        buy_j = s_i+1\n    t += 1\n    for j in range(1, N):\n        if t != 0:\n            if ab[j][0] * t + ab[j][1] + 1 < min_t:\n                min_t = ab[j][0] * t + ab[j][1] + 1\n                buy_j = j\n    next_t = min_t\n    return next_t, buy_j # time after the purchase, store bought from\n\n\nN, T = map(int, input().split())\nab = [list(map(int, input().split())) for i in range(N)]\n\nbuy = 0\nt = 0\ns_i = 0\n\nwhile t < T+0.5:\n    next_t, buy_j = min_store(t, s_i, ab, N)\n    print(next_t, buy_j)\n    t += next_t\n    s_i = buy_j\n    buy += 1\n    #print(t, s_i)\nbuy -= 1\nprint(buy)\n","repo_name":"dororich/AtCoder","sub_path":"日立製作所 社会システム事業部 プログラミングコンテスト2020/D - Manga Market.py","file_name":"D - Manga Market.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31896520506","text":"\"\"\"\r\nA perfect number is a number for which the sum of its proper divisors is\r\nexactly equal to the number. For example, the sum of the proper divisors\r\nof 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect\r\nnumber.\r\n\r\nA number n is called deficient if the sum of its proper divisors is less\r\nthan n and it is called abundant if this sum exceeds n.\r\n\r\nAs 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest\r\nnumber that can be written as the sum of two abundant numbers is 24. By\r\nmathematical analysis, it can be shown that all integers greater than 28123\r\ncan be written as the sum of two abundant numbers. However, this upper\r\nlimit cannot be reduced any further by analysis even though it is known that\r\nthe greatest number that cannot be expressed as the sum of two abundant\r\nnumbers is less than this limit.\r\n\r\nFind the sum of all the positive integers which cannot be written as the sum\r\nof two abundant numbers.\r\n\"\"\"\r\n\r\nimport time\r\nfrom itertools import combinations_with_replacement\r\nimport numpy\r\n\r\n\r\n# The right way to do it, vastly more efficient than the previous\r\n# versions (which were the same idea written in two different ways)\r\ndef divisores_tres(n):\r\n    divi = [1]\r\n    s = 1\r\n    fin = int(numpy.sqrt(n))\r\n    if fin * fin == n:\r\n        divi.append(fin)\r\n        s = 0\r\n    for i in range(2, fin + s):\r\n        if n % i == 0:\r\n            divi.append(i)\r\n            divi.append(n // i)\r\n\r\n    return divi\r\n\r\n\r\ndef numeros_abundantes(n):\r\n    f = lambda x: sum(divisores_tres(x)) > x\r\n    return [i for i in range(2, n + 1) if f(i)]\r\n\r\nN = 28123\r\nstart = time.time()\r\n\r\n\r\n## This is where the time is lost, but it is also hard to improve;\r\n## the two loops are different (the first one is enormous)
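\r\n# Mark every number up to 2*N that is a sum of two abundant numbers;\r\n# combinations_with_replacement also covers the x + x case.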
\r\nsuma_abundantes = [False] * (2 * N + 1)\r\na = numeros_abundantes(N)\r\nfor i in combinations_with_replacement(a, 2):\r\n    suma_abundantes[sum(i)] = True\r\n\r\nprint(sum(i for i in range(1, N + 1) if not suma_abundantes[i]))\r\n\r\nprint(\"time = {:.5f} s\".format(time.time() - start))\r\n\r\n# 4179871\r\n# time = 7.21484 s\r\n","repo_name":"floppp/programming_challenges","sub_path":"project_euler/001-050/23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13421829288","text":"\nimport os\nimport sys\nimport shlex\nfrom subprocess import PIPE\n\nfrom process import spawn\n\nfrom .error import AnaGondaError\nfrom .base import AnaGondaContext\n\n_go_get = 'golang.org/x/tools/cmd/godoc'\n\n\nclass DocError(AnaGondaError):\n    \"\"\"Fires on Doc errors\n    \"\"\"\n\n\nclass Doc(AnaGondaContext):\n    \"\"\"Context to run go doc cmd into anaconda_go\n    \"\"\"\n\n    def __init__(self, path, expr, private, env_ctx):\n        self.path = path\n        self.expr = expr\n        self.private = private\n        super(Doc, self).__init__(env_ctx, _go_get)\n\n    def __enter__(self):\n        \"\"\"Check binary existence and perform command\n        \"\"\"\n\n        super(Doc, self).__enter__()\n        if not self._bin_found:\n            raise DocError('{0} not found...'.format(self.binary))\n\n        return self.doc()\n\n    def doc(self):\n        \"\"\"Run the doc command and return back the results as a string\n        \"\"\"\n\n        args = shlex.split('\\'{0}\\' doc {1}{2}'.format(\n            self.binary, '-u ' if self.private else '', self.expr\n        ))\n        print(' '.join(args))\n        godoc = spawn(\n            args, stdout=PIPE, stderr=PIPE, env=self.env,\n            cwd=os.path.dirname(self.path)\n        )\n        out, err = godoc.communicate()\n        if err is not None and len(err) > 0:\n            if sys.version_info >= (3,):\n                err = err.decode('utf8')\n            raise DocError(err)\n\n        if sys.version_info >= (3,):\n            out = out.decode('utf8')\n\n        return out\n\n    @property\n    def binary(self):\n        \"\"\"Return back the binary path\n        \"\"\"\n\n        return os.path.join(self.env['GOROOT'], 'bin', 'go')\n","repo_name":"DamnWidget/anaconda_go","sub_path":"plugin/handlers_go/anagonda/context/godoc.py","file_name":"godoc.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"61"} +{"seq_id":"23092032207","text":"import network\nimport time\nimport machine  # needed for machine.deepsleep() below\nsta_if = network.WLAN(network.STA_IF)\nsta_if.active(True)\nsta_if.connect('Wi-Fi', '15721572')\n\ncounter = 15\nwhile not sta_if.isconnected():\n    time.sleep(1)\n    counter -= 1\n    if not counter:\n        print(\"Can't connect to wi-fi network\")\n        machine.deepsleep()\n\nprint(\"Connected to wi-fi network\")","repo_name":"dinartal/esp8266_micropython","sub_path":"connect_wifi.py","file_name":"connect_wifi.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29345346087","text":"\"\"\"\nA keyword argument is one you pass by naming the parameter in the call,\nso the order of the arguments does not matter.\n\"\"\"\n\n\ndef describe_pet(animal_type, pet_name):\n    \"\"\"Display information about pet\"\"\"\n    print(f\"\\nMy favorite animal is a {animal_type}\")\n    print(f\"My {animal_type}'s name is {pet_name.title()}\")\n\n\n# no matter what order you pass the arguments in,\n# they are identified by their keyword\n\ndescribe_pet(animal_type=\"hamster\", 
pet_name=\"twilio\")\ndescribe_pet(pet_name=\"sasha\", animal_type=\"dog\")\n","repo_name":"Any28Flo/python-crash-course","sub_path":"08_functions/05_keyword_arguments.py","file_name":"05_keyword_arguments.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14868317478","text":"import numpy as np\nimport sympy\n\nclass pulsemodref():\n def __init__(self, str_modfunc=None):\n \n #\n # Dictionary with string that matches to :\n # 1. the correct modulation function and \n # 2. a default list of arguments to feed the modulation function\n #\n self.dict_master = {'RECT': [self.f_rectangle, {'amp':1}],\n 'GAUS': [self.f_gaussian, {'center':0, 'sd':1, 'maxamp':1, 'norm':True}],\n 'SINC': [self.f_sinc, {'center':0, 'peaktozero':1, 'maxamp':1, 'norm':True}],\n 'HRM2': [self.f_hermite_poly2, {'center':0, 'sd_gauss':1, 'coeff':1, 'maxamp':1}]}\n \n # check is string is given upon object initialization \n if str_modfunc:\n self.reset_defaults(str_modfunc)\n else:\n self.modfunc = {}\n self.moddict = {}\n self.modstr = None\n \n def reset_defaults(self, str_modfunc):\n #\n # Input is a string with first 4 letters matching one of \n # the keys in self.dict_master\n #\n key2match = str_modfunc[:4].upper()\n if key2match in self.dict_master:\n self.modfunc = self.dict_master[key2match][0]\n self.moddict = self.dict_master[key2match][1]\n self.modstr = key2match\n else:\n raise ValueError('Specified key is not assigned to a function within puslemodfuncs')\n \n def f_gaussian(self, x, center=0, sd=1, maxamp=1, norm=False):\n #\n # To output a normalized Gaussian distribution\n # Set maxamp to either: 0 or None\n #\n\n if not maxamp:\n maxamp = 1/(sd*((2*np.pi)**0.5))\n elif norm:\n maxamp *= 1/(sd*((2*np.pi)**0.5)) \n return np.exp(-(1/2)*((x-center)/sd)**2)*maxamp\n\n def f_sinc(self, x, center=0, peaktozero=1, maxamp=1, norm=True):\n #\n # To output a normalized distribution\n # Set maxamp to either: 0 or None\n #\n if not maxamp:\n maxamp = 1\n elif norm:\n maxamp /= peaktozero\n return np.sinc((x-center)/peaktozero)*maxamp #note np.sinc(x) = sin(pi*x)/(pi*x)\n\n \n def f_hermite_poly2(self, x, center=0, sd_gauss=1, coeff=1, maxamp=1):\n #\n # To output a normalized distribution\n # Set maxamp to either: 0 or None\n #\n # Note: For now, code is only ready for polynomials up to order 2!\n #\n \n expr1 = np.exp(-(((x-center)/sd_gauss)**2)/2)*(1-coeff*(((x-center)/sd_gauss)**2)/2)\n normfactor = -((np.pi/2)**0.5)*sd_gauss*(coeff-2)\n if not maxamp:\n maxamp = 1\n \n return expr1*maxamp/normfactor\n \n \n def f_rectangle(self, x, amp):\n return amp*np.ones(np.asarray(x).size)\n \n\n \nclass chirpfuncs():\n def __init__(self, str_modfunc=None):\n \n #\n # Dictionary with string that matches to :\n # 1. the correct modulation function and \n # 2. 
a default list of arguments to feed the modulation function\n        #\n        self.dict_sympy = {}\n        self.dict_sympy['RAMP'] = [self.f_ramp, {'slope':1, 'intercept':0}]\n        \n        self.dict_master = {}\n        for key in self.dict_sympy:\n            modfunc, dict_args = self.dict_sympy[key]\n            self.dict_master[key] = {'expr': sympy.integrate(modfunc(dict_args), sympy.symbols('t'))}\n        \n        self.modfunc = self.eval_expr\n        \n        # check if a string is given upon object initialization \n        if str_modfunc:\n            self.reset_defaults(str_modfunc)\n        else:\n            self.moddict = {}\n            self.modfunc_args = None\n            self.modfunc_sympy = None\n    \n    def reset_defaults(self, str_modfunc):\n        #\n        # Input is a string with first 4 letters matching one of \n        # the keys in self.dict_master\n        #\n        key2match = str_modfunc[:4].upper()\n        if key2match in self.dict_master:\n            self.modstr = key2match\n            self.modfunc_sympy = self.dict_sympy[key2match][0]\n            self.modfunc_args = self.dict_sympy[key2match][1]\n            self.moddict = self.dict_master[key2match]\n        else:\n            raise ValueError('Specified key is not assigned to a function within pulsemodfuncs')\n    \n    def eval_expr(self, expr, tval):\n        return expr.subs(sympy.symbols('t'), tval)\n    \n    def f_ramp(self, dict_args):\n        slope = dict_args['slope']\n        intercept = dict_args['intercept']\n        return slope*sympy.symbols('t')+intercept \n    \n    \n\n","repo_name":"jstangnv/NV_HyperfineDriving","sub_path":"BaseScripts_by_JJ/pulsemods.py","file_name":"pulsemods.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6846102700","text":"from django.urls import path,include\n\nfrom busdetails import views\n\nurlpatterns = [\n\n    path('',views.BusCompaniesListView.as_view()),\n    path('create/',views.BusCompaniesCreateView.as_view()),\n    path('update/<int:pk>/',views.BusCompaniesUpdateView.as_view()),\n    path('delete/<int:pk>/',views.BusCompaniesDeleteView.as_view()),\n]","repo_name":"sumonmd/Bus-Company-Details","sub_path":"busdetails/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2834933472","text":"# Write a decorator that counts how many times the decorated function has been called\n\ndef counter_dec(func):\n    def wrapper(a):\n        wrapper.count += 1\n        res = func(a)\n        name = func.__name__\n        count = wrapper.count\n        print(f'{name} was called {count} time(s)')\n        return res\n    wrapper.count = 0\n    return wrapper\n\n@counter_dec\ndef function(b):\n    return b\nprint(function(2))\nprint(function(4))\nprint(function(7))\n\n\n# def counter_dec(n):\n#     n=0\n#     if True:\n#         n+=1\n#         print(f'function called {n} times.')\n#     return counter_dec\n#\n# @counter_dec\n# def function(arg):\n#     x=2\n#     print(arg,x)\n# function(5)\n\n\n    ","repo_name":"Sergey-Malets/Homeworks","sub_path":"homework/hw_28_func.py","file_name":"hw_28_func.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3793864614","text":"import hashlib\nimport hmac\nimport json\nfrom operator import itemgetter\n\nimport aiohttp\nimport asyncio\n\n\nasync def fetch_url(url: str, header: dict, params: dict, req_method: str, signature: str) -> dict:\n    params['signature'] = signature\n    async with aiohttp.ClientSession() as session:\n        if req_method == 'post':\n            method = session.post\n        elif req_method == 'get':\n            method = session.get\n        elif req_method == 'delete':\n            method = session.
session.delete\n async with method(url + '?' + Signature.generate_url(params), headers=header) as response:\n return json.loads(await response.text())\n\n\nclass Signature:\n @staticmethod\n def __order_params(data: dict) -> list[tuple[str, str]]:\n data = dict(filter(lambda el: el[1] is not None, data.items()))\n has_signature = False\n params = []\n for key, value in data.items():\n if key == 'signature':\n has_signature = True\n else:\n params.append((key, str(value)))\n\n params.sort(key=itemgetter(0))\n if has_signature:\n params.append(('signature', data['signature']))\n return params\n\n @staticmethod\n def generate_url(data: dict) -> str:\n ordered_data = Signature.__order_params(data)\n query_string = '&'.join([f\"{d[0]}={d[1]}\" for d in ordered_data])\n return query_string\n\n @staticmethod\n def _generate_signature(data: dict, secret_api) -> hmac:\n ordered_data = Signature.__order_params(data)\n query_string = '&'.join([f\"{d[0]}={d[1]}\" for d in ordered_data])\n m = hmac.new(secret_api.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)\n return m\n\n @staticmethod\n async def generate_query(request_method: str, endpoint: str, header: list, params: list, signs: list) -> tuple:\n signatures = [str(Signature._generate_signature(param, sign).hexdigest()) for param, sign in zip(params, signs)]\n headers = [{'Content-Type': 'application/json;charset=utf-8', 'X-MBX-APIKEY': x} for x in header]\n tasks = [fetch_url(endpoint, header, param, request_method, signature) for\n header, param, signature in zip(headers, params, signatures)]\n results = await asyncio.gather(*tasks)\n\n return results\n\n","repo_name":"Gifourm/tradeapp","sub_path":"src/exchange_api/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6621361272","text":"from time import time\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName('TEST').getOrCreate()\nst=time()\nps=spark.read.parquet('LBR M-18.gzip',inferSchema=True,header=True)\nprint(str(round((time()-st),3)))\nSM=spark.sparkContext.emptyRDD()\nps=ps.select(['Fiscal year/period', 'Order - Material (Key)',\n 'Order - Material (Text)', 'Order', 'Operation', 'Work Center', \n 'Standard Text Key', 'Operation Text', 'End Date',\n 'Operation Quantity', 'Hours Worked', 'Labor Rate', 'Labor Cost',\n 'Overhead Rate', 'Overhead Cost'])\nps=ps.na.drop(how='all')\nprint(SM)","repo_name":"AnveshJarabani/END-END-ETL-PIPELINES","sub_path":"DATA ETLS/PYSPARKTEST.py","file_name":"PYSPARKTEST.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3475156102","text":"from django import forms\r\n#from django.forms.util import ErrorList\r\n# from django.contrib.auth.models import User\r\n# from django.contrib.auth.forms import UserCreationForm\r\n# from django.contrib.auth.models import Group, Permission\r\n# from django.contrib.contenttypes.models import ContentType\r\n\r\nclass UpperCaseField(forms.CharField):\r\n \r\n \"\"\"\r\n Create custom form field that extends CharField and changes all characters \r\n is the field to uppercase\r\n \"\"\"\r\n \r\n def clean(self, value):\r\n try:\r\n return value.upper()\r\n except:\r\n raise forms.ValidationError\r\n\r\nclass AddNoteForm(forms.Form):\r\n\r\n \"\"\" \r\n Create form that allows to add new text note with at least 10 symbols\" \r\n \"\"\"\r\n\r\n note = 
UpperCaseField(\r\n        widget=forms.Textarea(attrs={\r\n            'placeholder':'# newnote', \r\n            'class':'form-control',\r\n            'rows':5}),\r\n        required=False)\r\n\r\n    def clean_note(self):\r\n        \"\"\" Raise ValidationError when the note contains fewer than 10 symbols \"\"\"\r\n        message = self.cleaned_data['note']\r\n        num_symbs = len(message)\r\n        if num_symbs < 10:\r\n            raise forms.ValidationError(\"Your note should contain at least 10 symbols!\")\r\n        return message\r\n","repo_name":"Roman-Klimkevych/Roman_Klimkevych_test_7web","sub_path":"djangotest/apps/notes/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"572172597","text":"class Pila:\n    def __init__(self):\n        self.items = []\n\n    def esta_vacia(self):\n        return len(self.items) == 0\n\n    def apilar(self, elemento):\n        self.items.append(elemento)\n\n    def desapilar(self):\n        if self.esta_vacia():\n            return None\n        return self.items.pop()\n\n    def ver_tope(self):\n        if self.esta_vacia():\n            return None\n        return self.items[-1]\n\n\ndef imprimir_pila(pila):\n    if pila.esta_vacia():\n        print(\"The stack is empty.\")\n    else:\n        print(\"Documents on the stack:\")\n        for documento in pila.items[::-1]:\n            print(documento)\n\n\n# Create the stack\npila_documentos = Pila()\n\n# a) Print the stack's current list of documents\nimprimir_pila(pila_documentos)\n\n# b) Add documents to the stack\npila_documentos.apilar(\"Informe Final\")\npila_documentos.apilar(\"Guia de Estudio\")\npila_documentos.apilar(\"Tesis 4\")\npila_documentos.apilar(\"Seminario Osorno\")\npila_documentos.apilar(\"Avance Tesis\")\npila_documentos.apilar(\"Proyecto Integrador\")\n\n# c) Get the element currently on top of the stack\nultimo_documento = pila_documentos.ver_tope()\nprint(\"Top document:\", ultimo_documento)\n\n# d) Remove the document on top\npila_documentos.desapilar()\n\n# e) Print the updated document stack\nimprimir_pila(pila_documentos)\n\n# f) Check whether the document stack is empty\nif pila_documentos.esta_vacia():\n    print(\"The stack is empty.\")\nelse:\n    print(\"The stack is not empty.\")\n","repo_name":"Benjaminguillermoburgos/Guias_ETD","sub_path":"Ej3_guia.py","file_name":"Ej3_guia.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74895639873","text":"\"\"\"The logs command.\"\"\"\n\nfrom .base import *\n\n\nclass Logs(Base):\n\n    @staticmethod\n    def display_logs(service, follow):\n        command = [DOCKER_COMPOSE, \"logs\"]\n        if follow:\n            command.append(\"-f\")\n        if service:\n            command.extend(service)\n        try:\n            subprocess.call(command)\n        except KeyboardInterrupt:  # Need to just return from the subprocess\n            return\n\n    def run(self):\n        follow = self.options['-f']\n        if not self.options['<service>']:\n            Logs.display_logs(\"\", follow)\n        else:\n            Logs.display_logs(self.options['<service>'], follow)\n","repo_name":"Fattouche/Stratocumulus","sub_path":"src/cumulus/commands/logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8354000965","text":"import pandas as pd\nimport numpy as np\n\ndef my_func(x):\n\tprint(f'Data frame shape is {x.shape}')\n\tprint(f'Data frame length is {len(x)}')  # len(df) gives the number of rows\n\tprint(f'Columns are: 
{x.columns}')\n\treturn\n#Example:\nvehicles_df=pd.read_excel('data/vehicles.xlsx')\nmy_func(vehicles_df)\n","repo_name":"Alvaru89/vehicles_insight","sub_path":"vehicles.py","file_name":"vehicles.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"22802044835","text":"import tabula\n# from tabulate import tabulate\nimport io\nimport logging\nimport argparse\nimport datetime\n\n#For SCB\nimport scb_parser\n\n#For MoneyWiz\nimport webbrowser\nimport moneywiz_url_parser\n\nimport csv\n\n# Logger Setup\nlogging.basicConfig(format='%(levelname)s %(filename)s:%(lineno)s : %(message)s', level=logging.WARNING)\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# Arg Setup\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n    description=\"Generate cost and usage report for the last 3 months grouped by service\")\n\nparser.add_argument('--infile', type=str, default=f'./data/AcctSt.pdf', help=\"Input file name\")\nparser.add_argument('--outfile', type=str, default=f'./data/acc_bnk_pst-{datetime.date.today()}.csv', help=\"Output file name\")\nparser.add_argument('--password', type=str, default=\"XXXXXXX\", help=\"PDF File Password\")\nparser.add_argument('--account', type=str, default=\"TEST\", help=\"MoneyWiz Account Name\")\nparser.add_argument('--currency', type=str, default=\"THB\", help=\"MoneyWiz Currency\")\nparser.add_argument('--save' , action=\"store_true\", help=\"Save MoneyWiz Transaction\")\nparser.add_argument('--csv', action=\"store_true\", help=\"Generate CSV, skip MoneyWiz URLs\")\nparser.add_argument('--debug', action=\"store_true\", help=\"Print debug info\")\nargs = parser.parse_args()\n\nif args.debug:\n    logger.setLevel(logging.DEBUG)\n\nreport_file_name = args.outfile\n\n# Read PDF file\nsource = tabula.read_pdf(args.infile, pages=\"all\" , password=args.password, guess=False, stream=True, multiple_tables=True, pandas_options={'header': None})\n\n# Parse SCB Transactions\nscb_parsed_transaction_data = scb_parser.parse_transations(source=source)\ntotal = scb_parser.get_total(source=source)\nis_amounts_verified = scb_parser.verify_amounts(scb_parsed_transaction_data, total)\n\nif not is_amounts_verified:\n    logger.error(\"Total amount is not equal to sum of transactions\")\n    exit(1)\n\n\n# After that we have to export it to CSV or try to execute the MoneyWiz import URLs.\n# We have some success with MoneyWiz URLs. The code below mostly works;
 another pass is needed to finish it.\n\n# Preprocess moneywiz_data using a list comprehension\npreprocessed_data_for_moneywiz = [\n    {\n        \"account\": args.account,\n        \"amount\": row[\"Amount\"],\n        \"currency\": args.currency,\n        \"date\": row[\"Date\"] + \" \" + row[\"Time\"],\n        \"payee\": row[\"Description\"],\n        \"memo\": row[\"Notes\"] + \" Code:\" + row[\"Code/Channel\"],\n        \"save\": args.save\n    }\n    for _, row in scb_parsed_transaction_data.iterrows()\n]\n\n# Preprocess csv_data using a list comprehension\npreprocessed_data_for_csv = [\n    {\n        \"Account\": args.account,\n        \"Amount\": row[\"Amount\"],\n        \"Date\": row[\"Date\"],\n        \"Payee\": row[\"Description\"],\n        \"Memo\": row[\"Notes\"] + \" Code: \" + row[\"Code/Channel\"] + \" Time: \" + row[\"Time\"],\n    }\n    for _, row in scb_parsed_transaction_data.iterrows()\n]\n\n# Process the preprocessed_data using get_moneywiz_url\nmoneywiz_urls = [\n    moneywiz_url_parser.get_moneywiz_url(\n        data[\"account\"],\n        data[\"amount\"],\n        data[\"currency\"],\n        data[\"date\"],\n        payee=data[\"payee\"],\n        memo=data[\"memo\"],\n        save=data[\"save\"]\n    )\n    for data in preprocessed_data_for_moneywiz\n]\n\n# Write transactions directly to MoneyWiz or create a CSV file\nif args.csv:\n    logger.debug(\"Generate CSV file\")\n    with open(args.outfile, 'w', newline='') as csvfile:\n        fieldnames = [\"Account\", \"Amount\", \"Date\", \"Payee\", \"Memo\"]\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n        # Write the header\n        writer.writeheader()\n\n        # Write the rows\n        for data in preprocessed_data_for_csv:\n            writer.writerow(data)\n\nelse:\n    logger.debug(\"Open MoneyWiz URLs\")\n    for url in moneywiz_urls:\n        logger.debug(url)\n        webbrowser.open(url)\n\n","repo_name":"cageyv/scb-to-moneywiz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
{"seq_id":"23582806631","text":"from sys import stdin, stdout, setrecursionlimit\r\nfrom copy import deepcopy\r\nsetrecursionlimit(10000)\r\n\r\nmoves = {\r\n    0: [1, 2, 4],\r\n    1: [0, 2, 5],\r\n    2: [0, 1, 3],\r\n    3: [2],\r\n    4: [0],\r\n    5: [1],\r\n}\r\n\r\nto_char = \"RYBOGV\"\r\n\r\ndef solve(colour_counts):\r\n    N, R, O, Y, G, B, V = colour_counts\r\n    colours = {}\r\n    for i, count in enumerate([R, Y, B, O, G, V]):\r\n        colours[i] = count\r\n\r\n    start = None\r\n    for c in [3, 4, 5]:\r\n        if colours[c] != 0:\r\n            start = c\r\n            break\r\n    if not start:\r\n        for c in [0, 1, 2]:\r\n            if colours[c] != 0:\r\n                start = c\r\n                break\r\n    placements = [start]\r\n    colours[start] -= 1\r\n\r\n    for _ in range(N-1):\r\n        next_options = moves[placements[-1]]\r\n        if len(next_options) == 1:\r\n            next_option = next_options[0]\r\n            if colours[next_option] == 0:\r\n                return \"IMPOSSIBLE\"\r\n\r\n        else:\r\n            if colours[next_options[-1]] != 0:\r\n                next_option = next_options[-1]\r\n            else:\r\n                cc1 = colours[next_options[0]]\r\n                cc2 = colours[next_options[1]]\r\n                if not cc1 and not cc2:\r\n                    return \"IMPOSSIBLE\"\r\n\r\n                if (cc1 == cc2):\r\n                    if next_options[0] in moves[placements[0]]:\r\n                        next_option = next_options[1]\r\n                    else:\r\n                        next_option = next_options[0]\r\n                elif cc1 > cc2:\r\n                    next_option = next_options[0]\r\n                else:\r\n                    next_option = next_options[1]\r\n\r\n        placements.append(next_option)\r\n        colours[next_option] -= 1\r\n\r\n    if placements[-1] not in moves[placements[0]]:\r\n        return \"IMPOSSIBLE\"\r\n\r\n    return \"\".join(to_char[c] for c in placements)\r\n\r\n\r\nT = int(stdin.readline())\r\n\r\nfor t in range(T):\r\n
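    # [Editor's note -- illustrative aside, not part of the scraped submission]
    # Each case line is "N R O Y G B V"; solve() remaps those counts onto the
    # indices of to_char and then greedily places one stall at a time.
    # Hand-traced example with one R, one Y and one B:
    #   solve([3, 1, 0, 1, 0, 1, 0])  # -> "RBY"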
    colour_counts = map(int, stdin.readline().strip().split())\r\n\r\n    result = solve(colour_counts)\r\n\r\n    stdout.write(\"Case #{}: {}\\n\".format(t+1, result))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_207/271.py","file_name":"271.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
{"seq_id":"24463644321","text":"from sklearn.neural_network import MLPRegressor\nfrom sklearn.linear_model import LinearRegression, SGDRegressor, PassiveAggressiveRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.multioutput import MultiOutputRegressor\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.base import BaseEstimator, ClassifierMixin\nimport numpy as np  # required by np.round/np.random/np.zeros/np.mean/np.std below\n\nclass MyEnsembleRegressor(BaseEstimator, ClassifierMixin):\n\tdef __init__(self, M=10, sampling_method=None, eta=None):\n\t\tself.M = M\n\t\tself.sampling_method = sampling_method\n\t\tself.eta = eta\n\t\tself.base_learners = []\n\t\t\n\t\tfor m in range(self.M):\n\t\t\t#self.base_learners.append(SGDRegressor(loss='squared_loss', l1_ratio=1, alpha=0.1))\n\t\t\tself.base_learners.append(SGDRegressor(loss='squared_loss', l1_ratio=1, alpha=0.05))\n\t\t\t#self.base_learners.append(MLPRegressor())\n\t\n\tdef fit(self, X, y):\n\t\tself.X_ = X\n\t\tself.y_ = y\n\t\t\n\t\tN = X.shape[0]\n\t\tdata_indices = range(0,N)\n\t\t\n\t\tfor m in range(self.M):\n\t\t\t\n\t\t\tif self.sampling_method == 'subsampling':\n\t\t\t\tNsub = int(np.round(self.eta*N))\n\t\t\t\tsample_indices = np.random.choice(data_indices, Nsub, replace=False)\n\t\t\t\tX_base = X[sample_indices, :]\n\t\t\t\ty_base = y[sample_indices]\n\t\t\t\tself.base_learners[m].fit(X_base, y_base)\n\t\t\telif self.sampling_method == 'bootstrap':\n\t\t\t\tsample_indices = np.random.choice(data_indices, N, replace=True)\n\t\t\t\t#print(X.shape)\n\t\t\t\t#print(y.shape)\n\t\t\t\tX_base = X[sample_indices, :]\n\t\t\t\ty_base = y[sample_indices]\n\t\t\t\tself.base_learners[m].fit(X_base, y_base)\n\t\t\telse:\n\t\t\t\tself.base_learners[m].fit(X,y)\n\t\t\t\n\t\treturn self\n\t\n\tdef partial_fit(self, X, y):\n\t\tfor m in range(self.M):\n\t\t\tself.base_learners[m].partial_fit(X,y)\n\t\treturn self\n\t\n\tdef predict(self, X):\n\t\tpreds = np.zeros((X.shape[0],self.M), dtype=float)\n\t\tfor m in range(self.M):\n\t\t\tpreds[:,m] = self.base_learners[m].predict(X)\n\t\tpred_mean = np.mean(preds, axis=1)\n\t\tpred_std = np.std(preds, axis=1)\n\t\t\n\t\treturn pred_mean, pred_std\n\nclass MyMultiOutputRegressor(BaseEstimator, ClassifierMixin):\n\tdef __init__(self, num_outs):\n\t\tself.num_outputs = num_outs\n\t\tself.output_regressors = []\n\t\tfor k in range(self.num_outputs):\n\t\t\tself.output_regressors.append(MyEnsembleRegressor(M=10, sampling_method='subsampling', eta=0.2))\n\t\t\t#self.output_regressors.append(BaggingRegressor(base_estimator=SGDRegressor(), n_estimators=30))\n\n\tdef fit(self, X, y):\n\t\tself.X_ = X\n\t\tself.y_ = y\n\t\tfor k in range(self.num_outputs):\n\t\t\tself.output_regressors[k].fit(X,y[:,k])\n\t\treturn self\n\t\n\tdef partial_fit(self, X, y):\n\t\tfor k in range(self.num_outputs):\n\t\t\tself.output_regressors[k].partial_fit(X,y[:,k])\n\t\treturn self\n\t\n\tdef predict(self, X):\n\t\tpred_means = np.zeros((X.shape[0],self.num_outputs), dtype=float)\n\t\tpred_stds = np.zeros((X.shape[0],self.num_outputs), dtype=float)\n\t\t\n\t\tfor k in range(self.num_outputs):\n\t\t\tcurr_mean, curr_std =
self.output_regressors[k].predict(X)\n\t\t\tpred_means[:,k] = curr_mean\n\t\t\tpred_stds[:,k] = curr_std\n\t\t\n\t\treturn pred_means, pred_stds\t\n\t\t\n\n","repo_name":"wpower12/DroneSimV2","sub_path":"SwarmSim/Regressors/MyMultiOutputRegressor.py","file_name":"MyMultiOutputRegressor.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"44004105663","text":"from logging import getLogger\n\nfrom pyramid.i18n import TranslationStringFactory\n\n\nlogger = getLogger(__name__)\n_ = TranslationStringFactory('arche_pas_social')\n\n\ndef includeme(config):\n try:\n config.include('.portlet')\n except (ImportError, AttributeError):\n logger.warn(\"Can't enable portlet - Arche is probably not installed or included.\")\n config.include('.views')\n config.include('.models')\n","repo_name":"ArcheProject/arche_pas_social","sub_path":"arche_pas_social/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34903122873","text":"\"\"\"Gui\n\nThis file contains the app (Tk) class, and the main method which starts the GUI and serial communication.\n\nRunning both simultaneously makes use of multithreading.\nIf serial communication is available, a separate thread is started\nfrom file_handler.py to run the serial communication.\n\nThis file is the starting point of the app. It creates the folder and files sensor readings will be stored into,\nstarts serial communication with the Arduino Micro, and starts the app.\nIt also defines all methods necessary for runtime app use.\n\"\"\"\nimport sys\nimport threading\nimport tkinter as tk\nimport matplotlib\n\nimport file_handler as fh\nimport pages as pg\nimport constants\n\nmatplotlib.use(\"TkAgg\")\n\n\nclass SensorCentral(tk.Tk):\n \"\"\"\n A class used to create the root of Tk interface.\n\n Attributes\n ----------\n frames : dict\n Contains all pages in app.\n key - SensorPage or StartPage (child) object\n value - instance of that page in app\n\n Methods\n -------\n show_frame(self, content)\n Raise the frame passed as 'content'.\n\n app_update(self)\n Call update on start page and check for door/window openings.\n As specified in main, this is called every START_UPDATE_INTERVAL_SECS s.\n Additionally, this is called on every update called from update page.\n\n sensor_update(self)\n Call update on sensor pages.\n As specified in main, this is called every SENSOR_UPDATE_INTERVAL_SECS s.\n Additionally, this is called on every update called from update page.\n \"\"\"\n\n frames = {}\n\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n\n tk.Tk.wm_title(self, constants.APP_NAME)\n\n # make app window as big as screen\n w, h = self.winfo_screenwidth(), self.winfo_screenheight()\n self.geometry(\"%dx%d+0+0\" % (w, h))\n\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n # wait for file inputs to stabilise if serial connection available\n if fh.check_serial_connection():\n fh.wait_for_file_input(constants.dps310_temp_csv)\n fh.wait_for_file_input(constants.tmp116_csv)\n fh.wait_for_file_input(constants.hdc2010_temp_csv)\n fh.wait_for_file_input(constants.hdc2010_hum_csv)\n\n for page in [pg.StartPage, pg.TMP116Page, pg.HDC2010Page, pg.OPT3001Page, pg.DPS310Page, 
pg.UpdatePage]:\n frame = page(container, self)\n self.frames[page] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(pg.StartPage)\n\n def show_frame(self, content):\n frame = self.frames[content]\n frame.tkraise()\n\n def app_update(self):\n pg.StartPage.update_start_data(self.frames[pg.StartPage])\n self.pressure_update()\n\n def sensor_update(self):\n pg.TMP116Page.update_data(self.frames[pg.TMP116Page],\n [constants.tmp116_csv], [constants.temp_string], [constants.temp_measurement],\n [constants.temp_name])\n\n pg.HDC2010Page.update_data(self.frames[pg.HDC2010Page],\n [constants.hdc2010_temp_csv, constants.hdc2010_hum_csv],\n [constants.temp_string, constants.hum_string],\n [constants.temp_measurement, constants.hum_measurement],\n [constants.temp_name, constants.hum_name], 3)\n\n pg.OPT3001Page.update_data(self.frames[pg.OPT3001Page],\n [constants.opt3001_csv], [constants.light_string], [constants.light_measurement],\n [constants.light_name], 4)\n\n pg.DPS310Page.update_data(self.frames[pg.DPS310Page],\n [constants.dps310_temp_csv, constants.dps310_pressure_csv],\n [constants.temp_string, constants.pressure_string],\n [constants.temp_measurement, constants.pressure_measurement],\n [constants.temp_name, constants.pressure_name], 5)\n\n def pressure_update(self):\n time = fh.check_pressure_diffs()\n if time != '':\n pg.StartPage.update_doors_message(self.frames[pg.StartPage], time)\n pg.StartPage.update_start_data(self.frames[pg.StartPage])\n\n\ndef call_repeatedly(interval, func, *args):\n \"\"\" Call func(*args) every {interval} seconds. \"\"\"\n\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval): # the first call is in `interval` secs\n func(*args)\n\n threading.Thread(target=loop, daemon=True).start()\n return stopped.set\n\n\nif __name__ == '__main__':\n fh.folder_prep() # prepare csv folder\n fh.connect_to_serial() # start serial communication if available\n\n app = SensorCentral() # start the app\n\n if fh.check_serial_connection():\n cancel_future_calls = call_repeatedly(constants.START_UPDATE_INTERVAL_SECS,\n app.app_update, ) # call for repeated app update and door open checks\n cancel_sensor_calls = call_repeatedly(constants.SENSOR_UPDATE_INTERVAL_SECS,\n app.sensor_update, ) # call for repeated sensor page updates\n\n app.iconbitmap(constants.ICON_PATH) # set app icon\n\n app.mainloop() # enter main app loop after repeated calls instantiated\n\n sys.exit() # exit program after window closes\n","repo_name":"lmlcvc/rpm-projekt","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4855128732","text":"# graph node\nclass Node():\n def __init__(self, num):\n self.num = num\n self.connect_node_list = []\n def __call__(self, num):\n self.num = num\n self.connect_node_list = []\n def append_connect_node(self, node):\n self.connect_node_list.append(node)\n\nif __name__ == '__main__':\n num_of_cases = int(input())\n for case_i in range(num_of_cases):\n num_list = []\n node_list = []\n num_of_edges = int(input())\n # create num_list + node_list\n for edge_i in range(num_of_edges):\n input_str = input()\n n1, n2 = input_str[1], input_str[3]\n n1, n2 = int(n1), int(n2)\n if n1 not in num_list:\n num_list.append(n1)\n node_list.append(Node(n1))\n if n2 not in num_list:\n num_list.append(n2)\n node_list.append(Node(n2))\n\n 
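            # [Editor's aside -- hedged sketch, not in the original file]
            # num_list.index(...) rescans the list for every edge (O(n) per
            # lookup); a dict keyed by vertex number gives O(1) lookups, e.g.:
            #   nodes = {}
            #   for v in (n1, n2):
            #       nodes.setdefault(v, Node(v))
            #   nodes[n1].append_connect_node(nodes[n2])
            #   nodes[n2].append_connect_node(nodes[n1])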
node_list[num_list.index(n1)].append_connect_node(node_list[num_list.index(n2)])\n node_list[num_list.index(n2)].append_connect_node(node_list[num_list.index(n1)])\n\n # test\n #for i in range(len(num_list)):\n # print(num_list[i], node_list[i].connect_node_list[0].num)\n\n # dfs\n connect_node_list_stack = []\n hamilton_circuit = []\n hamilton_circuit.append(node_list[0])\n while True:\n if len(hamilton_circuit) == len(node_list) and hamilton_circuit[0] in hamilton_circuit[-1].connect_node_list: # if hamilton circuit done\n print('True')\n break\n\n new_list = hamilton_circuit[-1].connect_node_list[:] # call by value, don't change connect_node_list!\n connect_node_list_stack.append(new_list)\n\n #print('->', connect_node_list_stack)\n while connect_node_list_stack[-1] == []: # if there are no neighbors\n connect_node_list_stack.pop()\n hamilton_circuit.pop()\n if hamilton_circuit == []: # if dfs no found\n print('False')\n break\n if_break_flag = False\n while connect_node_list_stack[-1][-1] in hamilton_circuit: # if node is already in circuit\n connect_node_list_stack[-1].pop()\n while connect_node_list_stack[-1] == []: # if there are no neighbors\n connect_node_list_stack.pop()\n hamilton_circuit.pop()\n if hamilton_circuit == []: # if dfs no found\n print('False')\n if_break_flag = True\n break\n if if_break_flag:\n break\n if if_break_flag:\n break\n hamilton_circuit.append(connect_node_list_stack[-1].pop()) # if connect_node_list_stack[-1][-1] not in circuit\n\n #for node in hamilton_circuit:\n # print('-->', node.num)\n del num_list\n del node_list","repo_name":"110621013/algorithm","sub_path":"hw/HW11_0608/Hamilton.py","file_name":"Hamilton.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22196617230","text":"import requests\nfrom microbrewforyou_app.forms import LoginForm, SignupForm, PostForm,\\\n EditUserForm, PicForm\nfrom microbrewforyou_app.models import CustomUser, Posts, BrewTypes, Breweries\nfrom django.views.generic.base import View\nfrom django.http import HttpResponse\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.shortcuts import render, HttpResponseRedirect, reverse, redirect\nfrom django.conf.urls.static import static\n\n\nclass BreweriesReloadView(View):\n def get(self, request):\n if request.user.is_superuser:\n full_breweries_list = []\n r = requests.get(\n url='https://raw.githubusercontent.com/openbrewerydb/openbrewerydb/master/breweries.json')\n full_breweries_list = r.json() # populate variable from api\n current_breweries_in_model = Breweries.objects.all() # in model\n print('Api brewery master list count: ', len(full_breweries_list))\n print('Model Brewery list count start: ',\n len(current_breweries_in_model))\n full_match = False\n for item in full_breweries_list:\n list_item_name = item['name']\n list_item_city = item['city']\n for model_item in current_breweries_in_model:\n if list_item_name == model_item.name and\\\n list_item_city == model_item.city:\n full_match = True\n break # match found break out of for loop for model\n else:\n full_match = False\n continue\n\n if full_match is False:\n new_brewery = Breweries.objects.create(\n name=item['name'],\n phone=item['phone'],\n address=item['street'],\n city=item['city'],\n state=item['state'],\n website=item['website_url']\n )\n else:\n continue\n current_breweries_in_model = Breweries.objects.all()\n print('Model Brewery list count end: ',\n 
len(current_breweries_in_model))\n return HttpResponseRedirect(reverse(\"homepage\"))\n\n return HttpResponseRedirect(reverse(\"homepage\"))\n\n\nclass IndexView(View):\n def get(self, request):\n words_quote = \"They who drink beer, think beer.\"\n words_author = \"Washington Irving\"\n # all posts merge of posts\n all_posts = []\n # add if for anonymous user\n if request.user.is_anonymous:\n follow_count = 0\n following_users_list = []\n number_posts = 0\n suggested_posts = []\n brewery_list_by_city = []\n fav_breweries = []\n friends_list = []\n fav_breweries_count = 0\n else:\n follow_count = len(request.user.users_following.all())\n following_users_list = request.user.users_following.all()\n for user in following_users_list:\n user_posts = Posts.objects.filter(author=user.id)\n for post in user_posts:\n all_posts.append(post)\n user_posts = Posts.objects.filter(\n author=request.user)\n for post in user_posts:\n all_posts.append(post)\n # end all posts merge\n # suggested posts based on city and brew_types_liked merged\n suggested_posts = []\n suggested_posts_list = []\n suggested_users_list = CustomUser.objects.filter(\n city=request.user.city, state=request.user.state)\n for user in suggested_users_list:\n if user.id != request.user.id:\n if user not in request.user.users_following.all():\n suggested_posts.append(Posts.objects.filter(\n author=user.id))\n if suggested_posts:\n for post in suggested_posts:\n suggested_posts_list.append(post)\n suggested_posts = suggested_posts[0].all()\n else:\n suggested_posts = suggested_posts\n # end suggested\n # nearby breweries\n brewery_list_by_city = Breweries.objects.filter(\n city=request.user.city, state=request.user.state)\n # end nearby breweries\n # favorite breweries start\n fav_breweries = request.user.fav_breweries.all()\n fav_breweries_count = len(fav_breweries)\n # favorite breweries end\n # friends start\n friends_list = []\n for user in following_users_list:\n friends_list.append(user)\n # freinds end\n number_posts = len(Posts.objects.filter(author=request.user))\n\n return render(\n request, 'index.html',\n {'follow_count': follow_count,\n 'number_posts': number_posts,\n 'words_author': words_author,\n \"words_quote\": words_quote,\n 'all_posts': all_posts,\n 'suggested_posts': suggested_posts,\n 'brewery_list_by_city': brewery_list_by_city,\n 'fav_breweries': fav_breweries,\n 'friends_list': friends_list,\n 'fav_breweries_count': fav_breweries_count})\n\n\ndef login_view(request):\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n user = authenticate(request, username=data.get(\n \"username\"), password=data.get(\"password\"))\n if user:\n login(request, user)\n return HttpResponseRedirect(request.GET.get(\n 'next', reverse(\"homepage\")))\n form = LoginForm()\n words_quote = \"He was a wise man who invented beer.\"\n words_author = \"Plato\"\n return render(request, \"login.html\", {\"form\": form,\n 'words_author': words_author,\n \"words_quote\": words_quote})\n\n\ndef signup_view(request):\n if request.method == \"POST\":\n form = SignupForm(request.POST, request.FILES)\n if form.is_valid():\n data = form.cleaned_data\n new_user = CustomUser.objects.create_user(\n username=data.get(\"username\"), password=data.get(\n \"password\"), first_name=data.get(\n \"first_name\"), bio=data.get(\n \"bio\"), user_image=data.get(\n \"user_image\"), address=data.get(\n \"address\"), city=data.get(\n \"city\").title(), state=data.get(\n \"state\").title())\n login(request, 
new_user)\n return HttpResponseRedirect(reverse(\"homepage\"))\n\n form = SignupForm()\n words_quote = \"Beer, it’s the best damn drink in the world.\"\n words_author = \"Jack Nicholson\"\n return render(request, \"sign_up.html\", {\"form\": form,\n 'words_author': words_author,\n \"words_quote\": words_quote})\n\n\ndef success(request):\n return HttpResponse('successfully uploaded')\n\n\ndef edit_user_view(request, user_id):\n edit_user = CustomUser.objects.filter(id=user_id).first()\n if edit_user == request.user:\n if request.method == \"POST\":\n user_form = EditUserForm(request.POST, request.FILES)\n if len(request.FILES) == 0:\n updated_request_files = request.FILES.copy()\n updated_request_files.update(\n {'user_image': edit_user.user_image}\n )\n user_form = EditUserForm(request.POST, updated_request_files)\n if user_form.is_valid():\n data = user_form.cleaned_data\n edit_user.username = data.get('username')\n edit_user.password = edit_user.password\n edit_user.first_name = data.get('first_name')\n edit_user.bio = data.get('bio')\n edit_user.user_image = data.get('user_image')\n edit_user.address = data.get('address')\n edit_user.city = data.get('city').title()\n edit_user.state = data.get('state').title()\n # breakpoint()\n edit_user.save()\n login(request, edit_user)\n return HttpResponseRedirect(reverse(\"homepage\"))\n user_form = EditUserForm(initial={'username': edit_user.username,\n 'first_name': edit_user.first_name,\n 'bio': edit_user.bio,\n 'user_image': edit_user.user_image,\n 'address': edit_user.address,\n 'city': edit_user.city,\n 'state': edit_user.state})\n words_quote = \"A man who lies about beer makes enemies.\"\n words_author = \"Stephen King\"\n return render(request, \"edit_user.html\",\n {\"form\": user_form,\n \"profile_user\": request.user,\n 'words_author': words_author,\n \"words_quote\": words_quote})\n else:\n return HttpResponseRedirect(reverse(\n \"edit_userview\", args=[edit_user.id]))\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"homepage\"))\n\n\nclass AddPostView(View):\n def get(self, request):\n words_quote = \"For a quart of Ale is a dish for a king.\"\n words_author = \"William Shakespeare\"\n form = PostForm()\n return render(\n request, \"add_post.html\",\n {\"form\": form, \"profile_user\": request.user,\n 'words_author': words_author, \"words_quote\": words_quote}\n )\n\n def post(self, request):\n form = PostForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n new_post = Posts.objects.create(\n body=data.get('body'),\n author=request.user\n )\n return HttpResponseRedirect(\n reverse(\"postview\", args=[new_post.id])\n )\n words_quote = (\n \"Beer is proof that God loves us and wants us to be happy.\"\n )\n words_author = \"Benjamin Franklin\"\n return render(\n request, \"add_post.html\",\n {\"form\": form, \"profile_user\": request.user,\n 'words_author': words_author, \"words_quote\": words_quote}\n )\n\n\ndef post_detail_view(request, post_id):\n my_post = Posts.objects.filter(id=post_id).first()\n words_quote = \"Beer, it’s the best damn drink in the world.\"\n words_author = \"Jack Nicholson\"\n return render(\n request, \"post_detail.html\",\n {\"post\": my_post,\n 'words_author': words_author, \"words_quote\": words_quote}\n )\n\n\ndef edit_post_view(request, post_id):\n edit_post = Posts.objects.filter(id=post_id).first()\n if edit_post.author == request.user:\n if request.method == \"POST\":\n post_form = PostForm(request.POST)\n if post_form.is_valid():\n data = 
post_form.cleaned_data\n edit_post.body = data.get('body')\n edit_post.save()\n return HttpResponseRedirect(\n reverse(\"postview\", args=[edit_post.id]))\n post_form = PostForm(initial={'body': edit_post.body})\n words_quote = \"He was a wise man who invented beer.\"\n words_author = \"Plato\"\n return render(request, \"edit_post.html\",\n {\"form\": post_form, \"profile_user\": request.user,\n 'words_author': words_author,\n \"words_quote\": words_quote}\n )\n else:\n return HttpResponseRedirect(reverse(\n \"edit_postview\", args=[edit_post.id]))\n\n\nclass UserPostListView(View):\n def get(self, request):\n user_posts = Posts.objects.filter(\n author=request.user).order_by('postTime').reverse()\n return render(\n request, \"user_posts.html\",\n {\"user_posts\": user_posts}\n )\n\n\nclass FollowingView(View):\n def get(self, request, follow_id):\n add_user = CustomUser.objects.filter(id=follow_id).first()\n request.user.users_following.add(add_user)\n request.user.save()\n return HttpResponseRedirect(reverse(\n \"homepage\"))\n\n\nclass UnfollowingView(View):\n def get(self, request, unfollow_id):\n remove_user = CustomUser.objects.filter(id=unfollow_id).first()\n logged_in_user = request.user\n logged_in_user.users_following.remove(remove_user)\n logged_in_user.save()\n return HttpResponseRedirect(reverse(\n \"homepage\"))\n\n\nclass FavoriteBreweryView(View):\n def get(self, request, brewery_id):\n user_to_edit = CustomUser.objects.filter(id=request.user.id).first()\n brewery = Breweries.objects.filter(id=brewery_id).first()\n user_to_edit.fav_breweries.add(brewery)\n user_to_edit.save()\n return HttpResponseRedirect(reverse(\n \"homepage\"))\n\n\nclass UnfavoriteBreweryView(View):\n def get(self, request, brewery_id):\n user_to_edit = CustomUser.objects.filter(id=request.user.id).first()\n brewery = Breweries.objects.filter(id=brewery_id).first()\n user_to_edit.fav_breweries.remove(brewery)\n user_to_edit.save()\n return HttpResponseRedirect(reverse(\n \"homepage\"))\n\n\nclass UserDetailView(View):\n def get(self, request, user_id):\n selected_user = CustomUser.objects.filter(id=user_id).first()\n\n following_list = request.user.users_following.all()\n if selected_user.users_following:\n number_following = len(selected_user.users_following.all())\n else:\n number_following = 0\n user_posts = Posts.objects.filter(\n author=user_id).order_by('postTime').reverse()\n number_posts = len(user_posts)\n words_quote = \"For a quart of Ale is a dish for a king.\"\n words_author = \"William Shakespeare\"\n return render(\n request, \"user_detail.html\",\n {\"number_posts\": number_posts,\n \"selected_user\": selected_user,\n \"user_posts\": user_posts,\n \"following_list\": following_list,\n \"number_following\": number_following,\n 'words_author': words_author, \"words_quote\": words_quote}\n )\n\n\nclass BreweryDetailView(View):\n def get(self, request, brewery_id):\n words_quote = \"He was a wise man who invented beer.\"\n words_author = \"Plato\"\n brewery = Breweries.objects.filter(id=brewery_id).first()\n return render(request,\n \"brewery_detail.html\",\n {\"brewery\": brewery,\n 'words_author': words_author,\n \"words_quote\": words_quote}\n )\n\n\nclass FavoriteBreweriesView(View):\n def get(self, request, favorite_id):\n breweriesname = Breweries.objects.get(id=favorite_id)\n logged_in_user = request.user\n logged_in_user.fav_breweries.add(breweriesname)\n logged_in_user.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n\n\nclass NearbyBreweriesView(View):\n 
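    # [Editor's note -- hedged aside, not in the original file] This nearby
    # lookup filters on city alone; the homepage query earlier in this file
    # also pins the state, and mirroring that here would look like:
    #   Breweries.objects.filter(city=request.user.city,
    #                            state=request.user.state)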
def get(self, request):\n        brewery_list_by_city = Breweries.objects.filter(\n            city=request.user.city).all()\n        return render(\n            request, \"nearby_breweries.html\",\n            {\"brewery\": brewery_list_by_city})\n\n\nclass FollowingBrewTypesView(View):\n    def get(self, request, follow_brew_type_id):\n        brewtype = BrewTypes.objects.filter(id=follow_brew_type_id).first()\n        request.user.fav_brewtypes.add(brewtype)\n        brewtype.img_upload = static(f'images/{follow_brew_type_id}.JPG')\n        request.user.save()\n        return HttpResponseRedirect(reverse(\n            \"homepage\"))\n\n\nclass UnFollowingBrewTypesView(View):\n    def get(self, request, unfollow_brew_type_id):\n        brewtype = BrewTypes.objects.filter(id=unfollow_brew_type_id).first()\n        request.user.fav_brewtypes.remove(brewtype)\n        request.user.save()\n        return HttpResponseRedirect(reverse(\n            \"homepage\"))\n\n\ndef brewtypes_view(request):\n    words_quote = \"A man who lies about beer makes enemies.\"\n    words_author = \"Stephen King\"\n    return render(request, \"brew_type_list.html\",\n                  {\"words_author\": words_author,\n                   \"words_quote\": words_quote})\n\n\ndef pic_form_view(request, brew_type_id):\n    edit_brew_type = BrewTypes.objects.filter(id=brew_type_id).first()\n\n    if request.method == 'POST':\n        form = PicForm(request.POST, request.FILES)\n\n        if form.is_valid():\n            data = form.cleaned_data\n            edit_brew_type.img_upload = data.get('img_upload')  # key must be a string; bare img_upload raised NameError\n            form.save()\n            return redirect('success')\n    else:\n        form = PicForm()\n    return render(request, 'favorite_brew_type.html', {'form': form})\n\n\ndef error404view(request, exception):\n    return render(request, '404.html')\n\n\ndef error403view(request, exception):\n    return render(request, '403.html')\n\n\ndef error500view(request):\n    return render(request, '500.html')\n","repo_name":"mavwatts/MicroBrewForYou","sub_path":"microbrewforyou_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"30580639759","text":"\"\"\"\n@File : WeChat-version-Android-HUAWEI-查看朋友圈.py\n@APPTYPE : Android\n\n@Modify Time @Author @Version @Action\n------------ ------- -------- -----------\n2022/3/9 9:36 xyhu 8.0.19 500\n\"\"\"\nimport time\nfrom selenium.webdriver.common.by import By\nimport WeChat_Base as WeChat\n\n\ndef moments():\n    driver = WeChat.driver\n    WeChat.tap_once(722, 2546, 808, 2601, 20)\n    time.sleep(2)\n    el1 = driver.find_element(By.XPATH, '//android.widget.TextView[@text=\"朋友圈\"]')\n    el1.click()\n    time.sleep(2)\n    WeChat.swipe_up(20)\n    time.sleep(2)\n\n\nmoments()\n","repo_name":"Lucky-Yuu/Auto","sub_path":"微信/WeChat-version-Android-查看朋友圈.py","file_name":"WeChat-version-Android-查看朋友圈.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
{"seq_id":"3342622554","text":"from machine import Timer, RTC, Pin\nimport NeoPixel\nfrom music import Song\nfrom touch import Touch\nfrom time import time\ntry:\n    import umath as math\nexcept ImportError:\n    import math\ntry:\n    from asyn import Semaphore\nexcept ImportError:\n    from asyncio import Semaphore\ntry:\n    import uasyncio as asyncio\nexcept ImportError:\n    import asyncio\n\nfrom animations.fire import Fire\n\n\nclass Note:\n    \"\"\"\n    Beats for a single note\n    \"\"\"\n    def __init__(self, duration, steps=34):\n        self.duration = duration\n        self.steps = steps\n        self.dt_tick = steps/duration\n        self.t_start = time()\n        self.pulses = []\n        self.positions = {}\n\n    # def add_pulse(self, note):
    # self.pulses[note[0]] = 0\n\n    async def pulse(self, note):\n        ts = note[0]\n        self.pulses.append(ts)\n        duration = self.duration\n        time_wait = (ts + self.t_start) - time()\n        if time_wait >= 0:\n            await asyncio.sleep(time_wait-duration)\n\n        steps = self.steps\n        for i in range(steps):\n            self.positions[ts] = i\n            print(i, ts, note[1])\n            await asyncio.sleep(duration/steps)\n\n\nclass GameRound:\n    def __init__(self, gracetime=0.1):\n        self.score = 0\n        self.multiplier = 1  # plain ints; bytearray(1) would raise TypeError on the += below\n        self.combo_count = 0\n        self.t_start = time()\n        self.gracetime = gracetime\n        self.pulses = {}\n        self.np_notes = {}\n        self.np_notes[0] = NeoPixel(Pin(17), 34, timing=True)\n        self.animations = []\n        self.animation_bg = None\n\n    def handle_touch(self, pos, ts):\n        accepted = False\n        note = self.pulses.get(pos)\n        if note:  # guard: .get() returns None when pos has no pending note\n            pulse = note[0]\n            if abs(pulse[0] - ts) < self.gracetime:  # math.abs does not exist; use builtin abs\n                accepted = True\n\n        if accepted:\n            self.combo_count += 1\n            self.multiplier = max(self.combo_count % 10, 1)\n            self.score += self.multiplier\n        else:\n            self.combo_count = 0\n        self.animate_touch(pos, accepted)\n\n    def animate_touch(self, pos, accepted):\n        print(pos, accepted)\n\n    async def refresh_leds(self, delay=100):\n        sleep = asyncio.sleep\n        while True:\n            np = self.np_notes[0]\n\n            anim_bg = self.animation_bg\n            if anim_bg:\n                np.buf = anim_bg.get_color_array()\n\n            for note_pos, note in self.pulses.items():\n                for i in note.positions:\n                    np[i] = (255, 0, 255)\n\n            np.write()\n            await sleep(delay)\n\n\nasync def consume(queue, callback):\n    while True:\n        item = await queue.get()\n        await callback(item)\n\n\nasync def queue_put(queue, iterable):\n    with iterable as it:\n        async for n in it:\n            await queue.put(n)\n\nasync def loop_stuff(animation):\n    while True:\n        np = await animation\n        await asyncio.sleep(0.1)\n\nasync def report_touch(touch, delay=0.5):\n    print(touch.istouch)\n    await asyncio.sleep(delay)\n\ndef main():\n    loop = asyncio.get_event_loop()\n    queue = asyncio.Queue(maxsize=8)\n    song = Song('dr_chaos')\n    loop.create_task(queue_put(queue, song))\n    pulse = Note()\n\n    max_concurrent = 8\n    for _ in range(max_concurrent):\n        loop.create_task(consume(queue, pulse.pulse))\n\n    fire_anim = Fire(num_leds=10)\n    loop.create_task(loop_stuff(fire_anim))\n\n    touch_pins = [1,2,3]\n    touch = Touch(touch_pins)\n    timer = Timer(0)\n    timer.init(period=100, mode=Timer.PERIODIC, callback=touch.cb)\n    loop.create_task(report_touch(touch))\n\n    # import esp\n    # esp.neopixel_write(pin, grb_buf, is800khz)\n    # rtc = RTC()\n    # rtc.datetime()\n    loop.run_forever()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"SebastianRoll/rhyth_sic_game","sub_path":"rhyth_game/rhyth_game.py","file_name":"rhyth_game.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
{"seq_id":"37261491478","text":"############################\nfrom bluetooth import *\nfrom pynput import keyboard\nfrom random import randint\nimport time\nimport threading\n########################\nMAX_SPEED = 480 # <-- figure out how to make it get instantiated before call to main\nCUR_SPEED = 240\n############################\ndef formatData(data):\n    p1 = 1\n    p2=data.index(\"]\")\n    p3 = p2+2\n    p4 = p3+data[p3:].index(\"]\")\n    p5 = p4+2\n    p6 = p5+data[p5:].index(\"]\")\n    p7 = p6+2\n    p8 = len(data)-1\n    return data[p1:p2], data[p3:p4], data[p5:p6], data[p7:p8]\ndef on_press(key):\n    global CUR_SPEED\n    parsed_vals = \"[0][0]\"\n    print(key)\n    if getattr(key, 'char', None) == 'q':  # special keys carry no .char attribute\n        client_socket.send('q')  # send a plain string, not the KeyCode object\n        client_socket.close()\n
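        # [Editor's note -- hedged] Returning False below stops the pynput
        # Listener, which unblocks listener.join() further down the file.
        # Special keys (Esc, Shift, ...) carry no .char, hence the defensive
        # pattern used above:
        #   ch = getattr(key, 'char', None)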
print(\"Socket Closed\")\n return False\n elif key == keyboard.Key.esc:\n client_socket.send(\"q\")\n client_socket.close()\n print(\"Socket Closed\")\n return False\n elif key.char== 'w':\n parsed_vals =parse_motor_values(CUR_SPEED, CUR_SPEED)\n elif key.char == 's':\n parsed_vals =parse_motor_values(-CUR_SPEED, -CUR_SPEED)\n elif key.char == 'a':\n parsed_vals =parse_motor_values(-CUR_SPEED, CUR_SPEED)\n elif key.char == 'd':\n parsed_vals =parse_motor_values(CUR_SPEED, -CUR_SPEED)\n elif key.char == '-':\n if(CUR_SPEED>0):\n CUR_SPEED -= 10\n elif key.char== '=':\n if(CUR_SPEED#################\n# Create the client socket\nclient_socket=BluetoothSocket( RFCOMM )\n\nclient_socket.connect((\"B8:27:EB:4A:A5:58\", 3))\nprint(\"Link Established\")\nprint(\"------------------------------------\")\n\ntry:\n with keyboard.Listener(on_press = on_press, on_release = on_release) as listener:\n listener.join()\n #s_thread = threading.Thread(target=sensor_thread)\n #s_thread.start()\n #k_thread = threading.Thread(target=keyboard_thread)\n #k_thread.daemon = True\n #k_thread.start()\n #s_thread.join()\n #k_thread.join()\nexcept Exception as e:\n print(str(e))\n print(\"Closing Client\")\n client_socket.close()\n","repo_name":"mvanderlyn27/robot-control","sub_path":"robot-control-client/robot-control-client.py","file_name":"robot-control-client.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33862878354","text":"from typing import List\n\n\nclass Solution:\n def triangularSum(self, nums: List[int]) -> int:\n if len(nums) == 1:\n return nums[0]\n res = []\n for i in range(0, len(nums) - 1):\n res.append((nums[i] + nums[i + 1]) % 10)\n return self.triangularSum(res)\n\n\nx = Solution()\nprint(x.triangularSum([1, 2, 3, 4, 5]))\n","repo_name":"TheFenrisLycaon/Competitive-Programming","sub_path":"LeetCode/traingular_sum.py","file_name":"traingular_sum.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25707938785","text":"import os\nimport cv2\nimport sys\nimport math\nimport scipy\nimport random\nimport rasterio\nimport numpy as np\nimport tifffile as tiff\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom random import seed\nfrom glob import glob\nfrom rasterio.windows import Window\n\nfrom keras.models import model_from_json\nfrom keras import backend as K\nfrom keras.layers import Conv2D\nfrom keras import layers\nfrom keras.models import Model\nimport tensorflow as tf\nimport random\nimport json\n\ndef create_class_weight(labels_dict,mu=0.15):\n total = np.sum(list(labels_dict.values()))\n keys = labels_dict.keys()\n class_weight = dict()\n weights_list = np.zeros((len(keys)))\n for key in keys:\n score = math.log(mu*total/float(labels_dict[key]))\n class_weight[key] = score if score > 1.0 else 1.0\n weights_list[sorted(keys).index(key)] = class_weight[key]\n return class_weight, weights_list\n\nclass Generator:\n def __init__(self, batch_size, class_0, class_1, num_channels):\n self.num_channels = num_channels\n self.num_classes = 2\n self.IMG_ROW = 64\n self.IMG_COL = 64\n self.batch_size = batch_size\n self.class_0 = class_0\n self.class_1 = class_1\n self.normalize_channel = None\n self.sup_materials = False\n self.sup_name = None\n self.sup_normalization = None\n self.sentinel = False\n self.val_region = False\n self.wv = False\n \n self.channels_name = 
['B02','B03','B04','B05','B06','B07','B08','B11','B12','B8A']\n \n self.json_file_linden_val = None\n self.json_file_oak_val = None\n self.json_file_linden_train = None\n self.json_file_oak_train = None\n \n def set_normalize_channel(self):\n self.normalize_channel = {}\n for path in ['/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/0','/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/1',\n '/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/2','/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/3']:\n self.normalize_channel[path] = {}\n tmp = []\n for ch in range(8):\n with rasterio.open(path + '_channel_' + str(ch) + '.tif') as src:\n tmp += [src.read(1)]\n tmp = np.asarray(tmp)\n self.normalize_channel[path] = []\n self.normalize_channel[path] += [np.mean(tmp[tmp>0])]\n self.normalize_channel[path] += [np.std(tmp[tmp>0])]\n self.normalize_channel[path] += [tmp.max()]\n self.normalize_channel[path] += [tmp[tmp>0].min()] #tmp.min\n\n def get_img_mask_array(self, imgpath, upper_left_x, upper_left_y, pol_width, pol_height, age_flag = False):\n #class_0=['S'], class_1=['E']\n #print(imgpath)\n if self.wv:\n channel_name = imgpath + self.channels_name[0] + '.tif'\n else:\n channel_name = '_'.join(imgpath.split('_')[:-1]) + self.channels_name[0] + '.tif'\n \n with rasterio.open(channel_name) as src:\n size_x = src.width\n size_y = src.height\n #print(upper_left_x, upper_left_y, pol_width, pol_height)\n difference_x = max(0, self.IMG_COL - int(pol_width))\n difference_y = max(0, self.IMG_ROW - int(pol_height))\n rnd_x = random.randint(max(0, int(upper_left_x) - difference_x),min(size_x, \n int(upper_left_x) + int(pol_width) + difference_x) -\n self.IMG_COL)\n rnd_y = random.randint(max(0, int(upper_left_y) - difference_y),min(size_y, \n int(upper_left_y) + int(pol_height) + difference_y) -\n self.IMG_ROW)\n \n window = Window(rnd_x, rnd_y, self.IMG_COL, self.IMG_ROW)\n \n mask_0 = np.zeros((1, self.IMG_ROW, self.IMG_COL))\n for cl_name in self.class_0:\n #if '{}.tif'.format(cl_name) in os.listdir(imgpath):\n if self.wv:\n channel_name = imgpath + '_'+ cl_name + '.tif'\n else:\n channel_name = '_'.join(imgpath.split('_')[:-1]) +(not self.sentinel)*'_'+ '{}.tif'.format(cl_name)\n\n with rasterio.open(channel_name) as src:\n mask_0 += src.read(window=window).astype(np.int)\n\n mask_1 = np.zeros((1, self.IMG_ROW, self.IMG_COL))\n for cl_name in self.class_1:\n #if '{}.tif'.format(cl_name) in os.listdir(imgpath):\n if self.wv:\n channel_name = imgpath +'_'+ cl_name + '.tif'\n else:\n channel_name = '_'.join(imgpath.split('_')[:-1]) +(not self.sentinel)*'_'+ '{}.tif'.format(cl_name)\n\n with rasterio.open(channel_name) as src:\n mask_1 += src.read(window=window).astype(np.int)\n #mask_1 = mask_1 > 0.5\n\n img = np.ones((self.IMG_ROW, self.IMG_COL, self.num_channels), dtype=np.float)\n for i, ch in enumerate(self.channels_name):\n if self.wv:\n channel_name = imgpath + ch + '.tif'\n else:\n channel_name = '_'.join(imgpath.split('_')[:-1])+ch+ '.tif'\n\n with rasterio.open(channel_name) as src:\n img[:,:,i] = src.read(window=window)\n if self.normalize_channel != None:\n width=3\n mean = self.normalize_channel[imgpath][0]\n std = self.normalize_channel[imgpath][1]\n img_max = self.normalize_channel[imgpath][2]\n img_min = self.normalize_channel[imgpath][3]\n m = max(0, mean 
- width*std)\n M = min(img_max, mean + width*std)\n\n img = ((img - m)/(M-m)).clip(0., 1.)\n else:\n img /= 255.\n img = img.clip(0, 1)\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # suplementary materials\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n if self.sup_materials:\n if self.wv:\n channel_name = imgpath + self.sup_name\n else:\n channel_name = '_'.join(imgpath.split('_')[:-1]) + self.sup_name\n with rasterio.open(channel_name) as src:\n img[:,:,-1] = src.read(window=window).astype(np.float)\n img[:,:,-1] = (img[:,:,-1] / self.sup_normalization).clip(0., 1.)\n\n mask = np.ones((self.IMG_ROW, self.IMG_COL, self.num_classes)) \n mask[:,:,0] = mask_0 \n mask[:,:,1] = mask_1 \n\n return np.asarray(img), np.asarray(mask) \n \n def extract_val(self, sample):\n return sample['upper_left_x'], sample['upper_left_y'], sample['pol_width'], sample['pol_height']\n \n def train_gen(self):\n while(True):\n imgarr=[]\n maskarr=[]\n for i in range(self.batch_size):\n if random.random() > 0.5:\n random_key = random.choice(list(self.json_file_oak_train.keys()))\n upper_left_x, upper_left_y, pol_width, pol_height = self.extract_val(self.json_file_oak_train[random_key])\n else:\n random_key = random.choice(list(self.json_file_linden_train.keys()))\n upper_left_x, upper_left_y, pol_width, pol_height = self.extract_val(self.json_file_linden_train[random_key])\n if not self.sentinel:\n img_name = '/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/'+random_key.split('_')[0]\n else:\n img_name = random_key#.split('_')[0]\n #print(img_name)\n img,mask=self.get_img_mask_array(img_name, upper_left_x, upper_left_y, pol_width, pol_height)\n imgarr.append(img)\n maskarr.append(mask)\n yield (np.asarray(imgarr),np.asarray(maskarr))\n imgarr=[]\n maskarr=[] \n\n def val_gen(self):\n while(True):\n imgarr=[]\n maskarr=[]\n for i in range(self.batch_size):\n if random.random() > 0.5:\n random_key = random.choice(list(self.json_file_oak_val.keys()))\n upper_left_x, upper_left_y, pol_width, pol_height = self.extract_val(self.json_file_oak_val[random_key])\n else:\n random_key = random.choice(list(self.json_file_linden_val.keys()))\n upper_left_x, upper_left_y, pol_width, pol_height = self.extract_val(self.json_file_linden_val[random_key])\n if not self.sentinel:\n img_name = '/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/'+random_key.split('_')[0]\n else:\n img_name = random_key#.split('_')[0]\n img,mask=self.get_img_mask_array(img_name, upper_left_x, upper_left_y, pol_width, pol_height)\n imgarr.append(img)\n maskarr.append(mask)\n yield (np.asarray(imgarr),np.asarray(maskarr))\n imgarr=[]\n maskarr=[]\n \n def set_prob(self):\n img_prob = np.zeros((len(self.train_img_list)))\n for i, img_path in enumerate(self.train_img_list):\n for cl in self.class_0+self.class_1:\n if cl+'_05.tif' in os.listdir(img_path):\n img_prob[i] += np.sum(tiff.imread(img_path+'/'+cl+'_05.tif'))\n img_prob = img_prob/np.sum(img_prob)\n return img_prob\n\n def weighted_categorical_crossentropy(self, weights):\n def loss(target,output,from_logits=False):\n output /= tf.reduce_sum(output,\n len(output.get_shape()) - 1,\n True)\n non_zero_pixels = tf.reduce_sum(target, axis=-1)\n _epsilon = tf.convert_to_tensor(K.epsilon(), dtype=output.dtype.base_dtype)\n output = tf.clip_by_value(output, _epsilon, 1. 
- _epsilon)\n weighted_losses = target * tf.log(output) * weights\n return - tf.reduce_sum(weighted_losses,len(output.get_shape()) - 1) \\\n * (self.IMG_ROW*self.IMG_COL*self.batch_size) / K.sum(non_zero_pixels)\n\n return loss\n \n def read_json(self, folders, class_name):\n js_full = {}\n samples_set = set()\n for folder in folders:\n if self.wv:\n json_file = '/home/user/data/projects/research-project/notebooks/Illarionova/Forestry_inventory/wv_inference/krasnoborsk/{}_{}.json'.format(folder, class_name) # folder 0 1\n else:\n json_file = folder + class_name + '.json'\n with open(json_file, 'r') as f:\n js_tmp = json.load(f)\n keys_list = set(js_tmp.keys())\n for key in keys_list:\n #if tuple(self.extract_val(js_tmp[key])) not in samples_set:\n js_tmp[folder+'_'+key] = js_tmp[key]\n samples_set.add(tuple(self.extract_val(js_tmp[key])))\n del js_tmp[key]\n js_full.update(js_tmp)\n return js_full\n \n def train_val_split(self, json_file, split_ration):\n keys_list = set(json_file.keys())\n train_samples, val_samples = {}, {}\n if self.val_region:\n with open('val_region.json', 'r') as f:\n val_region_dict = json.load(f) \n for key in keys_list:\n val_region_flag = False\n for ind in val_region_dict.keys():\n if json_file[key]['upper_left_x'] < val_region_dict[ind]['upper_left_x'] + \\\n val_region_dict[ind]['pol_width']\\\n and json_file[key]['upper_left_x'] > val_region_dict[ind]['upper_left_x'] \\\n and json_file[key]['upper_left_y'] < val_region_dict[ind]['upper_left_y'] + \\\n val_region_dict[ind]['pol_height']\\\n and json_file[key]['upper_left_y'] > val_region_dict[ind]['upper_left_y']:\n val_region_flag = True\n if val_region_flag:\n val_samples[key] = json_file[key]\n else:\n train_samples[key] = json_file[key]\n else:\n seed(1)\n for key in keys_list:\n if random.random() < split_ration:\n train_samples[key] = json_file[key]\n else:\n val_samples[key] = json_file[key]\n return train_samples, val_samples\n\n def load_dataset(self, folders, split_ration=0.7):\n json_file_oak_train = self.read_json(folders, \"conifer\")\n json_file_linden_train = self.read_json(folders, \"decidious\")\n \n self.json_file_oak_train, self.json_file_oak_val = self.train_val_split(json_file_oak_train, split_ration)\n self.json_file_linden_train, self.json_file_linden_val = self.train_val_split(json_file_linden_train, split_ration)\n","repo_name":"LanaLana/forest_height","sub_path":"species_classification_scripts/individual_stands_classification.py","file_name":"individual_stands_classification.py","file_ext":"py","file_size_in_byte":13227,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"9741126435","text":"# * ---------- IMPORTS --------- *\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nimport os\nimport psycopg2\nimport cv2\nimport numpy as np\nimport re\nfrom paho.mqtt import client as mqtt\nimport time\nimport json\nfrom base64 import b64encode, b64decode\nfrom hashlib import sha256\nfrom time import time\nfrom urllib.parse import quote_plus, urlencode\nfrom hmac import HMAC\n\n\n# Get the relativ path to this file (we will use it later)\n# FILE_PATH = \"/home/pi/DOCKERS\"\nFILE_PATH = \"/app\"\n# * ---------- Create App --------- *\napp = Flask(__name__)\nCORS(app, support_credentials=True)\n\n\n# * ---------- DATABASE CONFIG --------- *\n# DATABASE_USER = os.environ['DATABASE_USER']\n# DATABASE_PASSWORD = os.environ['DATABASE_PASSWORD']\n# DATABASE_HOST = os.environ['DATABASE_HOST']\n# 
DATABASE_PORT = os.environ['DATABASE_PORT']\n# DATABASE_NAME = os.environ['DATABASE_NAME']\n\ndef DATABASE_CONNECTION():\n return psycopg2.connect(user=\"ujwbtgmu\",\n password=\"KMYz8yHoeoIavScyMk-Y2GmJJYWxikWM\",\n host=\"queenie.db.elephantsql.com\",\n port=\"5432\", database=\"ujwbtgmu\")\n\n\ndef PUBLISH_USER(message):\n\n # FOR SUBSCRIBE\n \"\"\"def on_subscribe(client, userdata, mid, granted_qos):\n print('Subscribed for m' + str(mid))\n\n def on_message(client, userdata, message):\n print(\"Received message '\" + str(message.payload) + \"' on topic '\" +\n message.topic + \"' with QoS \" + str(message.qos))\"\"\"\n\n def on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n def on_log(client, userdata, level, buf):\n print(\"log: \", buf)\n\n def generate_sas_token(uri, key, policy_name, expiry=3600):\n ttl = time() + expiry\n sign_key = \"%s\\n%d\" % ((quote_plus(uri)), int(ttl))\n sign_key = sign_key.encode('utf-8')\n signature = b64encode(HMAC(b64decode(key), sign_key, sha256).digest())\n\n rawtoken = {\n 'sr': uri,\n 'sig': signature,\n 'se': str(int(ttl))\n }\n return 'SharedAccessSignature ' + urlencode(rawtoken)\n\n device_id = \"rpi-core\" # Add device id\n iot_hub_name = \"MWIoTHub\" # Add iot hub name\n device_key = \"lkG1pZn5PAGTNDsMBQlHtRw2zr6FNQJOcOO0yu0WLtE=\" \n\n client = mqtt.Client(client_id=device_id, protocol=mqtt.MQTTv311, clean_session=False)\n\n client.on_log = on_log\n client.tls_set_context(context=None)\n client.tls_insecure_set(True)\n\n # Set up client credentials\n username = \"{}.azure-devices.net/{}/api-version=2018-06-30\".format(\n iot_hub_name, device_id)\n client.username_pw_set(username=username, password=generate_sas_token(iot_hub_name + \".azure-devices.net/devices/\" + device_id, device_key, device_id))\n\n # Connect to the Azure IoT Hub\n client.on_connect = on_connect\n client.connect(iot_hub_name+\".azure-devices.net\", port=8883)\n\n # Publish\n # time.sleep(1)\n client.publish(\"devices/{device_id}/messages/events/\".format(\n device_id=device_id), payload=message, qos=1, retain=False)\n print(\"Publishing on devices/\" + device_id +\n \"/messages/events/\", message)\n time.sleep(5)\n client.disconnect()\n\n # Subscribe\n \"\"\"client.on_message = on_message\n client.on_subscribe = on_subscribe\n client.subscribe(\n \"devices/{device_id}/messages/devicebound/#\".format(device_id=device_id))\n client.loop_forever()\"\"\"\n\n# * -------------------- ROUTES ------------------- *\n# * ---------- Test server ---------- *\n@app.route('/')\ndef index():\n return \"AZURE backend server side is live!\"\n\n# * ---------- Get data from the face recognition ---------- *\n@app.route('/receive_data', methods=['POST'])\ndef get_receive_data():\n if request.method == 'POST':\n json_data = request.get_json()\n\n # Check if the user is already in the DB\n try:\n # Connect to the DB\n connection = DATABASE_CONNECTION()\n cursor = connection.cursor()\n\n # Query to check if the user as been saw by the camera today\n user_saw_today_sql_query =\\\n f\"SELECT * FROM users WHERE date = '{json_data['date']}' AND name = '{json_data['name']}'\"\n\n cursor.execute(user_saw_today_sql_query)\n result = cursor.fetchall()\n connection.commit()\n\n # If use is already in the DB for today:\n if result:\n print('user IN')\n image_path = f\"{FILE_PATH}/assets/img/{json_data['date']}/{json_data['name']}/departure.jpg\"\n\n # Save image\n os.makedirs(\n f\"{FILE_PATH}/assets/img/{json_data['date']}/{json_data['name']}\", 
exist_ok=True)\n cv2.imwrite(image_path, np.array(json_data['picture_array']))\n json_data['picture_path'] = image_path\n\n # Update user in the DB (parameterized query to avoid SQL injection)\n update_user_query = \"UPDATE users SET departure_time = %s, departure_picture = %s WHERE name = %s AND date = %s\"\n cursor.execute(update_user_query, (json_data['hour'], json_data['picture_path'], json_data['name'], json_data['date']))\n\n # Publish user leave\n data = {\n \"name\": f\"{json_data['name']}\",\n \"date\": f\"{json_data['date']}\",\n \"departure_time\": f\"{json_data['hour']}\",\n \"picture_path\": f\"{json_data['picture_path']}\"\n }\n user_data = json.dumps(data)\n PUBLISH_USER(message=user_data)\n\n else:\n print(\"user has no entry today: registering arrival\")\n # Save image\n image_path = f\"{FILE_PATH}/assets/img/history/{json_data['date']}/{json_data['name']}/arrival.jpg\"\n os.makedirs(\n f\"{FILE_PATH}/assets/img/history/{json_data['date']}/{json_data['name']}\", exist_ok=True)\n cv2.imwrite(image_path, np.array(json_data['picture_array']))\n json_data['picture_path'] = image_path\n\n # Create a new row for the user today:\n insert_user_query = \"INSERT INTO users (name, date, arrival_time, arrival_picture) VALUES (%s, %s, %s, %s)\"\n cursor.execute(insert_user_query, (json_data['name'], json_data['date'], json_data['hour'], json_data['picture_path']))\n\n # Publish user arrival\n data = {\n \"name\": f\"{json_data['name']}\",\n \"date\": f\"{json_data['date']}\",\n \"arrival_time\": f\"{json_data['hour']}\",\n \"picture_path\": f\"{json_data['picture_path']}\"\n }\n user_data = json.dumps(data)\n PUBLISH_USER(message=user_data)\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"ERROR DB: \", error)\n finally:\n connection.commit()\n\n # closing database connection.\n if connection:\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n\n # Return user's data to the front\n return jsonify(json_data)\n\n\n# * ---------- Get all the data of an employee ---------- *\n@app.route('/get_employee/<name>', methods=['GET'])\ndef get_employee(name):\n answer_to_send = {}\n # Check if the user is already in the DB\n try:\n # Connect to DB\n connection = DATABASE_CONNECTION()\n cursor = connection.cursor()\n # Query the DB to get all the data of a user (parameterized):\n user_information_sql_query = \"SELECT * FROM users WHERE name = %s\"\n\n cursor.execute(user_information_sql_query, (name,))\n result = cursor.fetchall()\n connection.commit()\n\n # if the user exists in the db:\n if result:\n print('RESULT: ', result)\n # Structure the data and put the dates in string for the front\n for k, v in enumerate(result):\n answer_to_send[k] = {}\n for ko, vo in enumerate(result[k]):\n answer_to_send[k][ko] = str(vo)\n print('answer_to_send: ', answer_to_send)\n else:\n answer_to_send = {'error': 'User not found...'}\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"ERROR DB: \", error)\n finally:\n # closing database connection:\n if connection:\n cursor.close()\n connection.close()\n\n # Return the user's data to the front\n return jsonify(answer_to_send)\n\n\n# * --------- Get the 5 last users seen by the camera --------- *\n@app.route('/get_5_last_entries', methods=['GET'])\ndef get_5_last_entries():\n answer_to_send = {}\n # Fetch the most recent entries from the DB\n try:\n # Connect to DB\n connection = DATABASE_CONNECTION()\n\n cursor = connection.cursor()\n # Query the DB for the five most recent entries:\n lasts_entries_sql_query = \"SELECT * FROM users ORDER BY id DESC LIMIT 5;\"\n\n cursor.execute(lasts_entries_sql_query)\n result = cursor.fetchall()\n connection.commit()\n\n # if DB is not empty:\n if result:\n # Structure the data and put the dates in string for the front\n for k, v in enumerate(result):\n answer_to_send[k] = {}\n for ko, vo in enumerate(result[k]):\n answer_to_send[k][ko] = str(vo)\n else:\n answer_to_send = {'error': 'No entries found...'}\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"ERROR DB: \", error)\n finally:\n # closing database connection:\n if connection:\n cursor.close()\n connection.close()\n\n # Return the user's data to the front\n return jsonify(answer_to_send)\n\n\n# * ---------- Add new employee ---------- *\n@app.route('/add_employee', methods=['POST'])\n@cross_origin(supports_credentials=True)\ndef add_employee():\n try:\n # Get the picture from the request\n image_file = request.files['image']\n print(request.form['nameOfEmployee'])\n\n # Store it in the folder of the known faces:\n file_path = os.path.join(\n f\"{FILE_PATH}/assets/img/users/{request.form['nameOfEmployee']}.jpg\")\n image_file.save(file_path)\n answer = 'new employee successfully added'\n except Exception:\n answer = 'Error while adding new employee. Please try later...'\n return jsonify(answer)\n\n\n# * ---------- Get employee list ---------- *\n@app.route('/get_employee_list', methods=['GET'])\ndef get_employee_list():\n employee_list = {}\n\n # Walk in the user folder to get the user list\n walk_count = 0\n for file_name in os.listdir(f\"{FILE_PATH}/assets/img/users/\"):\n # Capture the employee's name with the file's name\n name = re.findall(r\"(.*)\.jpg\", file_name)\n if name:\n employee_list[walk_count] = name[0]\n walk_count += 1\n\n return jsonify(employee_list)\n\n\n# * ---------- Delete employee ---------- *\n@app.route('/delete_employee/<name>', methods=['GET'])\ndef delete_employee(name):\n try:\n # Remove the picture of the employee from the user's folder:\n print('name: ', name)\n file_path = os.path.join(f'{FILE_PATH}/assets/img/users/{name}.jpg')\n os.remove(file_path)\n answer = 'Employee successfully removed'\n except Exception:\n answer = 'Error while deleting employee. 
Please try later'\n\n return jsonify(answer)\n\n\n# * -------------------- RUN SERVER -------------------- *\nif __name__ == '__main__':\n # * --- DEBUG MODE: --- *\n # app.run(host='localhost', port=5000, debug=True)\n # * --- DOCKER PRODUCTION MODE: --- *\n app.run(debug=True, host='0.0.0.0', port=5000)\n","repo_name":"iradbouzidi/edge-azure","sub_path":"back-end/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33908150925","text":"import socket\nimport pickle\nimport os\n\nclass Pessoa:\n def __init__(self, nome, cpf, idade):\n self.nome = nome\n self.cpf = cpf\n self.idade = idade\n \nclass PessoasInputStream:\n def __init__(self, input_stream):\n self.input_stream = input_stream\n\n def receber(self):\n try:\n received_data = pickle.load(self.input_stream)\n return received_data\n except Exception as e:\n print(f\"Erro ao receber dados: {e}\")\n\ndef calcular_tamanho_bytes_arquivo(nome_arquivo):\n tamanho_bytes = os.stat(nome_arquivo).st_size\n return tamanho_bytes\n\ndef servidor_remoto():\n host = 'localhost'\n port = 12345\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind((host, port))\n server_socket.listen(1)\n\n print(f\"Servidor escutando em {host}:{port}\")\n\n client_socket, client_address = server_socket.accept()\n print(f\"Conexão de {client_address}\")\n\n input_stream = PessoasInputStream(client_socket.makefile('rb'))\n received_pessoas = input_stream.receber()\n\n with open('pessoas_output.txt', 'w') as output_file:\n for pessoa in received_pessoas:\n output_file.write(f\"Nome: {pessoa.nome}, CPF: {pessoa.cpf}, Idade: {pessoa.idade}\\n\")\n tamanho_bytes_pessoa_gravada = len(f\"Nome: {pessoa.nome}, CPF: {pessoa.cpf}, Idade: {pessoa.idade}\\n\")\n print(f\"Nome: {pessoa.nome}, CPF: {pessoa.cpf}, Idade: {pessoa.idade}, Tamanho em bytes por pessoa: {tamanho_bytes_pessoa_gravada + 1}\")\n\n qtd_Pessoas = len(received_pessoas)\n print(f\"Quantidade de pessoas: {qtd_Pessoas}\")\n print(f\"Tamanho total em bytes: {calcular_tamanho_bytes_arquivo('pessoas_output.txt')}\")\n\n client_socket.close()\n server_socket.close()\n\nif __name__ == \"__main__\":\n servidor_remoto()\n","repo_name":"brunoalves0921/SD","sub_path":"Questão 1 & 2/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17138892809","text":"#!/usr/bin/env python3\nimport cv2\nimport numpy as np\nimport gi\nimport os\ngi.require_version(\"Gdk\", \"3.0\")\nfrom gi.repository import Gdk\n\nallmonitors = []\ngdkdsp = Gdk.Display.get_default()\nfor i in range(gdkdsp.get_n_monitors()):\n monitor = gdkdsp.get_monitor(i)\n scale = monitor.get_scale_factor()\n geo = monitor.get_geometry()\n allmonitors.append([\n monitor.get_model()] + [n * scale for n in [\n geo.x, geo.y, geo.width, geo.height\n ]\n ])\n allmonitors[i].extend([monitor.get_width_mm(),monitor.get_height_mm()])\nprint(allmonitors)\ndef scaleFactor(x1,y1,x2,y2):\n return (x1**2 + y1**2)**0.5/(x2**2 + y2**2)**0.5\n\ndef spanned(allmonitors):\n monitorStatus=len(allmonitors)\n #wp=cv2.imread(os.path.dirname(__file__)+'/cyberpunk-black-widow-4k-y0-2160x3840.jpg')\n wp=cv2.imread(os.path.dirname(__file__)+'/cyberpunk-2077-black-widow-ve-2160x3840.jpg')\n pscale=0.9 #downscaling to 1920 from 2160\n bezel_offset=150\n topLeft=[500,0] #corner for cropping to a 
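# A minimal client-side sketch to pair with the servidor above, assuming the
# same Pessoa class is defined in the client script (pickle resolves it as
# __main__.Pessoa on both ends); host and port mirror the server. Only unpickle
# data from peers you trust, since pickle can execute arbitrary code on load.
import pickle
import socket

class Pessoa:
    def __init__(self, nome, cpf, idade):
        self.nome = nome
        self.cpf = cpf
        self.idade = idade

def cliente():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 12345))
    pessoas = [Pessoa('Ana', '111.222.333-44', 30), Pessoa('Bruno', '555.666.777-88', 25)]
    with sock.makefile('wb') as stream:
        pickle.dump(pessoas, stream)  # the server reads this back with pickle.load
    sock.close()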
1920x2160 window\n scaling=scaleFactor(allmonitors[0][-1],allmonitors[0][-2],\\\n allmonitors[1][-1],allmonitors[1][-2]) #returns 1.5153 for (24inch/15.6inch) upscale higher ppi monitor\n\n wp=cv2.resize(wp,(0,0),fx=pscale,fy=pscale,interpolation=cv2.INTER_AREA)\n wp=wp[topLeft[0]:topLeft[0]+2160, topLeft[1]:topLeft[1]+1920]\n wp_top=wp[:1080,:1920]\n wp_bot=wp[1080:2160,:1920]\n wp_bot=cv2.resize(wp_bot,(0,0),fx=scaling,fy=scaling,interpolation=cv2.INTER_AREA)\n newTL=[bezel_offset,(wp_bot.shape[1]-1920)//2]\n wp_bot=wp_bot[newTL[0]:newTL[0]+1080,newTL[1]:newTL[1]+1920]\n nwp=np.vstack([wp_top,wp_bot])\n\n #cv2.imshow('wp',wp[topLeft[0]:topLeft[0]+2160, topLeft[1]:topLeft[1]+1920])\n ##cv2.imshow('wpb',wp_bot)\n ##cv2.waitKey()\n ##cv2.imshow('wpt',wp_top)\n ##cv2.waitKey()\n ##cv2.imshow('wpt',nwp)\n ##cv2.waitKey()\n ##cv2.destroyAllWindows()\n \n if monitorStatus==2:\n cv2.imwrite(os.path.dirname(__file__)+'/CyberWidow.jpg',nwp)\n else:\n cv2.imwrite(os.path.dirname(__file__)+'/CyberWidow.jpg',wp_top)\n \n\nif __name__==\"__main__\":\n spanned(allmonitors)\n \n","repo_name":"ringo47/wallpaperSpanner","sub_path":"rescaler.py","file_name":"rescaler.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"234211824","text":"import argparse\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom pathlib import Path\n\nfrom pynput import keyboard\n\n# ================= Config =================\nIMAGE_VIEWER_COMMAND = 'feh -. \"{}\"'\n\nSEARCH_PATH = Path('/home/user/somepath')\nEXTENSIONS = ['jpg', 'jpeg', 'png', 'webp']\n\nCATEGORIES = [\n Path('IRL'),\n Path('Art/sfw'),\n Path('Art/nsfw'),\n]\n# ==========================================\n\n# ================= Setup ==================\nparser = argparse.ArgumentParser(description='Interactively sorts images into categories')\nparser.add_argument('--dry-run', help='Do not move, only print', action='store_true', default=False)\nparser.add_argument('--copy', dest='sorter', help='Copy instead of moving', action='store_const', const=shutil.copy,\n default=shutil.move)\nargs = parser.parse_args()\n\nif args.dry_run:\n args.sorter = lambda file, cat: print(f'[DRY RUN] Sorted {file.name} into {cat}')\n\nif not SEARCH_PATH.exists():\n sys.exit(f'Search dir \"{SEARCH_PATH}\" does not exist')\n\n# Create sorted dirs\nfor c in CATEGORIES:\n c.mkdir(exist_ok=True, parents=True)\n# ==========================================\n\nprint('=============== Categories ===============')\nprint('\\n'.join([f'{i + 1}\\t{c}' for i, c in enumerate(CATEGORIES)]))\n\nfor f in SEARCH_PATH.rglob('*'):\n ext = os.path.splitext(f)[1][1:]\n\n if ext.lower() not in EXTENSIONS:\n continue\n\n process = subprocess.Popen(IMAGE_VIEWER_COMMAND.format(f), shell=True)\n\n with keyboard.Events() as events:\n # Try to get correct category index\n while True:\n event = events.get()\n\n try:\n key = int(event.key.char)\n category = CATEGORIES[key - 1] # For 0 index\n break\n except (AttributeError, ValueError):\n continue\n except IndexError:\n print(f'Category number {key} too large (max {len(CATEGORIES)})')\n finally:\n # For the key release event. 
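# A minimal sketch of the pixel-density reasoning behind scaleFactor above:
# PPI can be derived from the Gdk geometry (pixels) and the physical size in
# millimetres stored at the end of each allmonitors entry. The helper name ppi
# is illustrative, not part of Gdk.
def ppi(width_px, height_px, width_mm, height_mm):
    # diagonal in pixels divided by diagonal in inches (25.4 mm per inch)
    diag_px = (width_px ** 2 + height_px ** 2) ** 0.5
    diag_in = ((width_mm / 25.4) ** 2 + (height_mm / 25.4) ** 2) ** 0.5
    return diag_px / diag_in

# A 24" 1920x1080 panel (531x299 mm) comes out near 92 PPI while a 15.6"
# 1920x1080 laptop panel (345x194 mm) is near 141 PPI, which is why the bottom
# half of the spanned image is upscaled before cropping.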
Not very reliable\n events.get()\n\n # Move the file\n args.sorter(f, category / f.name)\n\n process.terminate()\n","repo_name":"kylosus/Interactive-Categorizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40060608632","text":"from sqlalchemy.sql import exists\n\nimport dnautils\nimport immunedb.common.config as config\nfrom immunedb.common.models import (Clone, Sample, Sequence, SequenceCollapse,\n Subject)\nimport immunedb.common.modification_log as mod_log\nimport immunedb.util.concurrent as concurrent\n\nfrom immunedb.util.log import logger\n\n\nclass CollapseWorker(concurrent.Worker):\n \"\"\"A worker for collapsing sequences without including positions where\n either sequences has an 'N'.\n :param Session session: The database session\n \"\"\"\n def __init__(self, session):\n self._session = session\n self._tasks = 0\n\n def do_task(self, bucket):\n seqs = self._session.query(\n Sequence.sample_id, Sequence.ai, Sequence.seq_id,\n Sequence.sequence, Sequence.copy_number\n ).filter(\n Sequence.subject_id == bucket.subject_id,\n Sequence.v_gene == bucket.v_gene,\n Sequence.j_gene == bucket.j_gene,\n Sequence.cdr3_num_nts == bucket.cdr3_num_nts,\n Sequence._insertions == bucket._insertions,\n Sequence._deletions == bucket._deletions\n )\n\n to_process = sorted([{\n 'sample_id': s.sample_id,\n 'ai': s.ai,\n 'seq_id': s.seq_id,\n 'sequence': s.sequence,\n 'cn': s.copy_number\n } for s in seqs], key=lambda e: -e['cn'])\n\n while len(to_process) > 0:\n # Get the largest sequence in the list\n larger = to_process.pop(0)\n # Iterate over all smaller sequences to find matches\n instances = 1\n samples = set([larger['sample_id']])\n for i in reversed(range(len(to_process))):\n smaller = to_process[i]\n if len(larger['sequence']) != len(smaller['sequence']):\n self.warning('Tried to collapse sequences of different '\n 'lengths. 
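# The key-capture loop above, reduced to a self-contained sketch: block on
# pynput keyboard events until a digit key selecting a valid category is
# pressed, then return its 0-based index. Assumes pynput is installed.
from pynput import keyboard

def pick_category(num_categories):
    with keyboard.Events() as events:
        while True:
            event = events.get()
            try:
                index = int(event.key.char) - 1  # keys are labelled from 1
            except (AttributeError, ValueError):
                continue  # not a printable digit key
            if 0 <= index < num_categories:
                return index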
AIs are {} {}'.format(\n larger['ai'], smaller['ai']))\n elif dnautils.equal(larger['sequence'], smaller['sequence']):\n # Add the smaller sequence's copy number to the larger\n larger['cn'] += smaller['cn']\n # If the smaller sequence matches the larger, collapse it\n # to the larger\n self._session.add(SequenceCollapse(**{\n 'sample_id': smaller['sample_id'],\n 'seq_ai': smaller['ai'],\n 'collapse_to_subject_seq_ai': larger['ai'],\n 'collapse_to_subject_sample_id': larger['sample_id'],\n 'collapse_to_subject_seq_id': larger['seq_id'],\n 'instances_in_subject': 0,\n 'copy_number_in_subject': 0,\n 'samples_in_subject': 0,\n }))\n instances += 1\n samples.add(smaller['sample_id'])\n # Delete the smaller sequence from the list to process\n # since it's been collapsed\n del to_process[i]\n\n # Update the larger sequence's copy number and \"collapse\" to itself\n self._session.add(SequenceCollapse(**{\n 'sample_id': larger['sample_id'],\n 'seq_ai': larger['ai'],\n 'collapse_to_subject_sample_id': larger['sample_id'],\n 'collapse_to_subject_seq_id': larger['seq_id'],\n 'collapse_to_subject_seq_ai': larger['ai'],\n 'instances_in_subject': instances,\n 'copy_number_in_subject': larger['cn'],\n 'samples_in_subject': len(samples),\n }))\n\n self._session.commit()\n self._tasks += 1\n if self._tasks > 0 and self._tasks % 100 == 0:\n self.info('Collapsed {} buckets'.format(self._tasks))\n\n def cleanup(self):\n self.info('Committing collapsed sequences')\n self._session.commit()\n self._session.close()\n\n\ndef run_collapse(session, args):\n mod_log.make_mod('collapse', session=session, commit=True,\n info=vars(args))\n subject_ids = []\n\n subjects = (args.subject_ids or [e.id for e in session.query(Subject.id)])\n for subject in subjects:\n if session.query(Sample).filter(\n Sample.subject_id == subject,\n ~exists().where(\n SequenceCollapse.sample_id == Sample.id\n )).first() is None:\n logger.info('Subject {} already collapsed. 
Skipping.'.format(\n subject))\n else:\n logger.info('Resetting collapse info for subject {}'.format(\n subject))\n samples = session.query(Sample).filter(\n Sample.subject_id == subject\n )\n for sample in samples:\n session.query(SequenceCollapse).filter(\n SequenceCollapse.sample_id == sample.id\n ).delete(synchronize_session=False)\n sample.sample_stats = []\n logger.info('Resetting clone info for subject {}'.format(subject))\n session.query(Clone).filter(Clone.subject_id == subject).delete()\n subject_ids.append(subject)\n session.commit()\n\n logger.info('Creating task queue to collapse {} subjects.'.format(\n len(subject_ids)))\n\n tasks = concurrent.TaskQueue()\n\n for subject_id in subject_ids:\n buckets = session.query(\n Sequence.subject_id, Sequence.v_gene, Sequence.j_gene,\n Sequence.cdr3_num_nts, Sequence._insertions, Sequence._deletions\n ).filter(\n Sequence.subject_id == subject_id\n ).group_by(\n Sequence.subject_id, Sequence.v_gene, Sequence.j_gene,\n Sequence.cdr3_num_nts, Sequence._insertions, Sequence._deletions\n )\n for bucket in buckets:\n tasks.add_task(bucket)\n\n logger.info('Generated {} total tasks'.format(tasks.num_tasks()))\n\n for i in range(0, min(tasks.num_tasks(), args.nproc)):\n tasks.add_worker(CollapseWorker(config.init_db(args.db_config)))\n tasks.start()\n\n session.close()\n","repo_name":"arosenfeld/immunedb","sub_path":"immunedb/aggregation/collapse.py","file_name":"collapse.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"43122439328","text":"import time\nimport string\nfrom alphabeta import alpha_beta_value, get_rounds\nfrom parameters import delay\nfrom parameters import LARGE_NUMBER, error_message\nfrom tictactoe import TicTacToe\n\n# Handles humans turn\ndef play_human_turn(state: TicTacToe):\n print(\"------------\")\n while True:\n print(\"What's your move? (A1, B2, C3, etc.)\")\n print(\"------------\")\n coordinate = input(\"Your choice: \")\n print(\"------------\")\n try:\n letter = coordinate[0]\n number = int(coordinate[1:])\n aux = string.ascii_uppercase.find(letter.capitalize())\n if aux > state.board_size or number > state.board_size or number < 1:\n print(error_message)\n print(\"------------\")\n continue\n index = int(aux * state.board_size + number - 1)\n if state.crosses_turn:\n mark = \"X\"\n else:\n mark = \"O\"\n if state.state[index] == \"-\":\n aux = state.state[:index] + mark + state.state[index + 1 :]\n latest_move = (letter, number)\n new_state = TicTacToe(\n aux, not state.crosses_turn, state.level, state.players, latest_move\n )\n return new_state\n else:\n print(\"Sorry, but that one is already taken. Please try again.\")\n except:\n print(error_message)\n print(\"------------\")\n\n\n# Handles computer's turns. 
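# A minimal sketch of the collapse strategy in CollapseWorker.do_task above,
# stripped of the ORM: visit sequences in descending copy-number order and fold
# every identical smaller sequence into the current largest one. Plain string
# equality stands in for dnautils.equal here, so the N-tolerant comparison of
# the original is not reproduced.
def collapse(seqs):
    # seqs: list of dicts with 'sequence' and 'cn' (copy number)
    remaining = sorted(seqs, key=lambda e: -e['cn'])
    collapsed = []
    while remaining:
        larger = remaining.pop(0)
        for i in reversed(range(len(remaining))):
            if remaining[i]['sequence'] == larger['sequence']:
                larger['cn'] += remaining[i]['cn']  # absorb the duplicate
                del remaining[i]
        collapsed.append(larger)
    return collapsed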
Calls the minimax algorithm to find the best position.\ndef play_computer_turn(state: TicTacToe):\n if state.crosses_turn:\n arvo = -LARGE_NUMBER\n else:\n arvo = LARGE_NUMBER\n new_state = \"\"\n children = state.generate_children()\n for i, siirto in enumerate(children):\n siirron_arvo = alpha_beta_value(siirto)\n\n if state.crosses_turn:\n if siirron_arvo > arvo:\n arvo = siirron_arvo\n new_state = siirto\n if arvo == 1:\n break\n if not state.crosses_turn:\n if siirron_arvo < arvo:\n arvo = siirron_arvo\n new_state = siirto\n if arvo == -1:\n break\n\n old = state.state\n new = new_state.state\n index = \"\"\n for i, letter in enumerate(new):\n if letter != old[i]:\n index = i\n break\n row = (index + 1) // state.board_size\n column = index % state.board_size\n latest_move = (string.ascii_uppercase[row], (column + 1))\n if state.crosses_turn:\n player = \"X\"\n else:\n player = \"O\"\n print(\"------------------------------------------------------\")\n print(player + \" plays to\", latest_move[0] + str(latest_move[1]))\n return new_state\n\n\n# Coordinates whether to play human or computer turn. Ends the game if the board is full or one of the players won.\ndef play(state: TicTacToe):\n time.sleep(delay)\n if state.crosses_turn:\n print(\"Next turn: X\")\n else:\n print(\"Next turn: O\")\n print(\"\")\n print(state)\n time.sleep(delay)\n\n if state.players == 2 or (state.players == 1 and not state.crosses_turn):\n new_state = play_human_turn(state)\n else:\n new_state = play_computer_turn(state)\n\n new_state.first_turn = False\n\n if new_state.is_end_state():\n print(new_state)\n print(\"----------------\")\n if new_state.won(\"O\", new_state.to_win):\n print(\"AND THE WINNER IS: O\")\n elif new_state.won(\"X\", new_state.to_win):\n print(\"AND THE WINNER IS: X\")\n else:\n print(\"DRAW\")\n print(\"----------------\")\n print(\"Stats:\")\n print(\"-Recursion calls:\", get_rounds())\n return\n else:\n play(new_state)\n","repo_name":"MillaKelhu/tiralabra","sub_path":"Main/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2902468548","text":"import random\n\nwith open('directories.txt', 'r') as f:\n dirs = f.readlines()\n\ndirs = [i.strip() for i in dirs]\n\nwith open('location_data_2.txt', 'r') as f:\n all_locs = f.read()\n\nlocs_list = all_locs.split()\n\nlocs_list = [i.strip() for i in locs_list]\n\nprint(\"Len locs_list:\", len(locs_list))\nprint(\"Len of ids already dled:\", len(dirs))\n\nnot_yet_dl = [id for id in locs_list if id not in dirs]\n\nprint(\"Len of ids to dl:\", len(not_yet_dl))\n\nnot_yet_dl_str = ' '.join(not_yet_dl)\n\nwith open('ids_to_dl_1.txt', 'w') as f:\n f.write(not_yet_dl_str)\n\n# randomize id order to get new ids to the top\nrandom.shuffle(not_yet_dl)\nnot_yet_dl_shuffle_str = ' '.join(not_yet_dl)\n\nwith open('ids_to_dl_2.txt', 'w') as f:\n f.write(not_yet_dl_shuffle_str)\n","repo_name":"EricaXia/hpcfiles","sub_path":"check_which_locs_notyet_dled.py","file_name":"check_which_locs_notyet_dled.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10292834604","text":"from simple_unittest import test\n\ndef twoSum(nums, target):\n\n lookup_cache = {}\n\n for i, a in enumerate(nums):\n b = target - a\n if b in lookup_cache:\n return [lookup_cache[b], i]\n else:\n lookup_cache[a] = i\n\n return []\n\ntest('empty', twoSum([], 0), 
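# A sketch of the selection pattern used by play_computer_turn above: score
# every child state and keep the best one, cutting the loop short as soon as a
# proven win is found. evaluate stands in for alpha_beta_value and is an
# assumption of this sketch.
def best_child(children, maximizing, evaluate):
    best_value = float('-inf') if maximizing else float('inf')
    best = None
    for child in children:
        value = evaluate(child)
        if (maximizing and value > best_value) or (not maximizing and value < best_value):
            best_value, best = value, child
        if best_value == (1 if maximizing else -1):
            break  # a forced win cannot be improved upon
    return best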
[])\ntest('one', twoSum([1, 1], 2), [0, 1])\ntest('[2, 7, 11, 15] -> 9', twoSum([2, 7, 11, 15, 9], 9), [0, 1])\ntest('[0, 1] -> 1', twoSum([0, 1], 1), [0, 1])\ntest('[7, 11, 15] -> 26', twoSum([7, 11, 15], 26), [1, 2])\n","repo_name":"michal-franc/CodingKatas","sub_path":"leetcode/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3607800069","text":"from unittest import TestCase\n\nfrom uhlive.stream.recognition import Opened, ProtocolError, Recognizer\n\nfrom .recog_events import recognition_complete, recognition_in_progress, session_opened\n\n\nclass TestConnection(TestCase):\n def test_open(self):\n client = Recognizer()\n frame = client.open(\"mycustomer\", \"mychan\")\n self.assertEqual(\n frame,\n r'{\"command\":\"OPEN\",\"request_id\":1,\"channel_id\":\"mychan\",\"headers\":{\"custom_id\":\"mycustomer\",\"audio_codec\":\"linear\"},\"body\":\"\"}',\n )\n with self.assertRaises(ProtocolError):\n client.get_params()\n with self.assertRaises(ProtocolError):\n client.send_audio_chunk(b\"trntrntrntnrtsrn\")\n\n def test_opened(self):\n client = Recognizer()\n event = client.receive(session_opened)\n self.assertIsInstance(event, Opened)\n frame = client.get_params()\n self.assertEqual(\n frame,\n r'{\"command\":\"GET-PARAMS\",\"request_id\":1,\"channel_id\":\"testuie46e4ui6\",\"headers\":{},\"body\":\"\"}',\n )\n # Can't open twice\n with self.assertRaises(ProtocolError):\n client.open(\"test\", \"test\")\n\n # we can stream\n client.send_audio_chunk(b\"trntrntrntnrtsrn\")\n\n def test_recognize(self):\n client = Recognizer()\n client.receive(session_opened)\n frame = client.define_grammar(\n \"speech/spelling/mixed?regex=[a-z]{2}[0-9]{9}[a-z]{2}\", \"parcel_num\"\n )\n self.assertEqual(\n frame,\n r'{\"command\":\"DEFINE-GRAMMAR\",\"request_id\":1,\"channel_id\":\"testuie46e4ui6\",\"headers\":{\"content_id\":\"parcel_num\",\"content_type\":\"text/uri-list\"},\"body\":\"builtin:speech/spelling/mixed?regex=[a-z]{2}[0-9]{9}[a-z]{2}\"}',\n )\n frame = client.recognize(\"session:parcel_num\", no_input_timeout=5000)\n self.assertEqual(\n frame,\n r'{\"command\":\"RECOGNIZE\",\"request_id\":2,\"channel_id\":\"testuie46e4ui6\",\"headers\":{\"recognition_mode\":\"normal\",\"content_type\":\"text/uri-list\",\"start_input_timers\":true,\"no_input_timeout\":5000},\"body\":\"session:parcel_num\"}',\n )\n\n def test_recognition_in_progress(self):\n client = Recognizer()\n client.receive(session_opened)\n client.receive(recognition_in_progress)\n # Can't have multiple recognition processes\n with self.assertRaises(ProtocolError):\n client.recognize(\"builtin:speech/transcribe\")\n\n # Only stop and start-input-timers are valid\n with self.assertRaises(ProtocolError):\n client.get_params()\n\n self.assertEqual(\n client.start_input_timers(),\n r'{\"command\":\"START-INPUT-TIMERS\",\"request_id\":1,\"channel_id\":\"testuie46e4ui6\",\"headers\":{},\"body\":\"\"}',\n )\n\n self.assertEqual(\n client.stop(),\n r'{\"command\":\"STOP\",\"request_id\":2,\"channel_id\":\"testuie46e4ui6\",\"headers\":{},\"body\":\"\"}',\n )\n\n # we can stream too\n client.send_audio_chunk(b\"trntrntrntnrtsrn\")\n\n def test_set_params(self):\n client = Recognizer()\n client.receive(session_opened)\n self.assertEqual(\n client.set_params(speech_language=\"fr\", confidence_threshold=0.7),\n 
r'{\"command\":\"SET-PARAMS\",\"request_id\":1,\"channel_id\":\"testuie46e4ui6\",\"headers\":{\"speech_language\":\"fr\",\"confidence_threshold\":0.7},\"body\":\"\"}',\n )\n\n def test_recognition_complete(self):\n client = Recognizer()\n client.receive(session_opened)\n client.receive(recognition_in_progress)\n with self.assertRaises(ProtocolError):\n client.recognize(\"builtin:speech/transcribe\")\n client.receive(recognition_complete)\n # Now we can start another recognition process\n client.recognize(\"builtin:speech/transcribe\")\n","repo_name":"uhlive/python-sdk","sub_path":"tests/test_recog_connection.py","file_name":"test_recog_connection.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"69967847875","text":"state = []\nhash_len=50\nfor i in range(0,hash_len):\n\tstate.append(0)\ndef round():\n\tfor i in range(0,hash_len):\n\t\tstate[i]=(state[i]+state[(i+1)%hash_len])%128\ndef absorb(message):\n\tmessage_list = list(message)\n\tmessage_list_ints = []\n\tfor m in message_list:\n\t\tmessage_list_ints.append(ord(m))\n\tmessage_len = len(message_list_ints)\n\tfor i in range(hash_len):\n\t\tstate[i]=state[i]+message_list_ints[i%message_len]\ndef hash(message):\n\tabsorb(message)\n\tfor i in range(0,21):\n\t\tround()\n\thash_string = \"\"\n\tfor c in range(0,hash_len):\n\t\tadd_str = hex(state[c])[2:]\n\t\tif len(add_str) < 2:\n\t\t\tadd_str=\"0\"+add_str\n\t\thash_string+=add_str\n\tprint('0x' + hash_string)\n\treturn \"0x\"+hash_string\n#message = input(\"message>\")\n\n#hash(message)\n","repo_name":"eiharun/Personal-Projects","sub_path":"School/MCC/BadHash/BadHashCTF.py","file_name":"BadHashCTF.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35465558706","text":"# 给定一个大小为 n 的数组,找到其中的多数元素。多数元素是指在数组中出现次数大于 ⌊ n/2 ⌋ 的元素。\n#\n# 你可以假设数组是非空的,并且给定的数组总是存在多数元素。\n\n# 使用一个计数器,遍历每个元素,当遇到其他元素使计数器-1,相同+1,为0更换新的元素\nfrom typing import List\nclass Solution:\n def majorityElement(self,nums:List[int]) ->int :\n count, majority = 1, nums[0]\n for num in nums[1:]:\n if count == 0:\n majority = num\n if num == majority:\n count+=1\n else:\n count -= 1\n return majority\n","repo_name":"Freddy5299/LeetCode","sub_path":"test_algorithm/169. Majority Element.py","file_name":"169. 
Majority Element.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15140467594","text":"from collections import defaultdict\nimport sqlite3\nconn = sqlite3.connect('stat.db')\nc = conn.cursor()\nf = open('markers.txt','w+')\nfor row in c.execute('select * from markers order by tag,count desc;'):\n f.write(row[0] +' & '+row[1]+' & '+str(row[2])+'\\\\\\\\')\n f.write('\\n\\\\hline\\n')\n\n# stats = defaultdict(int)\n\n# sentences = open('20171089.txt').read().split('\\n----------------------------------------\\n')\n# for sentence in sentences:\n# words = sentence.split('\\n')[1:-1]\n# for word in words:\n# attrs = word.split('\\t')\n# if(attrs[7]=='lwg__psp'):\n# head = words[int(attrs[6])-1]\n# head_attrs = head.split('\\t')\n# stats[(head_attrs[7],attrs[1])]+=1\n\n# print(stats)\n\n# for stat in stats:\n# c.execute(\"INSERT INTO markers VALUES('{tag}','{marker}',{count})\".format(tag=stat[0],marker=stat[1],count=stats[stat]))\n\nconn.commit()\nconn.close()","repo_name":"abhigyanghosh30/6th-Semester","sub_path":"Linguistics Data 2/Assignent2/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32472186011","text":"from __future__ import print_function\n\nimport random\n\nclass Maze():\n\tdef __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.walls_left = [[False for x in range(width + 1)] for y in range(height)]\n\t\tself.walls_up = [[False for x in range(width)] for y in range(height + 1)]\n\n\tdef create_bounds(self):\n\t\tfor y in range(self.height):\n\t\t\tself.walls_left[y][0] = True\n\t\t\tself.walls_left[y][self.width] = True\n\t\tfor x in range(self.width):\n\t\t\tself.walls_up[0][x] = True\n\t\t\tself.walls_up[self.height][x] = True\n\n\tdef render_text(self):\n\t\tprint('')\n\t\tfor y in range(self.height + 1):\n\t\t\t# Walls facing up\n\t\t\tfor x in range(self.width):\n\t\t\t\tprint('#', end='')\n\t\t\t\tif self.is_wall_up(x, y):\n\t\t\t\t\tprint('#', end='')\n\t\t\t\telse:\n\t\t\t\t\tprint(' ', end='')\n\t\t\tprint('#')\n\n\t\t\t# Walls facing left\n\t\t\tif y < self.height:\n\t\t\t\tfor _ in range(1):\n\t\t\t\t\tfor x in range(self.width + 1):\n\t\t\t\t\t\tif self.is_wall_left(x, y):\n\t\t\t\t\t\t\tprint('#', end='')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint(' ', end='')\n\t\t\t\t\t\tif x < self.width:\n\t\t\t\t\t\t\tprint(' ', end='')\n\t\t\t\t\tprint('')\n\t\tprint('')\n\n\tdef is_wall_up(self, x, y):\n\t\treturn self.walls_up[y][x]\n\n\tdef is_wall_down(self, x, y):\n\t\treturn self.walls_up[y + 1][x]\n\n\tdef is_wall_left(self, x, y):\n\t\treturn self.walls_left[y][x]\n\n\tdef is_wall_right(self, x, y):\n\t\treturn self.walls_left[y][x + 1]\n\n\tdef is_wall_up_c(self, c):\n\t\treturn self.is_wall_up(c[0], c[1])\n\n\tdef is_wall_down_c(self, c):\n\t\treturn self.is_wall_down(c[0], c[1])\n\n\tdef is_wall_left_c(self, c):\n\t\treturn self.is_wall_left(c[0], c[1])\n\n\tdef is_wall_right_c(self, c):\n\t\treturn self.is_wall_right(c[0], c[1])\n\n\tdef get_walls(self, x, y):\n\t\treturn (self.is_wall_up(x, y), self.is_wall_left(x, y), self.is_wall_down(x, y), self.is_wall_right(x, y))\n\n\tdef get_walls_c(self, c):\n\t\treturn (self.is_wall_up_c(c), self.is_wall_left_c(c), self.is_wall_down_c(c), self.is_wall_right_c(c))\n\n\tdef get_neighbours(self, x, y):\n\t\tneighbours = []\n\t\tif y > 0 and not 
self.is_wall_up(x, y):\n\t\t\tneighbours.append((x, y - 1))\n\t\tif y + 1 < self.height and not self.is_wall_down(x, y):\n\t\t\tneighbours.append((x, y + 1))\n\t\tif x > 0 and not self.is_wall_left(x, y):\n\t\t\tneighbours.append((x - 1, y))\n\t\tif x + 1 < self.width and not self.is_wall_right(x, y):\n\t\t\tneighbours.append((x + 1, y))\n\t\treturn neighbours\n\n\tdef get_neighbours_c(self, c):\n\t\treturn self.get_neighbours(c[0], c[1])\n\n\tdef fully_connected(self):\n\t\t# Breadth-first search from the origin, counting reachable cells\n\t\tqueue = [(0, 0)]\n\t\tvisited = set(queue)\n\t\tcount = 1\n\n\t\twhile len(queue) > 0:\n\t\t\tcell = queue[0]\n\t\t\tqueue = queue[1:]\n\n\t\t\tfor n in self.get_neighbours_c(cell):\n\t\t\t\tif n not in visited:\n\t\t\t\t\tqueue.append(n)\n\t\t\t\t\tvisited.add(n)\n\t\t\t\t\tcount += 1\n\n\t\t\tif count > self.width * self.height:\n\t\t\t\tprint('VISITED MORE CELLS THAN THE MAZE CONTAINS')\n\t\t\t\treturn False\n\n\t\treturn count == self.width * self.height\n\n\tdef test_add_wall(self, is_up, c, full_check=False):\n\t\tif is_up:\n\t\t\tif self.walls_up[c[1]][c[0]]:\n\t\t\t\treturn True\n\t\t\tc2 = (c[0], c[1] - 1)\n\t\t\tself.walls_up[c[1]][c[0]] = True\n\t\telse:\n\t\t\tif self.walls_left[c[1]][c[0]]:\n\t\t\t\treturn True\n\t\t\tc2 = (c[0] - 1, c[1])\n\t\t\tself.walls_left[c[1]][c[0]] = True\n\n\t\tviolated = self.get_walls_c(c).count(False) < 2 or self.get_walls_c(c2).count(False) < 2\n\t\tif full_check:\n\t\t\tviolated = violated or not self.fully_connected()\n\n\t\tif violated:\n\t\t\tif is_up:\n\t\t\t\tself.walls_up[c[1]][c[0]] = False\n\t\t\telse:\n\t\t\t\tself.walls_left[c[1]][c[0]] = False\n\t\t\treturn False\n\t\treturn True\n\ndef generate_braid_maze(width=10, height=10):\n\tmaze = Maze(width, height)\n\tmaze.create_bounds()\n\n\t# Remove all pole walls\n\tall_poles = set() # Poles are above and left of these cells\n\tpole_options = []\n\tfor y in range(1, maze.height):\n\t\tfor x in range(1, maze.width):\n\t\t\tall_poles.add((x, y))\n\t\t\tif x + 1 < maze.width and y + 1 < maze.height:\n\t\t\t\tpole_options.append((x, y))\n\trandom.shuffle(pole_options)\n\n\t# Quick, cheap pole removal, I think this creates a nicer texture\n\tfor pole in pole_options:\n\t\tif pole in all_poles:\n\t\t\tif random.random() < 0.5:\n\t\t\t\tif maze.test_add_wall(True, pole):\n\t\t\t\t\tall_poles.discard(pole)\n\t\t\t\t\tall_poles.discard((pole[0] + 1, pole[1]))\n\t\t\telse:\n\t\t\t\tif maze.test_add_wall(False, pole):\n\t\t\t\t\tall_poles.discard(pole)\n\t\t\t\t\tall_poles.discard((pole[0], pole[1] + 1))\n\n\t# Populate a list of all walls that can be added\n\twall_options = [] # Tuples of (wall is up?, (x, y))\n\tfor y in range(maze.height):\n\t\tfor x in range(maze.width):\n\t\t\tif y > 0:\n\t\t\t\twall_options.append((True, (x, y)))\n\t\t\tif x > 0:\n\t\t\t\twall_options.append((False, (x, y)))\n\trandom.shuffle(wall_options)\n\n\t# Add each wall that does not violate the maze principle\n\tfor wall in wall_options:\n\t\tif random.random() < 0.7: # Leave some corridors open\n\t\t\tmaze.test_add_wall(wall[0], wall[1], full_check=True)\n\n\treturn maze\n\nif __name__ == '__main__':\n\tgenerate_braid_maze(20, 20)","repo_name":"sgilhuly/mire","sub_path":"app/braid.py","file_name":"braid.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"30663072118","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\nimport utilities\n\n\ndef scrape_el_deber(url, soup):\n
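# A braid maze has no dead ends, which test_add_wall above enforces by
# rejecting any wall that would leave a cell with fewer than two open sides.
# A quick check of that invariant (get_walls returns a 4-tuple of booleans,
# True meaning a wall is present):
def assert_no_dead_ends(maze):
    for y in range(maze.height):
        for x in range(maze.width):
            assert maze.get_walls(x, y).count(False) >= 2, f'dead end at {(x, y)}'

# maze = generate_braid_maze(10, 10)
# assert_no_dead_ends(maze)
# maze.render_text()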
print('Scraping website {}'.format(url))\n data = {}\n data['type'] = 'article'\n data['source'] = url\n data['title'] = utilities.clean_soup(soup.find('h1'))\n data['text'] = ''\n for t in soup.find_all('span', style=\"mso-ansi-language:ES-MX\"):\n data['text'] = data['text'] + utilities.clean_soup(t) + ' '\n return json.dumps(data, ensure_ascii=False)","repo_name":"wirauzz/Web-Scrapping-VS","sub_path":"Scrappers/scrape_el_deber.py","file_name":"scrape_el_deber.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5009852146","text":"import copy\nimport math\nimport warnings\nfrom functools import partial\nfrom typing import Optional\nfrom typing import Union\n\nimport attr\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_lightning.utilities import AttributeDict\nfrom torch.utils.data import DataLoader\n\nimport utils\nfrom batchrenorm import BatchRenorm1d\nfrom lars import LARS\nfrom model_params import ModelParams\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef get_mlp_normalization(hparams: ModelParams, prediction=False):\n normalization_str = hparams.mlp_normalization\n if prediction and hparams.prediction_mlp_normalization != \"same\":\n normalization_str = hparams.prediction_mlp_normalization\n\n if normalization_str is None:\n return None\n elif normalization_str == \"bn\":\n return partial(torch.nn.BatchNorm1d, num_features=hparams.mlp_hidden_dim)\n elif normalization_str == \"br\":\n return partial(BatchRenorm1d, num_features=hparams.mlp_hidden_dim)\n elif normalization_str == \"ln\":\n return partial(torch.nn.LayerNorm, normalized_shape=[hparams.mlp_hidden_dim])\n elif normalization_str == \"gn\":\n return partial(torch.nn.GroupNorm, num_channels=hparams.mlp_hidden_dim, num_groups=32)\n else:\n raise NotImplementedError(f\"mlp normalization {normalization_str} not implemented\")\n\n\nclass SelfSupervisedMethod(pl.LightningModule):\n model: torch.nn.Module\n dataset: utils.DatasetBase\n hparams: AttributeDict\n embedding_dim: Optional[int]\n\n def __init__(\n self,\n hparams: Union[ModelParams, dict, None] = None,\n **kwargs,\n ):\n super().__init__()\n\n if hparams is None:\n hparams = self.params(**kwargs)\n elif isinstance(hparams, dict):\n hparams = self.params(**hparams, **kwargs)\n\n if isinstance(self.hparams, AttributeDict):\n self.hparams.update(AttributeDict(attr.asdict(hparams)))\n else:\n self.hparams = AttributeDict(attr.asdict(hparams))\n\n # Check for configuration issues\n if (\n hparams.gather_keys_for_queue\n and not hparams.shuffle_batch_norm\n and not hparams.encoder_arch.startswith(\"ws_\")\n ):\n warnings.warn(\n \"Configuration suspicious: gather_keys_for_queue without shuffle_batch_norm or weight standardization\"\n )\n\n some_negative_examples = hparams.use_negative_examples_from_batch or hparams.use_negative_examples_from_queue\n if hparams.loss_type == \"ce\" and not some_negative_examples:\n warnings.warn(\"Configuration suspicious: cross entropy loss without negative examples\")\n\n # Create encoder model\n self.model = utils.get_encoder(hparams.encoder_arch, hparams.dataset_name)\n\n # Create dataset\n self.dataset = utils.get_moco_dataset(hparams)\n\n if hparams.use_lagging_model:\n # \"key\" function (no grad)\n self.lagging_model = copy.deepcopy(self.model)\n for param in self.lagging_model.parameters():\n param.requires_grad = False\n else:\n self.lagging_model = None\n\n self.projection_model = 
utils.MLP(\n hparams.embedding_dim,\n hparams.dim,\n hparams.mlp_hidden_dim,\n num_layers=hparams.projection_mlp_layers,\n normalization=get_mlp_normalization(hparams),\n weight_standardization=hparams.use_mlp_weight_standardization,\n )\n\n self.prediction_model = utils.MLP(\n hparams.dim,\n hparams.dim,\n hparams.mlp_hidden_dim,\n num_layers=hparams.prediction_mlp_layers,\n normalization=get_mlp_normalization(hparams, prediction=True),\n weight_standardization=hparams.use_mlp_weight_standardization,\n )\n\n if hparams.use_lagging_model:\n # \"key\" function (no grad)\n self.lagging_projection_model = copy.deepcopy(self.projection_model)\n for param in self.lagging_projection_model.parameters():\n param.requires_grad = False\n else:\n self.lagging_projection_model = None\n\n # this classifier is used to compute representation quality each epoch\n self.sklearn_classifier = LogisticRegression(max_iter=100, solver=\"liblinear\")\n\n if hparams.use_negative_examples_from_queue:\n # create the queue\n self.register_buffer(\"queue\", torch.randn(hparams.dim, hparams.K))\n self.queue = torch.nn.functional.normalize(self.queue, dim=0)\n self.register_buffer(\"queue_ptr\", torch.zeros(1, dtype=torch.long))\n else:\n self.queue = None\n\n def _get_embeddings(self, x):\n \"\"\"\n Input:\n im_q: a batch of query images\n im_k: a batch of key images\n Output:\n logits, targets\n \"\"\"\n bsz, nd, nc, nh, nw = x.shape\n assert nd == 2, \"second dimension should be the split image -- dims should be N2CHW\"\n im_q = x[:, 0].contiguous()\n im_k = x[:, 1].contiguous()\n\n # compute query features\n emb_q = self.model(im_q)\n q_projection = self.projection_model(emb_q)\n q = self.prediction_model(q_projection) # queries: NxC\n if self.hparams.use_lagging_model:\n # compute key features\n with torch.no_grad(): # no gradient to keys\n if self.hparams.shuffle_batch_norm:\n im_k, idx_unshuffle = utils.BatchShuffleDDP.shuffle(im_k)\n k = self.lagging_projection_model(self.lagging_model(im_k)) # keys: NxC\n if self.hparams.shuffle_batch_norm:\n k = utils.BatchShuffleDDP.unshuffle(k, idx_unshuffle)\n else:\n emb_k = self.model(im_k)\n k_projection = self.projection_model(emb_k)\n k = self.prediction_model(k_projection) # queries: NxC\n\n if self.hparams.use_unit_sphere_projection:\n q = torch.nn.functional.normalize(q, dim=1)\n k = torch.nn.functional.normalize(k, dim=1)\n\n return emb_q, q, k\n\n def _get_contrastive_predictions(self, q, k):\n if self.hparams.use_negative_examples_from_batch:\n logits = torch.mm(q, k.T)\n labels = torch.arange(0, q.shape[0], dtype=torch.long).to(logits.device)\n return logits, labels\n\n # compute logits\n # Einstein sum is more intuitive\n # positive logits: Nx1\n l_pos = torch.einsum(\"nc,nc->n\", [q, k]).unsqueeze(-1)\n\n if self.hparams.use_negative_examples_from_queue:\n # negative logits: NxK\n l_neg = torch.einsum(\"nc,ck->nk\", [q, self.queue.clone().detach()])\n logits = torch.cat([l_pos, l_neg], dim=1)\n else:\n logits = l_pos\n\n # labels: positive key indicators\n labels = torch.zeros(logits.shape[0], dtype=torch.long).to(logits.device)\n\n return logits, labels\n\n def _get_pos_neg_ip(self, emb_q, k):\n with torch.no_grad():\n z = self.projection_model(emb_q)\n z = torch.nn.functional.normalize(z, dim=1)\n ip = torch.mm(z, k.T)\n eye = torch.eye(z.shape[0]).to(z.device)\n pos_ip = (ip * eye).sum() / z.shape[0]\n neg_ip = (ip * (1 - eye)).sum() / (z.shape[0] * (z.shape[0] - 1))\n\n return pos_ip, neg_ip\n\n def _get_contrastive_loss(self, logits, labels):\n if 
self.hparams.loss_type == \"ce\":\n if self.hparams.use_eqco_margin:\n if self.hparams.use_negative_examples_from_batch:\n neg_factor = self.hparams.eqco_alpha / self.hparams.batch_size\n elif self.hparams.use_negative_examples_from_queue:\n neg_factor = self.hparams.eqco_alpha / self.hparams.K\n else:\n raise Exception(\"Must have negative examples for ce loss\")\n\n predictions = utils.log_softmax_with_factors(logits / self.hparams.T, neg_factor=neg_factor)\n return F.nll_loss(predictions, labels)\n\n return F.cross_entropy(logits / self.hparams.T, labels)\n\n new_labels = torch.zeros_like(logits)\n new_labels.scatter_(1, labels.unsqueeze(1), 1)\n if self.hparams.loss_type == \"bce\":\n return F.binary_cross_entropy_with_logits(logits / self.hparams.T, new_labels) * logits.shape[1]\n\n if self.hparams.loss_type == \"ip\":\n # inner product\n # negative sign for label=1 (maximize ip), positive sign for label=0 (minimize ip)\n inner_product = (1 - new_labels * 2) * logits\n return torch.mean((inner_product + 1).sum(dim=-1))\n\n raise NotImplementedError(f\"Loss function {self.hparams.loss_type} not implemented\")\n\n def _get_vicreg_loss(self, z_a, z_b, batch_idx):\n assert z_a.shape == z_b.shape and len(z_a.shape) == 2\n\n # invariance loss\n loss_inv = F.mse_loss(z_a, z_b)\n\n # variance loss\n std_z_a = torch.sqrt(z_a.var(dim=0) + self.hparams.variance_loss_epsilon)\n std_z_b = torch.sqrt(z_b.var(dim=0) + self.hparams.variance_loss_epsilon)\n loss_v_a = torch.mean(F.relu(1 - std_z_a))\n loss_v_b = torch.mean(F.relu(1 - std_z_b))\n loss_var = loss_v_a + loss_v_b\n\n # covariance loss\n N, D = z_a.shape\n z_a = z_a - z_a.mean(dim=0)\n z_b = z_b - z_b.mean(dim=0)\n cov_z_a = ((z_a.T @ z_a) / (N - 1)).square() # DxD\n cov_z_b = ((z_b.T @ z_b) / (N - 1)).square() # DxD\n loss_c_a = (cov_z_a.sum() - cov_z_a.diagonal().sum()) / D\n loss_c_b = (cov_z_b.sum() - cov_z_b.diagonal().sum()) / D\n loss_cov = loss_c_a + loss_c_b\n\n weighted_inv = loss_inv * self.hparams.invariance_loss_weight\n weighted_var = loss_var * self.hparams.variance_loss_weight\n weighted_cov = loss_cov * self.hparams.covariance_loss_weight\n\n loss = weighted_inv + weighted_var + weighted_cov\n\n return {\n \"loss\": loss,\n \"loss_invariance\": weighted_inv,\n \"loss_variance\": weighted_var,\n \"loss_covariance\": weighted_cov,\n }\n\n def forward(self, x):\n return self.model(x)\n\n def training_step(self, batch, batch_idx, optimizer_idx=None):\n all_params = list(self.model.parameters())\n x, class_labels = batch # batch is a tuple, we just want the image\n\n emb_q, q, k = self._get_embeddings(x)\n pos_ip, neg_ip = self._get_pos_neg_ip(emb_q, k)\n\n logits, labels = self._get_contrastive_predictions(q, k)\n if self.hparams.use_vicreg_loss:\n losses = self._get_vicreg_loss(q, k, batch_idx)\n contrastive_loss = losses[\"loss\"]\n else:\n losses = {}\n contrastive_loss = self._get_contrastive_loss(logits, labels)\n\n if self.hparams.use_both_augmentations_as_queries:\n x_flip = torch.flip(x, dims=[1])\n emb_q2, q2, k2 = self._get_embeddings(x_flip)\n logits2, labels2 = self._get_contrastive_predictions(q2, k2)\n\n pos_ip2, neg_ip2 = self._get_pos_neg_ip(emb_q2, k2)\n pos_ip = (pos_ip + pos_ip2) / 2\n neg_ip = (neg_ip + neg_ip2) / 2\n contrastive_loss += self._get_contrastive_loss(logits2, labels2)\n\n contrastive_loss = contrastive_loss.mean() * self.hparams.loss_constant_factor\n\n log_data = {\n \"step_train_loss\": contrastive_loss,\n \"step_pos_cos\": pos_ip,\n \"step_neg_cos\": neg_ip,\n **losses,\n }\n\n with 
torch.no_grad():\n self._momentum_update_key_encoder()\n\n some_negative_examples = (\n self.hparams.use_negative_examples_from_batch or self.hparams.use_negative_examples_from_queue\n )\n if some_negative_examples:\n acc1, acc5 = utils.calculate_accuracy(logits, labels, topk=(1, 5))\n log_data.update({\"step_train_acc1\": acc1, \"step_train_acc5\": acc5})\n\n # dequeue and enqueue\n if self.hparams.use_negative_examples_from_queue:\n self._dequeue_and_enqueue(k)\n\n self.log_dict(log_data)\n return {\"loss\": contrastive_loss}\n\n def validation_step(self, batch, batch_idx):\n x, class_labels = batch\n with torch.no_grad():\n emb = self.model(x)\n\n return {\"emb\": emb, \"labels\": class_labels}\n\n def validation_epoch_end(self, outputs):\n embeddings = torch.cat([x[\"emb\"] for x in outputs]).cpu().detach().numpy()\n labels = torch.cat([x[\"labels\"] for x in outputs]).cpu().detach().numpy()\n num_split_linear = embeddings.shape[0] // 2\n self.sklearn_classifier.fit(embeddings[:num_split_linear], labels[:num_split_linear])\n train_accuracy = self.sklearn_classifier.score(embeddings[:num_split_linear], labels[:num_split_linear]) * 100\n valid_accuracy = self.sklearn_classifier.score(embeddings[num_split_linear:], labels[num_split_linear:]) * 100\n\n log_data = {\n \"epoch\": self.current_epoch,\n \"train_class_acc\": train_accuracy,\n \"valid_class_acc\": valid_accuracy,\n \"T\": self._get_temp(),\n \"m\": self._get_m(),\n }\n print(f\"Epoch {self.current_epoch} accuracy: train: {train_accuracy:.1f}%, validation: {valid_accuracy:.1f}%\")\n self.log_dict(log_data)\n\n def configure_optimizers(self):\n # exclude bias and batch norm from LARS and weight decay\n regular_parameters = []\n regular_parameter_names = []\n excluded_parameters = []\n excluded_parameter_names = []\n for name, parameter in self.named_parameters():\n if parameter.requires_grad is False:\n continue\n if any(x in name for x in self.hparams.exclude_matching_parameters_from_lars):\n excluded_parameters.append(parameter)\n excluded_parameter_names.append(name)\n else:\n regular_parameters.append(parameter)\n regular_parameter_names.append(name)\n\n param_groups = [\n {\"params\": regular_parameters, \"names\": regular_parameter_names, \"use_lars\": True},\n {\n \"params\": excluded_parameters,\n \"names\": excluded_parameter_names,\n \"use_lars\": False,\n \"weight_decay\": 0,\n },\n ]\n if self.hparams.optimizer_name == \"sgd\":\n optimizer = torch.optim.SGD\n elif self.hparams.optimizer_name == \"lars\":\n optimizer = partial(LARS, warmup_epochs=self.hparams.lars_warmup_epochs, eta=self.hparams.lars_eta)\n else:\n raise NotImplementedError(f\"No such optimizer {self.hparams.optimizer_name}\")\n\n encoding_optimizer = optimizer(\n param_groups,\n lr=self.hparams.lr,\n momentum=self.hparams.momentum,\n weight_decay=self.hparams.weight_decay,\n )\n self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n encoding_optimizer,\n self.hparams.max_epochs,\n eta_min=self.hparams.final_lr_schedule_value,\n )\n return [encoding_optimizer], [self.lr_scheduler]\n\n def _get_m(self):\n if self.hparams.use_momentum_schedule is False:\n return self.hparams.m\n return 1 - (1 - self.hparams.m) * (math.cos(math.pi * self.current_epoch / self.hparams.max_epochs) + 1) / 2\n\n def _get_temp(self):\n return self.hparams.T\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n \"\"\"\n Momentum update of the key encoder\n \"\"\"\n if not self.hparams.use_lagging_model:\n return\n m = self._get_m()\n for param_q, param_k in 
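# The lagging ("key") encoder here is an exponential moving average of the
# online encoder, theta_k <- m * theta_k + (1 - m) * theta_q, and _get_m can
# anneal m toward 1 with the cosine schedule
#   m(t) = 1 - (1 - m) * (cos(pi * t / T) + 1) / 2.
# Worked values assuming m = 0.996 and T = 100 epochs:
#   t = 0:   1 - 0.004 * (cos(0) + 1) / 2     = 0.996
#   t = 50:  1 - 0.004 * (cos(pi/2) + 1) / 2  = 0.998
#   t = 100: 1 - 0.004 * (cos(pi) + 1) / 2    = 1.0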
zip(self.model.parameters(), self.lagging_model.parameters()):\n param_k.data = param_k.data * m + param_q.data * (1.0 - m)\n for param_q, param_k in zip(self.projection_model.parameters(), self.lagging_projection_model.parameters()):\n param_k.data = param_k.data * m + param_q.data * (1.0 - m)\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, keys):\n # gather keys before updating queue\n if self.hparams.gather_keys_for_queue:\n keys = utils.concat_all_gather(keys)\n\n batch_size = keys.shape[0]\n\n ptr = int(self.queue_ptr)\n assert self.hparams.K % batch_size == 0 # for simplicity\n\n # replace the keys at ptr (dequeue and enqueue)\n self.queue[:, ptr : ptr + batch_size] = keys.T\n ptr = (ptr + batch_size) % self.hparams.K # move pointer\n\n self.queue_ptr[0] = ptr\n\n def prepare_data(self) -> None:\n self.dataset.get_train()\n self.dataset.get_validation()\n\n def train_dataloader(self):\n return DataLoader(\n self.dataset.get_train(),\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_data_workers,\n pin_memory=self.hparams.pin_data_memory,\n drop_last=self.hparams.drop_last_batch,\n shuffle=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.dataset.get_validation(),\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_data_workers,\n pin_memory=self.hparams.pin_data_memory,\n drop_last=self.hparams.drop_last_batch,\n )\n\n @classmethod\n def params(cls, **kwargs) -> ModelParams:\n return ModelParams(**kwargs)\n","repo_name":"untitled-ai/self_supervised","sub_path":"moco.py","file_name":"moco.py","file_ext":"py","file_size_in_byte":17614,"program_lang":"python","lang":"en","doc_type":"code","stars":496,"dataset":"github-code","pt":"61"} +{"seq_id":"27271799940","text":"# Right triangle\nwhile(1):\n list_=list(map(int,input().split())) # read the three sides into a list\n list_.sort() # sort so the longest side is last\n if(list_[0]==0 and list_[1]==0 and list_[2]==0): # stop when 0 0 0 is entered\n break\n # Pythagorean theorem\n if(list_[2]*list_[2]==list_[0]*list_[0]+list_[1]*list_[1]):\n print(\"right\")\n else:\n print(\"wrong\")","repo_name":"mseo39/python","sub_path":"python_algorithm/step9/4153.py","file_name":"4153.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37959837062","text":"# Directly generate standard exercise (heart-rate) data\nimport os,json\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\n\ndef createBaseLine():\n x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]\n y = [80, 90, 130, 160, 144, 139, 134, 140, 133, 142, 129, 135, 140, 138, 132, 138, 142, 133, 132, 138, 142, 137,\n 133, 138, 142, 134, 139]\n x = np.array(x).reshape([len(x), 1])\n y = np.array(y)\n\n degree = 6\n poly_reg = PolynomialFeatures(degree=degree)\n x_poly = poly_reg.fit_transform(x)\n lin_reg2 = LinearRegression()\n\n lin_reg2.fit(x_poly, y)\n return lin_reg2,len(x)\n\n\ndef createDeclineHeartData():\n x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ]\n y = [138, 126, 112, 99, 93, 92, 89, 101, 123, 133, 138]\n x = np.array(x).reshape([len(x), 1])\n y = np.array(y)\n\n degree = 6\n poly_reg = PolynomialFeatures(degree=degree)\n x_poly = poly_reg.fit_transform(x)\n lin_reg2 = LinearRegression()\n\n lin_reg2.fit(x_poly, y)\n return lin_reg2,len(x)\n\n\ndef createData(uid=\"10001\",prescription=\"10001\",startTime=\"2020-06-30 18:12\"):\n baseLine,xRange = createBaseLine()\n declineLine,xRange1 = 
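# A plain-tensor sketch of the negatives queue updated above: the queue is a
# (dim, K) ring buffer whose write pointer advances by batch_size each step,
# which is why the code asserts K % batch_size == 0. Sizes here are arbitrary.
import torch

dim, K, batch = 4, 8, 2
queue, ptr = torch.randn(dim, K), 0
keys = torch.randn(batch, dim)      # newest keys from the current step
queue[:, ptr:ptr + batch] = keys.T  # overwrite the oldest entries
ptr = (ptr + batch) % K             # wrap the pointer like a ring buffer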
createDeclineHeartData()\n data = []\n reg = PolynomialFeatures(degree=6)\n for i in range(xRange):\n y = baseLine.predict(reg.fit_transform(np.array([i]).reshape([1,1])))\n try:\n y = y.tolist()[0]\n data.append(y)\n except:\n pass\n start = 6\n x = start\n end = 26\n i = 0\n while(i<20000):\n y = baseLine.predict(reg.fit_transform(np.array([x]).reshape([1, 1])))\n try:\n y = y.tolist()[0]\n data.append(y)\n except:\n pass\n i += 1\n x += 1\n if x>end:\n x = start\n return {\n \"uid\":uid,\n \"prescription\":prescription,\n \"startTime\":startTime,\n \"data\":data,\n }\n\n\n\n\n\nif __name__ == '__main__':\n os.chdir(\"../\")\n data = createData()\n path = \"./DataBase/standard.json\"\n with open(path,\"w\") as f:\n f.write(json.dumps(data))","repo_name":"lxy1492/SportsPrescription","sub_path":"SportsData/standardHeartRateData.py","file_name":"standardHeartRateData.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6810430677","text":"\nimport arcade\n\n\ndef diagnose_name_gender_attraction_health(fish_list, info_list):\n # Funtions som skriver ut fiskarnas status\n # Informationen ligger lagrad i \"info_list\"\n if len(info_list) < len(fish_list):\n list_length = len(info_list)\n else:\n list_length = len(fish_list)\n for i in range(list_length):\n x = fish_list[i].center_x\n y = fish_list[i].center_y\n arcade.draw_text(str(info_list[i][0]) + \" \" + str(info_list[i][1]), x, y + 24, arcade.color.BLACK, 18)\n arcade.draw_text(str(info_list[i][2]), x, y, arcade.color.BLACK, 18)\n arcade.draw_text(str(info_list[i][3]), x, y, arcade.color.BLACK, 18, anchor_x=\"left\", anchor_y=\"top\")\n","repo_name":"owlnical/fc-aqua-fish","sub_path":"functions/diagnose_name_gender_attraction_health.py","file_name":"diagnose_name_gender_attraction_health.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"28127494253","text":"import telebot\nfrom telebot import types\nimport time\nimport btns\nimport get_airtable\nimport strings\n\n\n\n\ntime_out={}\nbot = telebot.TeleBot('5885884113:AAE_TcJavu1iWb1NTF3EZ7g-2iuMEWBm6gY') # this is test\n#bot = telebot.TeleBot('6148758021:AAGl4BGfSnTuwyKoDUKRP3UJlPIy3Pv2OqM') # this is prod\n@bot.message_handler(commands=[\"start\"])\ndef Start(message):\n main_menu(message, True)\n get_airtable.add_user_id(message.chat.id, message.from_user.username)\n@bot.message_handler(content_types=[\"text\"])\ndef handle_text(message):\n\n # user's func\n if message.text== btns.take_place.text:\n take_place_st_1(message)\n menu_time_out(message)\n if message.text== btns.my_reg.text:\n pass\n\n\n # my func\n if message.text== btns.test.text and message.chat.id==214130351:\n pass\n if message.text== btns.ping.text and message.chat.id==214130351:\n bot.send_message(message.chat.id, text=btns.ping.text)\n\ndef menu_time_out(message):\n time_out[message.chat.id]=1\n time.sleep(600)\n if time_out[message.chat.id]==1:\n main_menu(message, 2)\n time_out[message.chat.id]=0\ndef main_menu(message, reason): # 0-ending process, 1-first time, 2-timeout\n bot.clear_step_handler_by_chat_id(message.chat.id)\n time_out[message.chat.id] = 0\n if message.chat.id==214130351:\n markup= btns.my_main_menu_markup\n else:\n markup= btns.user_main_menu_markup\n if reason==1:\n text= strings.hello_text\n elif reason==2:\n text=strings.time_out\n else:\n text= strings.main_menu_text\n 
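# A minimal sketch of the fit/predict pattern used above: PolynomialFeatures
# expands x into [1, x, x^2, ..., x^d] and LinearRegression fits the expanded
# design matrix. The original rebuilds PolynomialFeatures at prediction time,
# which only works because the transform is stateless for a fixed degree.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

x = np.arange(11).reshape(-1, 1)
y = np.array([138, 126, 112, 99, 93, 92, 89, 101, 123, 133, 138])  # decline curve above
poly = PolynomialFeatures(degree=6)
model = LinearRegression().fit(poly.fit_transform(x), y)
y_hat = model.predict(poly.transform([[5.0]]))  # interpolated heart rate at x=5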
bot.send_message(message.chat.id, text=text, reply_markup=markup)\n\n\n\n\ndef take_place_st_1(message):\n if message.from_user.username==None:\n bot.send_message(message.chat.id, text=strings.need_dog)\n else:\n ev_dickt= get_airtable.get_open_for_reg_event_dickt()\n if ev_dickt=={}:\n bot.send_message(message.chat.id, text=strings.there_is_no_reg_events)\n main_menu(message, 0)\n else:\n markup=types.ReplyKeyboardMarkup(resize_keyboard=True)\n for i in range(len(ev_dickt)):\n markup.add(types.KeyboardButton(list(ev_dickt.keys())[i]))\n markup.add(btns.back)\n send=bot.send_message(message.chat.id, text=strings.what_ev_for_take_place, reply_markup=markup)\n bot.register_next_step_handler(send, take_place_st_2, ev_dickt)\n\ndef take_place_st_2(message, ev_dickt):\n if message.text== btns.back.text:\n main_menu(message, 0)\n elif message.text in list(ev_dickt.keys()):\n ev_id=ev_dickt[message.text]\n ev_name=message.text\n user_name= get_airtable.get_username(message.from_user.username)\n if user_name==None:\n markup=types.ReplyKeyboardMarkup(resize_keyboard=True)\n markup.add(btns.back, btns.skip)\n send=bot.send_message(message.chat.id, text=strings.whats_ur_name, reply_markup=markup)\n bot.register_next_step_handler(send, take_place_st_3, ev_id, ev_name)\n else:\n bot.send_message(message.chat.id, text=strings.wait)\n take_place_st_4(message, ev_id, ev_name, user_name, first_time=0)\n else:\n send=bot.send_message(message.chat.id, text=strings.didnt_get_it)\n bot.register_next_step_handler(message, take_place_st_2, ev_dickt)\n\ndef take_place_st_3(message, ev_id, ev_name):\n if message.text== btns.back.text:\n main_menu(message, 0)\n elif message.text== btns.skip.text or message.text==None:\n user_name= strings.didnt_whant_show_name\n elif message.text!=None:\n user_name=message.text\n\n bot.send_message(message.chat.id, text=strings.wait)\n take_place_st_4(message, ev_id, ev_name, user_name, first_time=1)\n\ndef take_place_st_4(message, ev_id, ev_name, user_name, first_time):\n user_nick=message.from_user.username\n get_airtable.write_in_reg(user_nick, user_name, ev_id)\n if user_name!= strings.didnt_whant_show_name and first_time==1:\n get_airtable.write_in_members(user_nick, user_name)\n bot.send_message(message.chat.id, text=strings.registration_done + ev_name)\n main_menu(message, 0)\n time_out[message.chat.id]=0\n\n\n\nbot.infinity_polling(timeout=30, long_polling_timeout=15)","repo_name":"Shkoterman/HUD_ZUR","sub_path":"V1.0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41822870741","text":"# Imports\n\nimport pandas as pd\nimport supervisedML as SML\n\n# Load Data\n\ndf = pd.read_csv(r'train.csv')\n\n# Clean data\n\ndf.isna().sum().sort_values()\n\ndf = df.dropna(subset=['Embarked'])\ndf = df.replace(regex={'female': 0, 'male': 1})\ndf = df.replace(regex={'S': 0, 'C': 1, 'Q': 2})\n\ndf_train = df[:600]\ndf_test = df[601:]\n\nfeatures = ['PassengerId', 'Pclass', 'Fare', 'Parch', 'SibSp', 'Sex', 'Embarked']\ntarget = df_train['Survived']\n\nif __name__ == \"__main__\":\n sml = SML.supervisedML(df_train, features, target, df_test)\n\n prediction = sml.makePrediction()\n\n # evaluate\n\n df_test_evaluate = df_test.copy()\n df_test_evaluate['pred'] = prediction\n df_test_evaluate['diff'] = df_test_evaluate['Survived'] - df_test_evaluate['pred']\n\n wrong_pred = len(df_test_evaluate.loc[df_test_evaluate['diff'] != 0])\n total_length_test = 
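# A minimal sketch of the conversation pattern used by the handlers above
# (pyTelegramBotAPI): each step sends a prompt and registers the handler for
# the next incoming message, threading state through the handler's extra
# arguments. The function names and prompt text here are illustrative.
def ask_name(bot, message):
    sent = bot.send_message(message.chat.id, 'What is your name?')
    bot.register_next_step_handler(sent, save_name, {'step': 1})

def save_name(message, state):
    state['name'] = message.text
    # ...send the next prompt and register the next step, passing state along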
len(df_test_evaluate)\n    print(f\"{wrong_pred} out of {total_length_test} were wrongly predicted\\n-->{(wrong_pred / total_length_test) * 100}%\")\n","repo_name":"GeorgKaltenbrunner/supervisedML","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1600244870","text":"import csv\nimport glob\nimport statistics\n\nimport pandas as pd\nimport numpy as np\nimport numpy.linalg as la\nimport os.path\nimport cv2\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom colorsys import hsv_to_rgb  # assumed intended import: hsv_to_rgb is called below but was never imported\n\n# from local_test.yolov5 import detect\n# from PROJET.YOLOPv2 import demo\nEPSILON = 1e-10\n\n\ndef get_axes_of_a_view(view):\n    x_axis = view['x-axis']\n    y_axis = view['y-axis']\n\n    x_axis_norm = la.norm(x_axis)\n    y_axis_norm = la.norm(y_axis)\n\n    if (x_axis_norm < EPSILON or y_axis_norm < EPSILON):\n        raise ValueError(\"Norm of input vector(s) too small.\")\n\n    # normalize the axes\n    x_axis = x_axis / x_axis_norm\n    y_axis = y_axis / y_axis_norm\n\n    # make a new y-axis which lies in the original x-y plane, but is orthogonal to x-axis\n    y_axis = y_axis - x_axis * np.dot(y_axis, x_axis)\n\n    # create orthogonal z-axis\n    z_axis = np.cross(x_axis, y_axis)\n\n    # calculate and check y-axis and z-axis norms\n    y_axis_norm = la.norm(y_axis)\n    z_axis_norm = la.norm(z_axis)\n\n    if (y_axis_norm < EPSILON) or (z_axis_norm < EPSILON):\n        raise ValueError(\"Norm of view axis vector(s) too small.\")\n\n    # make x/y/z-axes orthonormal\n    y_axis = y_axis / y_axis_norm\n    z_axis = z_axis / z_axis_norm\n\n    return x_axis, y_axis, z_axis\n\n\ndef get_origin_of_a_view(view):\n    return view['origin']\n\n\ndef get_transform_to_global(view):\n    # get axes\n    x_axis, y_axis, z_axis = get_axes_of_a_view(view)\n\n    # get origin\n    origin = get_origin_of_a_view(view)\n    transform_to_global = np.eye(4)\n\n    # rotation\n    transform_to_global[0:3, 0] = x_axis\n    transform_to_global[0:3, 1] = y_axis\n    transform_to_global[0:3, 2] = z_axis\n\n    # origin\n    transform_to_global[0:3, 3] = origin\n\n    return transform_to_global\n\n\ndef get_transform_from_global(view):\n    # get transform to global\n    transform_to_global = get_transform_to_global(view)\n    trans = np.eye(4)\n    rot = np.transpose(transform_to_global[0:3, 0:3])\n    trans[0:3, 0:3] = rot\n    trans[0:3, 3] = np.dot(rot, -transform_to_global[0:3, 3])\n\n    return trans\n\n\ndef transform_from_to(src, target):\n    transform = np.dot(get_transform_from_global(target), \\\n                       get_transform_to_global(src))\n\n    return transform\n\n\ndef project_lidar_from_to(lidar, src_view, target_view):\n    lidar = dict(lidar)\n    trans = transform_from_to(src_view, target_view)\n    points = lidar['points']\n    points_hom = np.ones((points.shape[0], 4))\n    points_hom[:, 0:3] = points\n    points_trans = (np.dot(trans, points_hom.T)).T\n    lidar['points'] = points_trans[:, 0:3]\n\n    return lidar\n\n\ndef extract_image_file_name_from_lidar_file_name(path_lidar):\n    replace_lidar = lambda x: x if x != 'lidar' else 'camera'\n    path_lidar = path_lidar.split('/')\n    path_lidar = list(map(replace_lidar, path_lidar))\n    file_name_image = path_lidar[-1].split('.')[0]\n    file_name_image = file_name_image.split('_')\n    file_name_image = file_name_image[0] + '_' + \\\n                      'camera_' + \\\n                      file_name_image[2] + '_' + \\\n                      file_name_image[3] + '.png'\n    return os.path.join(\"/\".join(path_lidar[:-1] + [file_name_image]))\n\n\ndef extract_semantic_file_name_from_image_file_name(file_name_image):\n    file_name_semantic_label = file_name_image.split('/')\n    
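# keep only the last path component; the 'camera' token is swapped for 'label' below\n    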
file_name_semantic_label = file_name_semantic_label[-1].split('.')[0]\n file_name_semantic_label = file_name_semantic_label.split('_')\n file_name_semantic_label = file_name_semantic_label[0] + '_' + \\\n 'label_' + \\\n file_name_semantic_label[2] + '_' + \\\n file_name_semantic_label[3] + '.png'\n\n return file_name_semantic_label\n\ndef extract_json_file_name_from_lidar_file_name(path_lidar):\n replace_lidar = lambda x: x if x != 'lidar' else 'camera'\n path_lidar = path_lidar.split('/')\n path_lidar = list(map(replace_lidar, path_lidar))\n file_name_json = path_lidar[-1].split('.')[0]\n file_name_json = file_name_json.split('_')\n file_name_json = file_name_json[0] + '_' + \\\n 'camera_' + \\\n file_name_json[2] + '_' + \\\n file_name_json[3] + '.json'\n return os.path.join(\"/\".join(path_lidar[:-1] + [file_name_json]))\ndef undistort_image(image, cam_name, config):\n if cam_name in ['front_left', 'front_center', \\\n 'front_right', 'side_left', \\\n 'side_right', 'rear_center']:\n # get parameters from config file\n intr_mat_undist = \\\n np.asarray(config['cameras'][cam_name]['CamMatrix'])\n intr_mat_dist = \\\n np.asarray(config['cameras'][cam_name]['CamMatrixOriginal'])\n dist_parms = \\\n np.asarray(config['cameras'][cam_name]['Distortion'])\n lens = config['cameras'][cam_name]['Lens']\n\n if (lens == 'Fisheye'):\n return cv2.fisheye.undistortImage(image, intr_mat_dist, \\\n D=dist_parms, Knew=intr_mat_undist)\n elif (lens == 'Telecam'):\n return cv2.undistort(image, intr_mat_dist, \\\n distCoeffs=dist_parms, newCameraMatrix=intr_mat_undist)\n else:\n return image\n else:\n return image\n\n\ndef map_lidar_points_onto_image(image_orig, lidar, pixel_size=3, pixel_opacity=1):\n image = np.copy(image_orig)\n\n # get rows and cols\n rows = (lidar['row'] + 0.5).astype(np.int)\n cols = (lidar['col'] + 0.5).astype(np.int)\n\n # lowest distance values to be accounted for in colour code\n MIN_DISTANCE = np.min(lidar['distance'])\n # largest distance values to be accounted for in colour code\n MAX_DISTANCE = np.max(lidar['distance'])\n\n # get distances\n distances = lidar['distance']\n # determine point colours from distance\n colours = (distances - MIN_DISTANCE) / (MAX_DISTANCE - MIN_DISTANCE)\n colours = np.asarray([np.asarray(hsv_to_rgb(0.75 * c, \\\n np.sqrt(pixel_opacity), 1.0)) for c in colours])\n pixel_rowoffs = np.indices([pixel_size, pixel_size])[0] - pixel_size // 2\n pixel_coloffs = np.indices([pixel_size, pixel_size])[1] - pixel_size // 2\n canvas_rows = image.shape[0]\n canvas_cols = image.shape[1]\n for i in range(len(rows)):\n pixel_rows = np.clip(rows[i] + pixel_rowoffs, 0, canvas_rows - 1)\n pixel_cols = np.clip(cols[i] + pixel_coloffs, 0, canvas_cols - 1)\n image[pixel_rows, pixel_cols, :] = \\\n (1. 
- pixel_opacity) * \\\n np.multiply(image[pixel_rows, pixel_cols, :], \\\n colours[i]) + pixel_opacity * 255 * colours[i]\n return image.astype(np.uint8)\n\ndef planeFit(points):\n import numpy as np\n try:\n points = np.reshape(points, (np.shape(points)[0], -1)).T\n except ValueError:\n raise ValueError(\"Error: Unable to reshape array.\")\n # Handle the error condition appropriately or re-raise the exception\n\n assert points.shape[0] <= points.shape[1], \"There are only {} points in {} dimensions.\".format(points.shape[1],\n points.shape[0])\n ctr = points.mean(axis=1)\n x = points - ctr[:, np.newaxis]\n M = np.dot(x, x.T)\n return ctr, np.linalg.svd(M)[0][:, -1]\n\n\ndef projectPoints(points, camMtx, dist):\n pose_to_cam_coord_transform = np.array([[0, -1, 0], [0, 0, -1], [1, 0, 0]])\n pixel_coords, _ = cv2.projectPoints(pose_to_cam_coord_transform.dot(points.T).T,\n np.asarray([[0, 0, 0]], dtype=np.float64),\n np.asarray([[0, 0, 0]], dtype=np.float64),\n np.asarray(camMtx),\n np.asarray(dist))\n pixel_coords = pixel_coords.squeeze()\n return pixel_coords\n\n\ndef join_txt(list_file: list):\n # create a uniq txt file from a list a txt files\n prediction_file = '/home/sa13291/Documents/ARTHUR_LAMARD/3d_projection/prediction/results_label.txt'\n with open(prediction_file, 'w') as outfile:\n for fname in list_file:\n with open(fname) as infile:\n for line in infile:\n outfile.write(line)\n return prediction_file\n\n\ndef get_bboxes_coords(image):\n # get the normal coordinates of a bbox using the yolo format\n\n # create the prediction folder if it doesn't exist\n label_path_file = '/home/sa13291/Documents/ARTHUR_LAMARD/3d_projection/prediction'\n if not os.path.exists(label_path_file):\n os.mkdir(label_path_file)\n\n list_of_file = glob.glob(str(label_path_file + '*/*/*/labels/'))\n\n '''\n Auto incrementation deactivate for the exp file -> check yolo general.py if you want to reactivate\n '''\n\n final_file_list = []\n\n # try to see if any signs/traffic lights or build detected in the images\n # read the txt file created by the yolo model if they exits\n try:\n latest_file_signs = glob.glob(str(list_of_file[0] + '*.txt'))\n latest_file_s = max(latest_file_signs, key=os.path.getctime)\n latest_file_s = change_class(latest_file_s, '2')\n if os.path.exists(str(latest_file_s)):\n file_signs_path = str(latest_file_s)\n final_file_list.append(str(file_signs_path))\n except:\n print(\"INFORMATION : No traffic sign detected\")\n pass\n\n try:\n latest_file_lane = glob.glob(str(list_of_file[2] + '*.txt'))\n latest_file_la = max(latest_file_lane, key=os.path.getctime)\n if os.path.exists(str(latest_file_la)):\n file_line_path = str(latest_file_la)\n final_file_list.append(file_line_path)\n except:\n print(\"INFORMATION : No build detected\")\n pass\n try:\n latest_file_lights = glob.glob(str(list_of_file[1] + '*.txt'))\n latest_file_l = max(latest_file_lights, key=os.path.getctime)\n latest_file_l = change_class(latest_file_l, '1')\n if os.path.exists(str(latest_file_l)):\n file_line_path = str(latest_file_l)\n final_file_list.append(file_line_path)\n except:\n print(\"INFORMATION : No traffic lights detected\")\n pass\n\n # create a single txt file from all the detection txt files\n latest_file = join_txt(final_file_list)\n png_name = os.path.basename(os.path.normpath(image))\n txt_path = str(png_name)\n final_extension = txt_path.replace('.png', '.txt')\n final_path = latest_file\n yolo_bbox = final_path\n\n img = cv2.imread(image)\n dh, dw, _ = img.shape\n\n fl = open(yolo_bbox, 'r')\n data = 
fl.readlines()\n    fl.close()\n\n    final_coord_list = []\n    conf_list = []\n    class_list = []\n\n    # convert the yolo coordinates to a normal format\n    for dt in data:\n        # Split string to float\n        class_val, x, y, w, h, conf = map(float, dt.split(' '))\n        l = int((x - w / 2) * dw)\n        r = int((x + w / 2) * dw)\n        t = int((y - h / 2) * dh)\n        b = int((y + h / 2) * dh)\n\n        if l < 0:\n            l = 0\n        if r > dw - 1:\n            r = dw - 1\n        if t < 0:\n            t = 0\n        if b > dh - 1:\n            b = dh - 1\n\n        cv2.rectangle(img, (l, t), (r, b), (0, 0, 255), 10)\n        final_coord = ([l, t], [r, b])\n        final_coord_list.append(final_coord)\n        conf_list.append(conf)\n        class_list.append(class_val)\n    return class_list, final_coord_list, conf_list\n\n\ndef file_writer(class_build,truck, coords, conf, corners,cam_stamp):\n    # write all the useful data in a csv file\n    projet_path = \"/home/sa13291/Documents/ARTHUR_LAMARD/3d_projection/\"\n    projet_prediction_path = glob.glob(str(projet_path + 'prediction/'))\n    file = open(f'{projet_prediction_path[0]}results_pipeline.csv', 'a')\n    file.write(str(class_build)+\",\"+str(truck)+\",\"+str(coords)+\",\"+str(conf)+\",\"+str(corners)+\",\"+str(cam_stamp)+'\\n')\n    file.close()\n\ndef change_class(file, replacement_value):\n    # function made to change the class of a detection to the replacement_value chosen by the user\n    # used to have the same class number for each detection\n    newdata = []\n    fl = open(file, 'r')\n    data = fl.readlines()\n    fl.close()\n\n    for i in range(len(data)):\n        data[i].split(' ')[0] = replacement_value\n        Data = str(replacement_value + ' ' + data[i].split(' ')[1] + ' '+ data[i].split(' ')[2] + ' ' + data[i].split(' ')[3] + ' ' + data[i].split(' ')[4] + ' ' + data[i].split(' ')[5])\n        newdata.append(Data)\n    f = open(file, 'w')\n    for i in range(len(newdata)):\n        f.write(newdata[i])\n    f.close()\n    return file\n\n\ndef anomaly_detection():\n    list_class = []\n    list_conf = []\n    list_coords = []\n    list_time = []\n    list_build = []\n    list_sign = []\n    list_light = []\n    list_corner = []\n    average = []\n    projet_path = \"/home/sa13291/Documents/ARTHUR_LAMARD/3d_projection/\"\n    projet_prediction_path = glob.glob(str(projet_path + 'prediction/'))\n    read_file = ((f'{projet_prediction_path[0]}results_pipeline.csv'))\n    with open(read_file) as f:\n        reader = csv.reader(f)\n        for row in reader:\n            # appends each element in a list to separate them from the original list\n            list_conf.append(row[len(row)-3])\n            list_class.append(row[0])\n            list_coords.append([row[5],row[6],row[7]])\n            list_time.append(row[-1])\n            list_corner.append(row[len(row)-2])\n\n            # get a list of all the specific elements\n            if row[0] == '1.0':\n                list_light.append(row)\n            if row[0] == '2.0':\n                list_sign.append(row)\n            if row[0] == '3.0':\n                list_build.append(row)\n    paires = list(zip(list_build, list_build[1:] + list_build[:1]))\n\n\n    #print(\"paires : \", paires)\n    # start recording if the conf is too low\n    for i in range(len(list_conf)):\n        if list_conf[i]<=str(0.25):\n            print(\"START RECORDING_conf\")\n\n    # start recording if the number of points in the road mask is +/-3 points from the pipeline average\n    # it indicates a special case\n    for i in (list_corner):\n        digit = int(i)\n        average.append(digit)\n        mean = statistics.mean(average)\n        if (int(i)) >= mean+3 or (int(i))<=mean-3:\n            print(\"START RECORDING_corners\")\n    print(\"list digits : \", average)\n    print(\"average point : \", statistics.mean(average))\n\n\n\n\nif __name__ == \"__main__\":\n    file_name_lidar = 
\"/Users/arthurlamard/Documents/Allemagne/cours/AI-PROJECT-SMART_RECORDING_PIPELINE/PROJET/camera_lidar_semantic_bboxes/test/20181204_170238/lidar/cam_front_center/20181204170238_lidar_frontcenter_000036276.npz\"\n file_name_image = extract_image_file_name_from_lidar_file_name(file_name_lidar)\n # print(file_name_image)\n\n get_bboxes_coords(file_name_image)\n","repo_name":"saed13/AI-PROJECT-SMART_RECORDING_PIPELINE","sub_path":"3d_projection/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":14909,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"40009789217","text":"#!/usr/bin/python3\n\n\"\"\"\nreturns a list containing the titles of all hot articles\n\"\"\"\n\nimport requests\n\n\ndef recurse(subreddit, hot_list=[]):\n \"\"\"\n recursive function that queries the Reddit API and returns a list\n containing the titles of all hot articles for a given subreddit.\n If no results are found for the given subreddit,\n the function should return None.\n \"\"\"\n url = 'https://www.reddit.com/r/{}/top.json'.format(subreddit)\n headers = {'User-Agent': 'Mozilla/5.0'}\n response = requests.get(url, headers=headers, allow_redirects=False)\n\n if response.status_code == 200:\n my_data = response.json().get(\"data\").get(\"after\")\n if my_data is not None:\n after = my_data\n recurse(subreddit, hot_list)\n titles = response.json().get(\"data\").get(\"children\")\n for tittle in titles:\n hot_list.append(tittle.get(\"data\").get(\"title\"))\n return hot_list\n else:\n return (None)\n","repo_name":"Trefania/alx-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29611354750","text":"\"\"\" Data generation \"\"\"\nimport datetime\nimport random\nstart_date = datetime.date(2010, 1, 1)\nend_date = datetime.date(2022,2,1)\n\nwith open('data.csv', 'w') as fp:\n fp.writelines([\"Date,Ville,Température,Humidité\"])\n for i in range(1000):\n date = (start_date + datetime.timedelta(\n days=random.randrange((end_date - start_date).days))\n ).strftime('%Y-%m-%d')\n temp = random.randint(-10, 36)\n moisture = random.randint(0, 100)\n city = random.choice([\"Yverdon\", \"Lausanne\", \"Geneve\", \"Berne\"])\n fp.write(f\"{date},{temp},{moisture},{city}\\n\")\n","repo_name":"gaetan-worch/diary","sub_path":"2/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23256377694","text":"import requests \nfrom bs4 import BeautifulSoup\n\n# 내 서비스키를 이용해서 만든 예제 코드 \nurl = 'https://apis.data.go.kr/1160100/service/GetStockSecuritiesInfoService/getStockPriceInfo?serviceKey=7JlKxM7fEbOErQRa32MtR3%2Fg%2FBxi3JTPbwPfCw781Ma4uvwql5x2r2wM0Zh051RRUK%2Bw7YSwijxr0Tklej3cOg%3D%3D&numOfRows=100'\n \ndef add_commas(num):\n #chatGPT로 생성한 코드\n num_str = str(num)\n result = ''\n count = 0\n for i in range(len(num_str) - 1, -1, -1):\n result = num_str[i] + result\n count += 1\n if count % 3 == 0 and i != 0:\n result = ',' + result\n return result\n \n\n# 일반 크롤링 코드와 유사함\ndef soupMaker(url):\n headers = {\"User-Agent\":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'}\n res = requests.get(url, headers = headers)\n soup = BeautifulSoup(res.text, 'lxml') \n return soup\n \nfor i in 
soupMaker(url).find_all('items'):\n    total = []\n    total.append(i.select('basdt')) # date\n    total.append(i.select('itmsnm')) # stock name\n    total.append(i.select('mrktctg'))\n    total.append(i.select('mkp')) # market price\n\nfor i in range(50):\n    print(total[0][i].text, '\\t', total[1][i].text, '\\t', total[2][i].text, '\\t',add_commas(total[3][i].text))","repo_name":"skytreesea/PythonBasic","sub_path":"crawling/stock_api_price.py","file_name":"stock_api_price.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"4007625527","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport os\nimport urllib.request\nimport pyautogui\n\nkeyword = pyautogui.prompt(\"검색어를 입력하세요.\")\n\nif not os.path.exists(f\"{keyword}\"):\n    os.mkdir(f\"{keyword}\")\n\nurl = f\"https://www.google.com/search?q={keyword}&sxsrf=ALiCzsbbDQXImlszympGzwSboSTdtuIz9g:1652166326485&source=lnms&tbm=isch&sa=X&ved=2ahUKEwjW2_-Xr9T3AhXqQfUHHT0QDm0Q_AUoAXoECAIQAw&biw=1863&bih=913&dpr=1\"\n\nbrowser = webdriver.Chrome(\"/Users/jade/Desktop/web_crawling/driver/chromedriver\")\nbrowser.implicitly_wait(10)\nbrowser.maximize_window()\nbrowser.get(url)\n\n# Handle infinite scroll\n\n# Height before scrolling\nbefore_h = browser.execute_script(\"return window.scrollY\")\n\n# Infinite scroll loop\nwhile True:\n    # Scroll all the way to the bottom\n    browser.find_element_by_css_selector(\"body\").send_keys(Keys.END)\n\n    # Give the page time to load between scrolls\n    time.sleep(1)\n\n    # Height after scrolling\n    after_h = browser.execute_script(\"return window.scrollY\")\n\n    if after_h == before_h:\n        break\n\n    before_h = after_h\n\n# Extract the image tags\nimgs = browser.find_elements_by_css_selector(\".rg_i.Q4LuWd\")\n\nfor i, img in enumerate(imgs, 1):\n    # Click the image\n    # Work around the \"click intercepted\" error\n    browser.execute_script(\"arguments[0].click();\", img)\n    time.sleep(1)\n    \n    # Extract the URL of the full-size image\n    if i == 1:\n        target = browser.find_elements_by_css_selector(\"img.n3VNCb\")[0]\n    else:\n        target = browser.find_elements_by_css_selector(\"img.n3VNCb\")[1]\n    \n    img_src = target.get_attribute(\"src\")\n\n    # Download the image\n    opener = urllib.request.build_opener()\n    opener.addheaders = [('User-Agent', 'Mozilla/5.0')]\n    urllib.request.install_opener(opener)\n    urllib.request.urlretrieve(img_src, f'{keyword}/{i}.jpg')","repo_name":"jadefactory/web_scraper","sub_path":"google/google_image_crawler.py","file_name":"google_image_crawler.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73833404354","text":"\"\"\"Auxiliary function to transform dicts into lists and back for transfer\n   from cloud (sandman2) to local.\"\"\"\n\ndef listify(d):\n    \"\"\"Function to convert dict to list with keys in last list element.\n        Arguments:\n            d: dict - input dict\n        Returns:\n            list with dict values as elements [:-1] and dict keys as \n            last element.\"\"\"\n    \n    \"\"\"extract keys\"\"\"\n    keys = list(d.keys())\n    \n    \"\"\"create list\"\"\"\n    l = [d[key] for key in keys]\n    l.append(keys)\n    \n    return l\n\ndef delistify(l):\n    \"\"\"Function to convert listified dict back to dict.\n        Arguments:\n            l: list - input listified dict. 
This must be a list of dict \n elements as elements [:-1] and the corresponding\n dict keys as list in the last element.\n Returns:\n dict - The restored dict.\"\"\"\n \n \"\"\"extract keys\"\"\"\n keys = l.pop()\n assert len(keys) == len(l)\n \n \"\"\"create dict\"\"\"\n d = {key: l[i] for i,key in enumerate(keys)}\n \n return d\n","repo_name":"INET-Complexity/isle","sub_path":"listify.py","file_name":"listify.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"29053486468","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ### Dependencies \n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.stats as spstats\nget_ipython().run_line_magic('matplotlib', 'inline')\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\n# Merged Data\n\n# In[3]:\n\n\ndf = pd.read_csv('df.csv')\n\n\n# Converting to datetime\n\n# In[16]:\n\n\ndf[['order_estimated_delivery_date','order_purchase_timestamp','review_creation_date','review_answer_timestamp','review_answer_timestamp','shipping_limit_date','order_approved_at','order_delivered_carrier_date','order_delivered_customer_date']] = df[['order_estimated_delivery_date','order_purchase_timestamp','review_creation_date','review_answer_timestamp','review_answer_timestamp','shipping_limit_date','order_approved_at','order_delivered_carrier_date','order_delivered_customer_date']].apply(pd.to_datetime, format='%Y-%m-%d %H:%M:%S')\n\n\n# Rearraging Columns \n\n# In[30]:\n\n\ndf = df.reindex(columns=['order_id', 'product_id', 'seller_id','customer_id', 'order_item_id','customer_unique_id', 'shipping_limit_date', 'order_purchase_timestamp', 'order_approved_at', 'order_delivered_carrier_date', 'order_delivered_customer_date', 'order_estimated_delivery_date','review_creation_date','review_answer_timestamp', 'price', 'freight_value', 'customer_zip_code_prefix', 'customer_city', 'customer_state', 'seller_zip_code_prefix', 'seller_city', 'seller_state', 'order_status', 'payment_sequential', 'payment_type', 'payment_installments', 'payment_value', 'review_id', 'review_score', 'review_comment_title', 'review_comment_message', 'product_category_name', 'product_name_lenght', 'product_description_lenght', 'product_photos_qty', 'product_weight_g', 'product_length_cm', 'product_height_cm', 'product_width_cm'])\n\n\n# Missing Values\n# \n# \n\n# In[31]:\n\n\ndf.isna().sum().sort_values(ascending=False)\n\n\n# In[ ]:\n\n\ndf.product_photos_qty.fillna(0,inplace=True)\n\n\n# Descriptive Statistics\n\n# In[9]:\n\n\ndf.describe().round(2).T\n\n\n# In[10]:\n\n\ndf.corr() \n\n\n# Example\n\n# In[27]:\n\n\ntemp1=df.groupby('customer_state')['price'].agg(['sum']).round(2).sort_values(by='customer_state',ascending=False)\ntemp1\n\n\n# In[22]:\n\n\ntemp1.plot(kind='bar', figsize = (15,12),fontsize = 15)\n\n\n# In[13]:\n\n\ndf.groupby('payment_type').price.agg(['describe','sum']).round(2)\n\n\n# In[14]:\n\n\ndf.groupby('product_category_name').price.value_counts()\n\n\n# In[15]:\n\n\ndf.order_status.value_counts()\n\n\n# Sellers\n\n# In[36]:\n\n\nhelp(df)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"harshrajsinh-rathod/1","sub_path":"timeIt/EDA/Code/Anticipatory_Version_2.py","file_name":"Anticipatory_Version_2.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23555414361","text":"'''\r\nCreated on Feb 22, 2017\r\n\r\n@author: cturkarslan\r\n'''\r\n###REDIRECT IO\r\nimport sys\r\nsys.stdin = open('B-large.in' ,'r')\r\n\r\n#sys.stdin = open('Input.in' ,'r')\r\nsys.stdout = open('output.txt' , 'w')\r\n\r\n\r\nT = int(input())\r\nfor t in range(T):\r\n N = str(input())\r\n answer = N\r\n for i,c in enumerate(N[:-1]):\r\n if(N[i]>N[i+1]):\r\n j = i\r\n while(j>-1 and N[j] == c): j -= 1\r\n# print(j,i,N)\r\n if(j == -1):\r\n if(c == \"1\"): answer =(i)*\"9\" #Number is aaaaaa...ccc\r\n else:\r\n if(i == 0): answer = str(int(c)-1)\r\n else: answer = str(int(c)-1) + i*\"9\"\r\n else:\r\n answer =N[0:j+1]\r\n answer += str(int(c)-1) + (i- j-1) * \"9\" \r\n# answer +=(i-j)*str(int(c)-1)\r\n answer +=(len(N)-i-1)*\"9\"\r\n break\r\n \r\n print (\"Case #%d:\" %(t+1),answer)\r\n\r\nif __name__ == '__main__':\r\n pass\r\n\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/2375.py","file_name":"2375.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24433871393","text":"import gpflow\nimport numpy as np\nimport tensorflow as tf\nfrom typing import Optional\nfrom oak.input_measures import (\n Measure,\n EmpiricalMeasure,\n GaussianMeasure,\n MOGMeasure,\n UniformMeasure,\n)\n\n\n# -\n\nclass OrthogonalRBFKernel(gpflow.kernels.Kernel):\n \"\"\"\n :param base_kernel: base RBF kernel before applying orthogonality constraint\n :param measure: input measure\n :param active_dims: active dimension\n :return: constrained BRF kernel\n \"\"\"\n\n def __init__(\n self, base_kernel: gpflow.kernels.RBF, measure: Measure, active_dims=None\n ):\n super().__init__(active_dims=active_dims)\n self.base_kernel, self.measure = base_kernel, measure\n self.active_dims = self.active_dims\n if not isinstance(base_kernel, gpflow.kernels.RBF):\n raise NotImplementedError\n if not isinstance(\n measure,\n (\n UniformMeasure,\n GaussianMeasure,\n EmpiricalMeasure,\n MOGMeasure,\n ),\n ):\n raise NotImplementedError\n\n if isinstance(self.measure, UniformMeasure):\n\n def cov_X_s(X):\n tf.debugging.assert_shapes([(X, (\"N\", 1))])\n l = self.base_kernel.lengthscales\n sigma2 = self.base_kernel.variance\n return (\n sigma2\n * l\n / (self.measure.b - self.measure.a)\n * np.sqrt(np.pi / 2)\n * (\n tf.math.erf((self.measure.b - X) / np.sqrt(2) / l)\n - tf.math.erf((self.measure.a - X) / np.sqrt(2) / l)\n )\n )\n\n def var_s():\n l = self.base_kernel.lengthscales\n sigma2 = self.base_kernel.variance\n y = (self.measure.b - self.measure.a) / np.sqrt(2) / l\n return (\n 2.0\n / ((self.measure.b - self.measure.a) ** 2)\n * sigma2\n * l ** 2\n * (\n np.sqrt(np.pi) * y * tf.math.erf(y)\n + tf.exp(-tf.square(y))\n - 1.0\n )\n )\n\n if isinstance(self.measure, GaussianMeasure):\n\n def cov_X_s(X):\n tf.debugging.assert_shapes([(X, (..., \"N\", 1))])\n l = self.base_kernel.lengthscales\n sigma2 = self.base_kernel.variance\n mu, var = self.measure.mu, self.measure.var\n return (\n sigma2\n * l\n / tf.sqrt(l ** 2 + var)\n * tf.exp(-0.5 * ((X - mu) ** 2) / (l ** 2 + var))\n )\n\n def var_s():\n l = self.base_kernel.lengthscales\n sigma2 = self.base_kernel.variance\n return sigma2 * l / tf.sqrt(l ** 2 + 2 * self.measure.var)\n\n if isinstance(self.measure, EmpiricalMeasure):\n\n def cov_X_s(X):\n location = self.measure.location\n weights = self.measure.weights\n tf.debugging.assert_shapes(\n [(X, (\"N\", 1)), (location, (\"M\", 1)), (weights, 
(\"M\", 1))]\n )\n return tf.matmul(self.base_kernel(X, location), weights)\n\n def var_s():\n location = self.measure.location\n weights = self.measure.weights\n tf.debugging.assert_shapes([(location, (\"M\", 1)), (weights, (\"M\", 1))])\n return tf.squeeze(\n tf.matmul(\n tf.matmul(\n weights, self.base_kernel(location), transpose_a=True\n ),\n weights,\n )\n )\n\n if isinstance(self.measure, MOGMeasure):\n\n def cov_X_s(X):\n tf.debugging.assert_shapes([(X, (\"N\", 1))])\n l = self.base_kernel.lengthscales\n sigma2 = self.base_kernel.variance\n mu, var, weights = (\n self.measure.means,\n self.measure.variances,\n self.measure.weights,\n )\n tmp = tf.exp(-0.5 * ((X - mu) ** 2) / (l ** 2 + var)) / tf.sqrt(\n l ** 2 + var\n )\n\n return sigma2 * l * tf.matmul(tmp, tf.reshape(weights, (-1, 1)))\n\n def var_s():\n l = self.base_kernel.lengthscales\n\n sigma2 = self.base_kernel.variance\n mu, var, w = (\n self.measure.means,\n self.measure.variances,\n self.measure.weights,\n )\n dists = tf.square(mu[:, None] - mu[None, :])\n scales = tf.square(l) + var[:, None] + var[None, :]\n tmp = sigma2 * l / tf.sqrt(scales) * tf.exp(-0.5 * dists / scales)\n\n return tf.squeeze(tf.matmul(tf.matmul(w[None, :], tmp), w[:, None]))\n\n self.cov_X_s = cov_X_s\n self.var_s = var_s\n\n def K(self, X: np.ndarray, X2: Optional[np.ndarray] = None) -> np.ndarray:\n \"\"\"\n :param X: input array X\n :param X2: input array X2, if None, set to X\n :return: kernel matrix K(X,X2)\n \"\"\"\n cov_X_s = self.cov_X_s(X)\n if X2 is None:\n cov_X2_s = cov_X_s\n else:\n cov_X2_s = self.cov_X_s(X2)\n k = (\n self.base_kernel(X, X2)\n - tf.tensordot(cov_X_s, tf.transpose(cov_X2_s), 1) / self.var_s()\n )\n return k\n\n def K_diag(self, X):\n cov_X_s = self.cov_X_s(X)\n k = self.base_kernel.K_diag(X) - tf.square(cov_X_s[:, 0]) / self.var_s()\n return k\n","repo_name":"amzn/orthogonal-additive-gaussian-processes","sub_path":"oak/ortho_rbf_kernel.py","file_name":"ortho_rbf_kernel.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"36041065377","text":"# -*- coding: utf-8 -*-\nimport os\n\n# стратовый путь для всех файлов\nBASE_DIR = os.getcwd()\n\n# папка с html шаблонами\nTEMPLATES_PATCH = (\n 'templates',\n)\n\nCSS_PATH = (\n 'css',\n)\n\nJS_PATH = (\n 'js',\n)\n\nIMG_PATH = 'images'\n\n# паттерн используется для поисков тегов в тексте\nPATTERN_TEMPLATE_TAG_RAW = \"({{[_a-zA-Z 0-9]+}})\"\nPATTERN_TEMPLATE_TAG = \"({{[ ]*&&[ ]*}})\"\nPATTERN_CLEAR_TAG = \"(?:[ ]*)([_a-zA-Z 0-9]+)\"","repo_name":"bisirkin-pv/simple-web-server-python26","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23551414331","text":"# input() reads a string with a line of input, stripping the '\\n' (newline) at the end.\r\n# This is all you need for most Google Code Jam problems.\r\nt = int(input()) # read a line with a single integer\r\nfor i in range(1, t + 1):\r\n\r\n\tn = input().split(\" \") # read a list of integers, 2 in this case\r\n\ttidy = [int(s) for s in (list(str(n))[2:-2])][::-1]\r\n\tuntidy = True\r\n\t\r\n\twhile untidy:\r\n\t\tuntidy = False\r\n\t\tfor j in range(len(tidy)-1):\r\n\t\t\tif tidy[j] < tidy[j+1]:\r\n\t\t\t\tuntidy = True\r\n\t\t\t\tfor k in range(j+1):\r\n\t\t\t\t\ttidy[k] = 9\r\n\t\t\t\ttidy[j+1] -=1\r\n\t\t\r\n\tresposta = tidy[::-1]\r\n\tresp_string = ''\r\n\t\r\n\tfor s in 
resposta:\r\n\t\tresp_string += str(s)\r\n\r\n\tprint(\"Case #{}: {}\".format(i, int(resp_string)))\r\n\t# check out .format's specification for more formatting options","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1036.py","file_name":"1036.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43908314762","text":"from tools import check_skiplist\nfrom models import AupData, AupInfo, Groups\nblocks = {}\nblocks_r = {}\nperiod = {}\nperiod_r = {}\ncontrol_type = {}\ncontrol_type_r = {}\ned_izmereniya = {}\ned_izmereniya_r = {}\nchast = {}\nchast_r = {}\ntype_record = {}\ntype_record_r = {}\n\ndef getType(id):\n l = [1, 5, 9]\n if id in l:\n return \"control\"\n return \"load\"\n\ndef create_json(aup):\n aupInfo = AupInfo.query.filter_by(num_aup=aup).first()\n aupData = AupData.query.filter_by(id_aup=aupInfo.id_aup).order_by(AupData.shifr, AupData.discipline, AupData.id_period).all()\n\n json = dict()\n json['header'] = [aupInfo.name_op.okco.program_code + '.' + aupInfo.name_op.num_profile,\n aupInfo.name_op.okco.name_okco, aupInfo.name_op.name_spec, aupInfo.faculty.name_faculty]\n json['year'] = aupInfo.year_beg\n json['data'] = list()\n flag = \"\"\n session = list()\n value = list()\n\n # if check_skiplist(item.zet, item.discipline, item.type_record.title, item.block.title) == False:\n # continue\n for i, item in enumerate(aupData): \n # if 'Выполнение и защита выпускной квалификационной работы' in item.discipline:\n # pass\n if flag != item.discipline + str(item.id_period):\n if i != 0 and 'd' in locals():\n d['type']['session'] = session\n d['type']['value'] = value\n session = list()\n value = list()\n json['data'].append(d)\n flag = item.discipline + str(item.id_period)\n d = dict()\n d[\"discipline\"] = item.discipline\n d[\"id_group\"] = item.id_group\n d[\"id_block\"] = item.id_block\n d[\"shifr\"] = item.shifr\n d[\"id_part\"] = item.id_part\n d[\"id_module\"] = item.id_module\n d[\"num_col\"] = item.id_period - 1 \n d[\"num_row\"] = item.num_row\n d[\"type\"] = dict()\n d[\"id\"] = str(item.id)\n if check_skiplist(item.zet, item.discipline, item.type_record.title, item.block.title) == False:\n d[\"is_skip\"] = True\n else:\n d[\"is_skip\"] = False\n zet = dict()\n zet[\"amount\"] = item.amount / 100\n zet[\"id_edizm\"] = item.ed_izmereniya.id\n zet[\"id\"] = item.id\n zet[\"control_type_id\"] = item.id_type_control\n zet[\"type\"] = getType(item.id_type_control)\n if item.id_type_control == control_type['Экзамен'] or item.id_type_control == control_type['Зачет'] or item.id_type_control == control_type['Дифференцированный зачет']:\n session.append(zet)\n else:\n value.append(zet)\n if i+1==len(aupData):\n d['type']['session'] = session\n d['type']['value'] = value\n json['data'].append(d)\n else:\n d[\"id\"] += str(item.id)\n zet = dict()\n zet[\"amount\"] = item.amount / 100\n zet[\"id_edizm\"] = item.ed_izmereniya.id\n zet[\"id\"] = item.id\n zet[\"control_type_id\"] = item.id_type_control\n zet[\"type\"] = getType(item.id_type_control)\n if item.id_type_control == control_type['Экзамен'] or item.id_type_control == control_type['Зачет'] or item.id_type_control == control_type['Дифференцированный зачет']:\n session.append(zet)\n else:\n value.append(zet) \n if i+1==len(aupData):\n d['type']['session'] = session\n d['type']['value'] = value\n json['data'].append(d)\n\n for num in range(len(json[\"data\"])-1, -1, -1):\n if 
json[\"data\"][num][\"is_skip\"] == True:\n del json[\"data\"][num]\n return json\n\n\ndef create_json_test(aupInfo, aupData, max_column, max_row):\n json = dict()\n json['header'] = [aupInfo.name_op.okco.program_code + '.' + aupInfo.name_op.num_profile,\n aupInfo.name_op.okco.name_okco, aupInfo.name_op.name_spec, aupInfo.faculty.name_faculty]\n json['year'] = aupInfo.year_beg\n json['data'] = list()\n for i in range(1, max_column + 1):\n for j in range(max_row + 1):\n print(i, j)\n disc = aupData.filter_by(num_row=j, id_period=i).all()\n if disc == []: continue\n if check_skiplist(disc[0].zet, disc[0].discipline, disc[0].type_record.title, disc[0].block.title) == False:\n continue\n d = dict()\n d[\"discipline\"] = disc[0].discipline\n d[\"id_group\"] = disc[0].id_group\n d[\"num_col\"] = disc[0].id_period\n d[\"num_row\"] = disc[0].num_row\n d[\"type\"] = list()\n d[\"id\"] = \"\"\n for item in disc:\n zet = dict()\n zet[\"control\"] = control_type_r[item.id_type_control]\n zet[\"zet\"] = item.zet / 100\n zet[\"id\"] = item.id\n d[\"type\"].append(zet)\n d[\"id\"] += str(item.id)\n json['data'].append(d)\n return json\n\ndef create_json_print(aupData):\n json = dict()\n json['data'] = list()\n flag = \"\"\n for i, item in enumerate(aupData):\n # if 'Дизайн-проектирование природоподобных объектов для новой мобильности' in item.discipline:\n # pass\n # if check_skiplist(item.zet, item.discipline, item.type_record.title, item.block.title) == False:\n # continue\n if flag != item.discipline + str(item.id_period):\n if i != 0 and 'd' in locals():\n json['data'].append(d)\n flag = item.discipline + str(item.id_period)\n d = dict()\n d[\"discipline\"] = item.discipline\n group = Groups.query.filter(Groups.id_group == item.id_group).first()\n d[\"color\"] = group.color\n d[\"id_group\"] = group.id_group\n d[\"num_col\"] = item.id_period\n d[\"num_row\"] = item.num_row\n if check_skiplist(item.zet, item.discipline, item.type_record.title, item.block.title) == False:\n d[\"is_skip\"] = True\n else:\n d[\"is_skip\"] = False\n if item.id_edizm == 2:\n d[\"zet\"] = item.amount / 100 * 54\n else:\n d[\"zet\"] = item.amount / 100\n if i+1==len(aupData):\n json['data'].append(d)\n else:\n if item.id_edizm == 2:\n d[\"zet\"] = item.amount / 100 * 54\n else:\n d[\"zet\"] += item.amount / 100\n if i+1==len(aupData):\n json['data'].append(d)\n # for disc in json['data']:\n # disc['zet'] /= 36\n\n for num in range(len(json[\"data\"])-1, -1, -1):\n if json[\"data\"][num][\"is_skip\"] == True:\n del json[\"data\"][num]\n else:\n json[\"data\"][num]['zet'] /= 36\n\n return json\n\n","repo_name":"mksmp/PD_EP_spring2022","sub_path":"take_from_bd.py","file_name":"take_from_bd.py","file_ext":"py","file_size_in_byte":7071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11628906625","text":"# Mert Erciyas Pizza calculator \nsmallPizza = int(input(\"Hoeveel kleine pizzas wilt u?\\n\")) # hierbij kan je invoeren hoeveel small pizzas je wilt\nmediumPizza = int(input(\"Hoeveel medium pizzas wilt u?\\n\")) # hierbij kan je invoeren hoeveel medium pizzas je wilt\nlargePizza = int(input(\"Hoeveel grote pizzas wilt u?\\n\")) # hierbij kan je invoeren hoeveel large pizzas je wilt\n\nsmallPizzaPrice = 7.50 # hier zie je de prijs van de small pizza\nmediumPizzaPrice = 9.50 # hier zie je de prijs van de medium pizza\nlargePizzaPrice = 11.50 # hier zie je de prijs van de large pizza\n\nsmallPizzaKosten = (smallPizza * smallPizzaPrice)\nmediumPizzaKosten = (mediumPizza 
* mediumPizzaPrice)\nlargePizzaKosten = (largePizza * largePizzaPrice)\n\nprint(\"Dus jij hebt\" , smallPizza , \"small pizzas\" , mediumPizza , \"medium pizzas\" , largePizza , \"large pizzas\\n\") # hierbij laat hij weten hoeveel pizzas je hebt besteld\nprint(\"dat word dan\" , smallPizzaKosten , \"euro voor de small pizza's\" , mediumPizzaKosten , \"euro voor de medium pizza's\" , largePizzaKosten , \"euro voor de large pizza's\\n Dat word dan in totaal:\" )\nkosten = (float((smallPizza * smallPizzaPrice) + (mediumPizza * mediumPizzaPrice) + (largePizza * largePizzaPrice))) # hier berekend hij de kosten van alle pizzas\nprint(kosten , \"euro\") # hier laat hij zien hoeveel de pizzas in totaal kosten","repo_name":"MertErciyas/Van_input_naar-_output","sub_path":"Opdrachten/Pizza_Calculator.py","file_name":"Pizza_Calculator.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14296837799","text":"\"\"\"\n142. Linked List Cycle II\nhttps://leetcode.com/problems/linked-list-cycle-ii/\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def detectCycle(self, head: ListNode) -> ListNode:\n # Fast & slow pointers\n # [1] get the length of the cycle by using a fast & slow pointer method\n # [2] initialize 2 pointers at head, p1 = p2 = head. Move p2 forward k spaces\n # [3] increment both pointers until p1 == p2 & return\n # O(n) time and O(1) space, where n = length(head)\n def length_of_cycle(head):\n f = s = head\n while f and f.next:\n f = f.next.next\n s = s.next\n if f == s:\n return get_length(s)\n return 0\n\n def get_length(s):\n c = s # current\n length = 0\n while True:\n c = c.next\n length += 1\n if c == s:\n break\n return length\n\n p = head\n len_cycle = length_of_cycle(p)\n if len_cycle == 0: return None\n p1 = p2 = head\n while len_cycle:\n p2 = p2.next\n len_cycle -= 1\n while p1 != p2:\n p1 = p1.next\n p2 = p2.next\n return p1\n","repo_name":"mathvolcano/leetcode","sub_path":"0142_detectCycle.py","file_name":"0142_detectCycle.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70120490115","text":"import numpy as np\nimport esopt\n\n\ndef main():\n params = np.array([0.0, 0.0, 0.0])\n optimizer = esopt.RAdam(0.5, beta1=0, beta2=0) #plain SGD/SGA\n opt = esopt.ESOpt(params, optimizer, evalTrain, samples = 25, std = 0.02)\n\n print(\"Beginning:\")\n print(\"Initial Score: {}\".format(evalTrain(params, 0)))\n for i in range(10):\n score, gradnorm = opt.optimize(10)\n print(\"{:2d}: {}\".format(i+1, score))\n #}\n print(\"Finished!\")\n print(opt.params)\n#}\n\ndef evalTrain(params, t):\n X = np.array([0.0, 1.0, 2.0, -1, 4])\n Y = np.array([0.0, 1.0, 4.0, 1, 16])\n pred = params[0] + params[1] * X + params[2] * np.square(X)\n mse = np.square(Y - pred).sum() / len(X)\n return -mse\n#}\n\nif __name__ == \"__main__\":\n main()\n#}\n\n","repo_name":"FlixCoder/py-es-optimizer","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6524801094","text":"from django.urls import path, include\nfrom . import views\n\nurlpatterns = [\n # get and post req. 
for insert operation\n path('', views.empleado_form, name='empleado_insert'),\n # get and post req. for update operation\n path('/', views.empleado_form, name='empleado_update'),\n path('delete//', views.empleado_delete, name='empleado_delete'),\n # get req. to retrieve and display all records\n path('list/', views.empleado_list, name='empleado_list')\n]\n","repo_name":"mijailCR12/proyecto-2","sub_path":"proyecto/cita/urls-empleado.py","file_name":"urls-empleado.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36868038755","text":"__author__ = 'OliPicard'\nimport csv\nimport json\nimport sys\nfrom io import StringIO\nimport requests\n\n'''a csv to json converter written for the UKStationLive schema\nsimply run the tool to get the station.json output.\nThank you to the following\nexcalibur (for his streaming data save copy)\ndiminonten (for his memory version)\n'''\n\ndef cupid():\n response = requests.get('http://www.nationalrail.co.uk/static/documents/content/station_codes.csv')\n if response.status_code == requests.codes.ok:\n print(\"Grabbing file from National Rail\")\n csv_file = StringIO(response.content.decode())\n headers = ['Station Name', 'Code']\n csv_reader = csv.DictReader(csv_file, headers)\n json_filename = 'station.json'\n print('Saving the file', json_filename)\n jsonf = open(json_filename, 'w')\n data = {r['Station Name']: r['Code'] for r in csv_reader}\n redata = json.dumps(data, indent=4, sort_keys=True)\n jsonf.write(redata)\n jsonf.close()\n\nif __name__ == \"__main__\":\n cupid()","repo_name":"OliPicard/cupid","sub_path":"cupid.py","file_name":"cupid.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18620437208","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n start = 0\n end = 0\n maxlen = 0\n zdict = {}\n \n while end < len(s):\n if s[end] in zdict and zdict[s[end]] >= start:\n start = zdict[s[end]] + 1\n maxlen = max(maxlen, end - start + 1)\n zdict[s[end]] = end\n end += 1\n \n return maxlen","repo_name":"Zankar100/practice-problems","sub_path":"LongestSubstringWithoutRepeatingChars.py","file_name":"LongestSubstringWithoutRepeatingChars.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4234790513","text":"# -- coding: utf-8 --\nimport re\nimport datetime\nimport dateutil.parser as dparser\n\nfrom .base_template import BaseTemplate\n\n\nclass BitCoinTalkParser(BaseTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.parser_name = \"bitcointalk.com\"\n self.thread_name_pattern = re.compile(\n r'(\\d+).*html$'\n )\n self.avatar_name_pattern = re.compile(r'.*/(\\w+\\.\\w+)')\n self.files = self.get_filtered_files(kwargs.get('files'))\n self.comments_xpath = '//tr[@class]//td[contains(@class,\"windowbg\") and not(@valign)]'\n self.header_xpath = '//tr[@class]//td[contains(@class,\"windowbg\") and not(@valign)]'\n self.date_xpath = 'table//td[@valign=\"middle\"]/div[@class=\"smalltext\"]/text()'\n self.date_pattern = '%B %d, %Y, %I:%M:%S %p'\n self.author_xpath = 'table//td[@class=\"poster_info\"]/b/a/text()'\n self.title_xpath = 'table//div[@class=\"subject\"]/a/text()'\n self.post_text_xpath = 'table//div[@class=\"post\"]/text()'\n self.comment_block_xpath = 
'table//td[@class=\"td_buttons\"]/div/a/text()'\n self.avatar_xpath = '//img[@class=\"avatar\"]/@src'\n\n # main function\n self.main()\n\n def get_filtered_files(self, files):\n filtered_files = list(\n filter(\n lambda x: self.thread_name_pattern.search(x) is not None,\n files\n )\n )\n sorted_files = sorted(\n filtered_files,\n key=lambda x: (self.thread_name_pattern.search(x).group(1),\n x.split(\"-\")[-1]))\n return sorted_files\n\n def get_date(self, tag):\n date_block = tag.xpath(self.date_xpath)\n if not date_block:\n date_block = tag.xpath(\n 'table//td[@valign=\"middle\"]'\n '/div[@class=\"smalltext\"]/span[@class=\"edited\"]/text()'\n )\n\n date = date_block[0].strip() if date_block else \"\"\n if date.startswith('at '):\n date = datetime.datetime.today().strftime('%B %d, %Y, ') + date.split('at ')[-1]\n\n try:\n date = datetime.datetime.strptime(date, self.date_pattern).timestamp()\n return str(date)\n except:\n try:\n date = dparser.parse(date).timestamp()\n return str(date)\n except:\n pass\n\n return \"\"\n\n def get_comment_id(self, tag):\n reply_block = tag.xpath(\n 'table//td[@class=\"td_buttons\"]'\n '/div/a/img[@class=\"reply_button\"]'\n )\n if reply_block:\n return\n\n comment_block = tag.xpath(self.comment_block_xpath)\n if comment_block:\n commentID = comment_block[0].split('#')[-1].replace(',', '')\n return commentID.replace(',', '')\n\n return \"\"\n","repo_name":"ken2190/Enterprise-Forum-Scraper","sub_path":"templates/bitcointalk_template.py","file_name":"bitcointalk_template.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34873336442","text":"import importlib\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path\n\nimport tessif.simulate as optimize\nfrom tessif.model.energy_system import AbstractEnergySystem as AES\nfrom tessif import parse\nfrom tessif.frused import configurations\nfrom tessif.frused.paths import doc_dir\nfrom tessif.transform.es2mapping import compile_result_data_representation\nimport tessif.visualize.nxgrph as nxv\n\n# silence spellings mismatch warnings\nconfigurations.spellings_logging_level = 'debug'\n\n# construct the absolute path of the system model data\np = Path(doc_dir) / \"source\" / \"getting_started\" / \"examples\"\np = p / \"application\" / \"phd\" / \"rdr\" / \"esm\"\nFOLDER = p.resolve()\n\n# create the system model\nes = AES.from_external(path=FOLDER, parser=parse.flat_config_folder)\n\n# create the visual representation\ndrawing_data = nxv.draw_graph(\n es.to_nxgrph(),\n node_color={\n 'CBES': '#666666',\n 'CBE Bus': '#666666',\n 'CHP': '#666666',\n\n 'Heatline': '#b30000',\n 'Heat Demand': '#b30000',\n\n 'Power Demand': '#ffe34d',\n 'Powerline': '#ffe34d',\n },\n node_size={\n 'powerline': 5000,\n 'district heating pipeline': 5000\n },\n)\n# plt.show()\n\nsoftware = \"fine\"\ntransformation_module = importlib.import_module(\n '.'.join(['tessif.transform.es2es', software]))\n\n# 7. Transform the tessif system model into a fine system model:\nsoftware_es = transformation_module.transform(es)\n\n# 8. 
Optimize the fine system model using fine's capabilities:\noptimized_es = getattr(optimize, \"_\".join([software, \"from_es\"]))(software_es)\n\nrdr = compile_result_data_representation(optimized_es, software, 'CHP')\nprint(rdr)\n","repo_name":"tZ3ma/tessif-phd","sub_path":"docs/source/getting_started/examples/application/phd/theory/rdr/result_data_representation.py","file_name":"result_data_representation.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27835005238","text":"\nimport ea_base as ea\n\n\n\ndef dominates(find1, find2):\n    \"\"\"Determines whether fitness vector find1 dominates \n    fitness vector find2\n    If find1 dominates find2 returns True\n    Otherwise returns False\n    \n    Parameters\n    ----------\n    find1: tuple or list\n        Vector of fitness values of individual 1\n    find2: tuple or list\n        Vector of fitness values of individual 2\n    \n    Returns\n    -------\n    Boolean\n        True if find1 dominates find2. False otherwise\n    \n    \"\"\"\n    dom = False\n    better = 0\n    better_or_equal = 0\n    nobj = len(find1)\n    \n    for i in range(0,nobj):\n        if (find1[i] >= find2[i]):\n            better_or_equal += 1\n            if find1[i] > find2[i]:\n                better += 1\n    \n    if (better_or_equal == nobj) and better >= 1:\n        dom = True\n    return dom\n\n\n\ndef get_non_dominated_solutions(pop):\n    \"\"\"Extract non-dominated solutions from pop and return them\n    Dominated solutions remain in pop\n    \n    Parameters\n    ----------\n    pop: Population\n        Population from which the set of non-dominated solutions will be \n        extracted\n    \n    Returns\n    -------\n    Population\n        The set of non-dominated solutions\n    \n    \"\"\"\n    size = len(pop)\n    dom_count = [0] * size\n    \"\"\"\n    Count how many times a solution has been dominated\n    \"\"\"\n    for i in range(0,size):\n        for j in range(0,size):\n            if i != j:\n                if dominates(pop[i].fitness, pop[j].fitness):\n                    dom_count[j] += 1\n    \n    \"\"\"\n    A solution i with dom_count[i] == 0 is non-dominated\n    \"\"\"\n    \n    ndpop = ea.Population()\n    for i in range(size-1, -1, -1):\n        if dom_count[i] == 0:\n            # add non-dominated solution to ndpop\n            ndpop.insert(0, pop[i])\n            # remove non-dominated solution from pop\n            pop.remove(pop[i]) \n    return ndpop\n\n\ndef non_dominated_sorting(pop):\n    \"\"\"Extract non-dominated fronts from pop and include them in a list\n    The first front in the list is the top front\n    pop is empty after all fronts have been extracted\n    Returns the list of non-dominated fronts\n    \n    Parameters\n    ----------\n    pop: Population\n        Population from which sets of non-dominated solutions will be \n        extracted\n    \n    Returns\n    -------\n    List\n        The sets of non-dominated solutions\n    \"\"\"\n\n    fronts = []\n    while (len(pop) > 0):\n        fi = get_non_dominated_solutions(pop)\n        fronts.append(fi)\n    return fronts\n\ndef front_rank(front, drank, i_index=0):\n    \"\"\"Ranks all solutions of a front with the specified drank \n    A solution can have two ranks: rank[0] and rank[1]\n    By default, i_index=0 so drank is assigned to rank[0]\n\n    Parameters\n    ----------\n    front: Population\n        Front of solutions to be ranked\n    drank: integer\n        The rank assigned to all solutions of the front\n    i_index:\n        The index of the rank being set. Default is 0\n    \n    \n    Returns\n    -------\n    None\n    \n    \"\"\"\n    # all solutions in the same front are assigned the same rank\n    # the rank = front number\n    for ind in front:\n        ind.rank[i_index] = drank\n        \ndef crowding_distance (front, nobj,i_index=1):\n    \"\"\"Computes crowding distance of a set of non-dominated solutions\n    \n    Parameters\n    ----------\n    front: Population\n        Front of solutions to compute crowding distance\n    nobj: integer\n        The number of objectives\n    i_index:\n        The index of the rank corresponding to crowding distance. \n        Default is 1\n    \n    Returns\n    -------\n    None    \n    \"\"\"\n    \n    nind = len(front)\n    INFINITY = 1e+15\n    EPS = 1e-10\n    \n    # Initialize cd = 0 for all individuals in the front\n    for i in range(0, nind):\n        front[i].rank[i_index] = 0.0\n\n    for i in range (0, nobj):\n        # sort population by ith objective \n        front.sort(key = lambda ind: ind.fitness[i], reverse = True)\n        # get max and min fitness in the front\n        maxf = front[0].fitness[i]\n        minf = front[nind-1].fitness[i]\n\n        # add a very large distance to extreme solutions\n        front[0].rank[i_index] += INFINITY\n        front[nind-1].rank[i_index] += INFINITY\n        # add distance in objective i from solution j-1 to j+1\n        for j in range(1, nind-1):\n            d = front[j-1].fitness[i] - front[j+1].fitness[i]\n            front[j].rank[i_index] += abs(d)/abs(EPS + maxf - minf)\n    ","repo_name":"2basaa/python","sub_path":"optimization/mo/mosource/moea_base.py","file_name":"moea_base.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71150496193","text":"import pickle\nimport socket\n\nimport torch\nfrom flask import Flask, jsonify, request\n\nfrom ia.janggi_network import JanggiNetwork\nfrom ia.trainer import ModelSaver\nfrom janggi.utils import DEVICE\n\napp = Flask(__name__)\n\n\ndef get_model():\n    model = JanggiNetwork()\n\n    def load_latest_model():\n        model_saver = ModelSaver()\n        model_saver.load_latest_model(model)\n\n    load_latest_model()\n    model.to(DEVICE)\n    model.eval()\n    return model\n\n\nMODEL = get_model()\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    if request.method == 'POST':\n        json_data = request.json\n        features = torch.tensor(json_data[\"features\"], device=DEVICE)\n        with torch.no_grad():\n            policy, value = MODEL(features)\n        res = jsonify({'policy': policy.tolist(), 'value': value.tolist()})\n        return res\n    return \"404\"\n\n\nHEADERSIZE = 10\n\n\ndef run_server():\n    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server.bind(('127.0.0.1', 5000))\n    server.listen(5)\n    print(\"Server Started\")\n    while True:\n        conn, addr = server.accept()\n        from_client = b''\n        new_msg = True\n        msg_length = 0\n        while True:\n            data = conn.recv(4096)\n            if new_msg:\n                msg_length = int(data[:HEADERSIZE])\n                new_msg = False\n            from_client += data\n            if len(from_client) - HEADERSIZE == msg_length:\n                break\n        features = pickle.loads(from_client[HEADERSIZE:]).to(DEVICE)\n        with torch.no_grad():\n            policy, value = MODEL(features)\n        res = pickle.dumps({'policy': policy, 'value': value})\n        msg = bytes(f\"{len(res):<{HEADERSIZE}}\", 'utf-8') + res\n        conn.send(msg)\n        conn.close()\n\n\nif __name__ == '__main__':\n    # app.run(threaded=False)\n    run_server()\n","repo_name":"Aunsiels/alphazero_janggi","sub_path":"inference_service.py","file_name":"inference_service.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"11702238584","text":"import torch\nimport 
torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom typing import List\nfrom easydict import EasyDict\nfrom .utils import NetIO\nfrom .losses import LossWrapper\nfrom .optimizer import OptimizerBuilder\nfrom .schemes import SchedulerBuilder\nfrom .utils import LoggerBuilder, AverageMeter, update_ema_variables\nfrom .metrics import MetricBuilder\nfrom .controller import Controller\n\n\n__all__ = ['Trainer']\n\n\nclass Trainer:\n def __init__(self, \n config: EasyDict,\n model: nn.Module,\n mean_model: nn.Module,\n wrapper: LossWrapper,\n ioer: NetIO,\n *args, **kwargs\n ) -> None:\n \n self.config = config\n self.loss_wrapper = wrapper\n self.ioer = ioer\n self.logger, self.summary = None, None\n\n self.__parse_config()\n \n self.metric_func_list = self.__get_metrics()\n\n self.__num_loss = self.loss_wrapper.num_meter\n self.__num_metric = len(self.metric_func_list)\n\n self.model = model\n self.mean_model = mean_model\n (self.optimizer, self.optimizer_params), (self.scheduler, self.scheduler_params) = self.__build_optimizer(self.model, self.lr)\n\n self.controller = Controller(loss_wrapper=self.loss_wrapper, model=self.model, mean_model=self.mean_model, optimizer=self.optimizer)\n \n self.logger, self.summary = LoggerBuilder(config).load()\n if self.logger is not None:\n self.logger.info(self.optimizer_params)\n self.logger.info(self.scheduler_params)\n \n self.__global_step = 0\n\n def __parse_config(self):\n self.max_epoch = self.config.train[\"epochs\"]\n self.lr = self.config.train[\"lr\"]\n self.loss_names = self.config.train['criterion']['names']\n self.metric_names = self.config.train[\"metric\"][\"names\"]\n self.key_metric_name = self.config.train[\"metric\"][\"key_metric_name\"]\n\n self.log_step_freq = self.config.output[\"log_step_freq\"]\n self.log_epoch_freq = self.config.output[\"log_epoch_freq\"]\n\n def __build_optimizer(self, model: nn.Module, lr: float, *args, **kwargs):\n optimizer_name = self.config.train.optimizer\n scheduler_name = self.config.train.schedule\n \n optimizer, optimizer_config = OptimizerBuilder.load(optimizer_name, model.parameters(), lr)\n scheduler, scheduler_config = SchedulerBuilder.load(scheduler_name, optimizer, self.max_epoch)\n return (optimizer, optimizer_config), (scheduler, scheduler_config)\n\n\n def __get_metrics(self):\n metric_func_list = []\n\n for metric_name in self.metric_names:\n metric_func = MetricBuilder.load(metric_name)\n metric_func_list.append(metric_func)\n return metric_func_list\n\n \n def train(self, epoch: int, dataloader: DataLoader):\n if self.logger is not None:\n self.logger.info(\"Training epoch [{} / {}]\".format(epoch, self.max_epoch))\n\n use_cuda = torch.cuda.is_available()\n self.model.train()\n self.mean_model.train()\n\n current_lr = self.scheduler.get_lr()[0]\n\n if self.summary is not None:\n self.summary.add_scalar(\"train/lr\", current_lr, epoch)\n\n loss_recorder = AverageMeter(type='scalar', name='total loss')\n loss_list_recorder = AverageMeter(type='tuple', num_scalar=3, names=['model1', 'model2', 'model3'])\n metric_list_recorder1 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n metric_list_recorder2 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n metric_list_recorder3 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n \n # === current epoch begins training ===\n for batch_idx, data in enumerate(dataloader):\n img, target = data\n img = img.float()\n target = target.long()\n 
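# the batch size weights the running loss/metric averages below\n            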
batch_size = img.size(0)\n if use_cuda:\n img = img.cuda()\n target = target.cuda()\n\n loss, loss_tuple, outputs_no_grad = self.controller.train_step(img, target, epoch)\n update_ema_variables(self.model, self.mean_model, alpha=0.999, global_step=self.__global_step)\n\n loss_recorder.update(loss.item(), batch_size)\n\n loss_list_recorder.update(loss_tuple, batch_size)\n\n metrics = tuple([func(outputs_no_grad[0], target) for func in self.metric_func_list])\n metric_list_recorder1.update(metrics, batch_size)\n\n metrics = tuple([func(outputs_no_grad[1], target) for func in self.metric_func_list])\n metric_list_recorder2.update(metrics, batch_size)\n\n metrics = tuple([func(outputs_no_grad[2], target) for func in self.metric_func_list])\n metric_list_recorder3.update(metrics, batch_size)\n\n if self.log_step_freq > 0 and self.__global_step % self.log_step_freq == 0:\n if self.logger:\n msg = \"[Train] Epoch:[{}/{}] batch:[{}/{}] loss: {:.4f} loss list: {} metric list: {}\".format(epoch, self.max_epoch, batch_idx + 1, len(dataloader),\n loss_recorder.get_value(), loss_list_recorder, (metric_list_recorder1, metric_list_recorder2, metric_list_recorder3))\n self.logger.info(msg)\n\n self.__global_step += 1\n \n # === current epoch finishes training ===\n\n if epoch % self.log_epoch_freq == 0:\n if self.logger:\n msg = \"[Train] Epoch:[{}/{}] loss: {:.4f} loss list: {} metric list: {}\".format(epoch, self.max_epoch, loss_recorder.get_value(), loss_list_recorder, (metric_list_recorder1, metric_list_recorder2, metric_list_recorder3))\n self.logger.info(msg)\n if self.summary:\n self.summary.add_scalar(\"train/epoch_loss\", loss_recorder.get_value(), epoch)\n names = [\"model1 acc\", \"model2 acc\", \"model3 acc\"]\n values = [metric_list_recorder1.meter.get_value()[0], metric_list_recorder2.meter.get_value()[0], metric_list_recorder3.meter.get_value()[0]]\n for name, value in zip(names, values):\n self.summary.add_scalar(\"train/epoch_{}\".format(name), value, epoch)\n\n self.scheduler.step()\n \n def validate(self, epoch: int, dataloader: DataLoader):\n self.mean_model.eval()\n\n loss_recorder = AverageMeter(type=\"scalar\", name='total loss')\n loss_list_recorder = AverageMeter(type=\"tuple\", num_scalar=3, names=['model1', 'model2', 'model3'])\n metric_list_recorder1 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n metric_list_recorder2 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n metric_list_recorder3 = AverageMeter(type='tuple', num_scalar=self.__num_metric, names=self.metric_names)\n use_cuda = torch.cuda.is_available()\n val_step = 0\n with torch.no_grad():\n # === current epoch begins validation ===\n for batch_idx, data in enumerate(dataloader):\n img, target = data\n img = img.float()\n target = target.long()\n batch_size = img.size(0)\n if use_cuda:\n img = img.cuda()\n target = target.cuda()\n loss, loss_tuple, output_no_grad = self.controller.validate_step(img, target, epoch)\n \n loss_recorder.update(loss.item(), batch_size)\n loss_list_recorder.update(loss_tuple, batch_size)\n metrics = tuple([func(output_no_grad[0], target) for func in self.metric_func_list])\n metric_list_recorder1.update(metrics, batch_size)\n\n metrics = tuple([func(output_no_grad[1], target) for func in self.metric_func_list])\n metric_list_recorder2.update(metrics, batch_size)\n \n metrics = tuple([func(output_no_grad[2], target) for func in self.metric_func_list])\n metric_list_recorder3.update(metrics, batch_size)\n \n if 
self.log_step_freq > 0 and val_step % self.log_step_freq == 0:\n if self.logger:\n msg = \"[Validation] Epoch:[{}/{}] batch:[{}/{}] loss: {:.4f} loss list: {} metric list: {}\".format(epoch, self.max_epoch, batch_idx + 1, len(dataloader),\n loss_recorder.get_value(), loss_list_recorder, (metric_list_recorder1, metric_list_recorder2, metric_list_recorder3))\n self.logger.info(msg)\n val_step += 1\n # === current epoch finishes validation === \n\n if epoch % self.log_epoch_freq == 0:\n if self.logger:\n msg = \"[Validation] Epoch:[{}/{}] loss: {:.4f} loss list: {} metric list: {}\".format(epoch, self.max_epoch, loss_recorder.get_value(), loss_list_recorder, (metric_list_recorder1, metric_list_recorder2, metric_list_recorder3))\n self.logger.info(msg)\n if self.summary:\n self.summary.add_scalar(\"val/epoch_loss\", loss_recorder.get_value(), epoch)\n names = [\"model1 acc\", \"model2 acc\", \"model3 acc\"]\n values = [metric_list_recorder1.meter.get_value()[0], metric_list_recorder2.meter.get_value()[0], metric_list_recorder3.meter.get_value()[0]]\n for name, value in zip(names, values):\n self.summary.add_scalar(\"val/epoch_{}\".format(name), value, epoch)\n\n check_metric = max(values)\n # save checkpoint referring to the save_freq and the saving strategy, besides record the key metric value\n self.ioer.save_file(self.mean_model, epoch, check_metric)","repo_name":"shaoeric/Peer-Collaborative-Learning-for-Online-Knowledge-Distillation","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":9869,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"24973741006","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\n# @FileName : lc_0056.py\r\n# @Author : liang.lian\r\n\r\n\"\"\"\r\nLC 56 : 合并区间\r\n以数组 intervals 表示若干个区间的集合,其中单个区间为 intervals[i] = [starti, endi] 。请你合并所有重叠的区间,\r\n并返回一个不重叠的区间数组,该数组需恰好覆盖输入中的所有区间。\r\n\r\n提示:\r\n1 <= intervals.length <= 10^4\r\nintervals[i].length == 2\r\n0 <= starti <= endi <= 10^4\r\n\r\n示例 1:\r\n输入:intervals = [[1,3],[2,6],[8,10],[15,18]]\r\n输出:[[1,6],[8,10],[15,18]]\r\n解释:区间 [1,3] 和 [2,6] 重叠, 将它们合并为 [1,6].\r\n\r\n示例 2:\r\n输入:intervals = [[1,4],[4,5]]\r\n输出:[[1,5]]\r\n解释:区间 [1,4] 和 [4,5] 可被视为重叠区间。\r\n\"\"\"\r\nfrom typing import List\r\n\r\n\r\nclass Solution:\r\n def merge(self, intervals: List[List[int]]) -> List[List[int]]:\r\n result = []\r\n if len(intervals) == 0:\r\n return result\r\n c_intervals = []\r\n for interval in intervals:\r\n list.append(c_intervals, interval)\r\n c_intervals = sorted(c_intervals, key=lambda t: t[0])\r\n start_f, end_f = c_intervals[0][0], c_intervals[0][1]\r\n for enum in enumerate(c_intervals[1:]):\r\n index, start, end = enum[0], enum[1][0], enum[1][1]\r\n if start <= start_f or start <= end_f:\r\n if start <= start_f:\r\n start_f = start\r\n if end > end_f:\r\n end_f = end\r\n else:\r\n list.append(result, [start_f, end_f])\r\n start_f = start\r\n end_f = end\r\n list.append(result, [start_f, end_f])\r\n return result\r\n\r\n\r\nsolution = Solution()\r\nintervals = [[2, 3], [4, 5], [6, 7], [8, 9], [1, 10]]\r\nresult_val = solution.merge(intervals)\r\nprint(result_val)\r\n\r\n\"\"\"\r\n执行用时:52 ms, 在所有 Python3 提交中击败了53.71%的用户\r\n内存消耗:19.4 MB, 在所有 Python3 提交中击败了5.05%的用户\r\n通过测试用例:170 / 
170\r\n\"\"\"\r\n","repo_name":"comeonlian/code-dev","sub_path":"leetcode-py/algorithms/page02/lc_0056.py","file_name":"lc_0056.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25732259138","text":"import math\nimport pandas as pd\nimport numpy as np\nimport models as huggingface_models\n\nif __name__ == \"__main__\":\n # Change parameters if necessary\n seed = 1\n num_samples = 100000\n batch_size = 10000\n model_fns = {\n \"twitter\": huggingface_models.create_twitter_model,\n \"english\": huggingface_models.create_large_english_model,\n \"financial\": huggingface_models.create_financial_model\n }\n results_folder = \"./results\"\n \n import argparse\n parser = argparse.ArgumentParser(description='Sexy Dominik.')\n parser.add_argument('-model_name', type=str, help='Name of the model to be executed')\n parser.add_argument('-batch_index', default=0, type=int, help='Batch index from which to resume executing')\n args = parser.parse_args()\n \n print(\"Loading data...\")\n data = pd.read_csv(\"./results/sample_data.csv\")\n \n print(f\"Loading model {args.model_name}...\")\n model = model_fns[args.model_name]()\n \n print(f\"Running {args.model_name}...\")\n batches = np.array_split(data, math.ceil(num_samples / batch_size))\n for i, batch in enumerate(batches[args.batch_index:], args.batch_index):\n res = model.predict(batch[\"review_text\"].fillna(\"\"))\n res.index = batch.index\n res.to_csv(f\"results/{args.model_name}_{i}_results.csv\")\n print(f\"Processed {(i + 1) * batch_size} rows...\")\n print(\"Done!\")","repo_name":"CommanderCero/META_RoBERTa_Analysis","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31691031406","text":"# practicepython.org exercise 8: rock paper scissors\r\n\r\n#if __name__ == \"__main__\":\r\n\r\nimport random\r\ndef play_r_p_s():\r\n print(\"Welcome to Rock Paper Scissors!!\")\r\n players = int(input(\"\\n1 or 2 players? \"))\r\n while players != 1 and players != 2:\r\n players = input(\"That's not a valid option. 1 or 2 players?\")\r\n options = ['rock','paper','scissors']\r\n play = True\r\n while play:\r\n print(\"\\nEnter q to quit.\\n\")\r\n p1 = input(\"Player 1: rock, paper, or scissors? \")\r\n if p1 == 'q':\r\n break\r\n while p1 not in options:\r\n p1 = input(\"That's not an option! rock, paper, or scissors? \")\r\n print('\\n' *80)\r\n if players == 2:\r\n p2 = input(\"Player 2: rock, paper, or scissors? \")\r\n p2_name = \"player 2\"\r\n p1_name = \"Player 1\"\r\n vic = \"wins\"\r\n else:\r\n p2 = options[random.randint(0,2)]\r\n p2_name = \"the computer\"\r\n p1_name = \"You\"\r\n vic = \"win\"\r\n if p2 == 'q':\r\n break\r\n while p2 not in options:\r\n p2 = input(\"That's not an option! rock, paper, or scissors? \")\r\n print('\\n' *80)\r\n if (p1 == 'rock' and p2 == 'scissors'\r\n or p1 == 'scissors' and p2 == 'paper'\r\n or p1 == 'paper' and p2 == 'rock'):\r\n print(\"{} chose {} and {} chose {}. {} {}!!\".format(p1_name,p1,p2_name,p2,p1_name,vic))\r\n elif p1 == p2:\r\n print(\"You both picked {}. It's a tie\".format(p1))\r\n else:\r\n print(\"{} chose {} and {} chose {}. {} wins!!\".format(p1_name,p1,p2_name,p2,p2_name))\r\n again = input(\"Play again? (y/n) \")\r\n while again != 'y' and again != 'n':\r\n again = input(\"Play again? 
(y/n) \")\r\n if again == 'n':\r\n play = False\r\n \r\n","repo_name":"danthropogenic/Games","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23599040781","text":"\n\ninf = float(\"infinity\")\n\ndef count(s):\n\tl = list(s)\n\tl.reverse()\n\tcnt = 0\n\tfor c in l:\n\t\tif c=='0': cnt += 1\n\t\telse: break\n\treturn cnt\n\n\n\ndef fin(l):\n\tfor i in range(len(l)):\n\t\tif l[i]>i: return False\n\treturn True\n\n\ndef moveup(l,i):\n\t#print(\"# \"+str(l)+\",\"+str(i))\n\tif fin(l):\n\t\treturn 0\n\tif i>=len(l):\n\t\treturn inf\n\tbest = moveup(list(l),i+1)\n\t#bnl = []\n\tfor j in range(i):\n\t\tnl = list(l)\n\t\tnl.insert(j,nl.pop(i))\n\t\tswaps = (i-j)+moveup(nl,i+1)\n\t\tbest = min(best,swaps)\n\t\tif best==swaps:\n\t\t\tbnl = nl\n\t#print(str(l)+\",\"+str(i)+\" has best \"+str(bnl))\n\treturn best\n\n\nT = int(input())\n\nfor X in range(1,T+1):\n\t\n\tswaps = 0\n\tN = int(input())\n\tlines = []\n\tfor r in range(N):\n\t\tlines += [max(input().rfind(\"1\"),0)]\n\t\n\t#print(fin(lines))\n\t\n\tswaps = moveup(lines,1)\n\t\n\tprint(\"Case #\"+str(X)+\": \"+str(swaps))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_46/101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8304445785","text":"import numpy as np\nimport onnx\nfrom onnx import numpy_helper\n\n\ndef pb_to_array(pb_input):\n tensor = onnx.TensorProto()\n with open(pb_input, \"rb\") as f:\n tensor.ParseFromString(f.read())\n in_array = numpy_helper.to_array(tensor)\n return in_array\n\n\ndef array_to_pb(in_array, save=\"tensor.pb\"):\n tensor = numpy_helper.from_array(in_array)\n print(\"TensorProto:\\n{}\".format(tensor))\n\n # Convert the TensorProto to a Numpy array\n new_array = numpy_helper.to_array(tensor)\n print(\"After round trip, Numpy array:\\n{}\\n\".format(new_array))\n\n # Save the TensorProto\n with open(save, \"wb\") as f:\n f.write(tensor.SerializeToString())\n\n\nin_data = pb_to_array(\"input_0.pb\")\narray_to_pb(in_data)\nout_data = pb_to_array(\"tensor.pb\")\nprint(in_data - out_data)\n","repo_name":"bleedingfight/Inference","sub_path":"tools/data_convertor.py","file_name":"data_convertor.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40466925194","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 20 14:23:40 2017\n\n@author: JTay\n\"\"\"\n\nimport numpy as np\n\nimport sklearn.model_selection as ms\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\n\nadult = pd.read_csv('winequality-white.csv', sep=';')\nX = adult.drop('quality', 1).copy().values\nadultY1 = adult['quality'].copy().values\nadultY = adultY1 <= 5\n\n\n\nfrom sklearn.preprocessing import scale\nadultX = scale(X)\n\n\n# madelon_trgX, madelon_tstX, madelon_trgY, madelon_tstY = ms.train_test_split(madelonX, madelonY, test_size=0.3, random_state=0,stratify=madelonY)\nmadelon_trgX, madelon_tstX, madelon_trgY, madelon_tstY = ms.train_test_split(adultX, adultY, test_size=0.3, 
random_state=0,stratify=adultY)\npipe = Pipeline([('Scale',StandardScaler())])\n\ntrgX = pipe.fit_transform(madelon_trgX,madelon_trgY)\ntrgY = np.atleast_2d(madelon_trgY).T\ntstX = pipe.transform(madelon_tstX)\ntstY = np.atleast_2d(madelon_tstY).T\n\n\ntrgX, valX, trgY, valY = ms.train_test_split(trgX, trgY, test_size=0.2, random_state=1,stratify=trgY)\n\n\n\ntst = pd.DataFrame(np.hstack((tstX,tstY)))\ntrg = pd.DataFrame(np.hstack((trgX,trgY)))\nval = pd.DataFrame(np.hstack((valX,valY)))\n\n\nprint(tst.shape)\nprint(trg.shape)\nprint(val.shape)\n\n\ntst.to_csv('m_test.csv',index=False,header=False)\ntrg.to_csv('m_trg.csv',index=False,header=False)\nval.to_csv('m_val.csv',index=False,header=False)","repo_name":"rylew2/Machine-Learning-Assignments","sub_path":"Randomized Optimization/jtay6 p2 clean/Datasets/DUMPER.py","file_name":"DUMPER.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71521149315","text":"import random\nimport time\n\n\n# Bug fixes applied: the default upper bound now covers the whole list, the\n# comparison uses the search target (not the midpoint index), and each\n# recursive call halves the range around punto_medio.\ndef busqueda_binaria(objetivo, lista, inicio = None, limite = None):\n    if inicio is None:\n        inicio = 0\n    \n    if limite is None:\n        limite = len(lista) - 1\n    \n    if limite < inicio:\n        return -1\n    \n    punto_medio = (inicio + limite) // 2\n    \n    if lista[punto_medio] == objetivo:\n        return punto_medio\n    elif objetivo < lista[punto_medio]:\n        return busqueda_binaria(objetivo, lista, inicio, punto_medio - 1)\n    else:\n        return busqueda_binaria(objetivo, lista, punto_medio + 1, limite)\n\nif __name__=='__main__':\n    size = 30000\n    randoms = set()\n    \n    while len(randoms) < size:\n        randoms.add(random.randint(-3 * size, 3 * size))\n    \n    lista_ordenada = sorted(list(randoms))\n    \n    tiempo_inicio = time.time()\n    \n    for mi_objetivo in lista_ordenada:\n        busqueda_binaria(mi_objetivo, lista_ordenada)\n    \n    tiempo_final = time.time()\n    \n    print(f\"El tiempo tardado es: {tiempo_final - tiempo_inicio} segundos.\")","repo_name":"brunocoronado49/python-algorithms","sub_path":"busqueda_binaria.py","file_name":"busqueda_binaria.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13350869714","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\nimport sys\nsys.path.insert(1,\"../../\")\nimport h2o\nimport os\nimport subprocess\nfrom subprocess import STDOUT, PIPE\nfrom tests import pyunit_utils\nfrom h2o.estimators.kmeans import H2OKMeansEstimator\nfrom h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator\n\n\ndef build_mojo_pipeline():\n    results_dir = pyunit_utils.locate(\"results\")\n    iris_csv = pyunit_utils.locate('smalldata/iris/iris_train.csv')\n    iris = h2o.import_file(iris_csv)\n\n    pca = H2OPrincipalComponentAnalysisEstimator(k=2)\n    pca.train(training_frame=iris)\n\n    principal_components = pca.predict(iris)\n\n    km = H2OKMeansEstimator(k=3)\n    km.train(training_frame=principal_components)\n\n    pca_mojo_path = pca.download_mojo(path=results_dir)\n    km_mojo_path = km.download_mojo(get_genmodel_jar=True, path=results_dir)\n\n    java_cmd = [\"java\", \"-cp\", os.path.join(results_dir, \"h2o-genmodel.jar\"), \"hex.genmodel.tools.BuildPipeline\", \"--mapping\"]\n    pca_mojo_name = os.path.basename(pca_mojo_path).split('.')[0]\n    for i, pc in enumerate(principal_components.columns):\n        mapping = pc + '=' + pca_mojo_name + ':' + str(i)\n        java_cmd += [mapping]\n    java_cmd += [\"--output\", os.path.join(results_dir, \"pipe.zip\"), \"--input\", km_mojo_path, pca_mojo_path]\n\n    subprocess.Popen(java_cmd, 
stdout=PIPE, stderr=STDOUT).communicate()\n\n h2o_preds = km.predict(principal_components)\n mojo_preds_raw = h2o.mojo_predict_csv(\n input_csv_path=iris_csv,\n mojo_zip_path=os.path.join(results_dir, \"pipe.zip\")\n )\n mojo_preds = h2o.H2OFrame([c['cluster'] for c in mojo_preds_raw], column_names=['predict'])\n \n assert (mojo_preds == h2o_preds).mean()[0, \"predict\"] == 1\n \n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(build_mojo_pipeline)\nelse:\n build_mojo_pipeline()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_misc/pyunit_build_mojo_pipeline.py","file_name":"pyunit_build_mojo_pipeline.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"23205550273","text":"import sqlite3\nimport random as rand\nclass Bank:\n print(\"WELCOME TO DB BANK\")\n print(\"-------------------\")\n def __init__(self):\n self.file = sqlite3.connect(\"Bank.db\")\n self.c = self.file.cursor()\n\n def CreateAccount(self): \n self.c.execute(\"\"\"create table if not exists Customer\n (\n User_Name text,\n Account_Number integer,\n Account_Balance integer,\n Deposit integer,\n Withdraw integer,\n Transfer integer\n )\"\"\")\n n1 = input(\"Enter your First Name:- \").upper()\n n2 = input(\"Enter Your Last Name:- \").upper()\n print(\"---------------------------------------\")\n if n1.isalpha() and not n1.isspace() and n2.isalpha() and not n2.isspace() and len(n1)>2 and len(n2)>2:\n name = n1+\" \"+n2\n number = rand.randint(10000000,99999999)\n self.balance = 0\n self.deposit_amount = 0\n self.Withdraw_amount = 0\n self.Transfer_amount = 0\n self.c.execute(\"insert into Customer values(?,?,?,?,?,?)\",(name,number,self.balance, self.deposit_amount, self.Withdraw_amount, self.Transfer_amount ))\n print(\"Hello {} your Account was successfully created. Please note your Account Number.\".format(name))\n print(\"Your Account Number is:- {}\".format(number))\n print(\"---------------------------------------\")\n self.file.commit()\n self.file.close()\n #self.c.execute(\"select * from Customer\")\n #print(self.c.fetchall())\n \n else:\n print(\"Enter Valid Name, Try Again...!\")\n\n\n \n def Login(self):\n acc_no = int(input(\"Enter your Account Number:- \"))\n check = True\n flag = False\n for a,b,c,d,e,f in self.c.execute(\"select * from Customer\"):\n if b == acc_no:\n flag = True\n check = False\n total = c\n identity = a\n # amt_dp = d\n # amt_wt = e\n # amt_trans = f\n print(\"(C)-Check Balance\")\n print(\"(D)-Deposit\")\n print(\"(W)-Withdraw\")\n print(\"(T)-Transfer\")\n print(\"(E)-Exit\")\n option = input(\"What would you like to do today? (C)/(D)/(W)/(T)\")\n \n if flag and (option=='d' or option=='D'):\n amt_dp = int(input(\"Enter the amount to Deposit:- \"))\n deposit = amt_dp + total \n self.c.execute(\"update Customer set Account_Balance = ?, Deposit = ? where Account_Number = ?\",(deposit,amt_dp,acc_no))\n self.file.commit()\n print(\"amount Deposited ${} , Available Balance is ${} \".format(amt_dp,deposit))\n # self.c.execute(\"select * from Customer\")\n # print(self.c.fetchall())\n\n if flag and (option == 'w' or option == 'W'): \n amt_wt = int(input(\"Enter the amount to Withdraw:- \"))\n if total>0 and total >= amt_wt:\n withdraw_bal = total - amt_wt\n self.c.execute(\"update Customer set Account_Balance = ?, Withdraw = ? where Account_Number = ?\",(withdraw_bal, amt_wt,acc_no))\n self.file.commit()\n print(\"Withdraw ${} done successfully...! 
Available balance ${}\".format(amt_wt,withdraw_bal))\n # self.c.execute(\"select * from Customer\")\n # print(self.c.fetchall())\n\n else:\n print(\"Low Balance\")\n\n if flag and (option == 't' or option == 'T'): \n amt_trans = int(input(\"Enter the amount to Transfer:- \"))\n if total>0 and total >= amt_trans:\n transfer_amt = total - amt_trans\n self.c.execute(\"update Customer set Account_Balance = ?, Transfer = ? where Account_Number = ?\",(transfer_amt,amt_trans,acc_no))\n self.file.commit()\n print(\"Transfer ${} done successfully...! Available balance ${}\".format(amt_trans,transfer_amt))\n # self.c.execute(\"select * from Customer\")\n # print(self.c.fetchall())\n\n if flag and (option == 'c' or option == 'C'):\n print(\"Hello {}, Your Account Balance is ${}\".format(identity,total))\n print(\"----------------------------------------------------------\")\n if check:\n print(\"Invalid Account Number.\")\n\n if flag and (option == 'e' or option == 'E'):\n bk = Bank;\n\nfun = Bank()\nprint(\"(C)-Create Account\")\nprint(\"(O)-Open Account\")\nop = input(\"What would you like to do today? (C)/(O):- \")\nif op == 'c' or op == 'C':\n fun.CreateAccount()\nif op =='o' or op == 'O':\n fun.Login()","repo_name":"DivyaTanwar/BankApplication-using-Python","sub_path":"bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"439117195","text":"import pandas as pd\nimport json\nfrom collections import Counter\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nwith open('train/scene_train_annotations_20170904.json', 'r') as f: #label文件\n label_raw = json.load(f)\n \nwith open('validation/scene_validation_annotations_20170908.json', 'r') as f: #label文件\n label_raw2 = json.load(f)\n \n\nmlist = []\nfor _ in label_raw:\n mlist.append(_['label_id'])\n \nmlist2 = []\nfor _ in label_raw2:\n mlist2.append(_['label_id'])\n \na = Counter(mlist)\nprint(min(a.values()))\nprint(max(a.values()))\n\nb = Counter(mlist2)\nprint(min(b.values()))\nprint(max(b.values()))\n\nmlist = [int(i) for i in mlist]\nmlist2 = [int(i) for i in mlist2]\na1 = pd.DataFrame(mlist)\nb1 = pd.DataFrame(mlist2)\n\nplt.figure()\nsns.countplot(x=0, data=a1)\nplt.savefig(\"train_distribution.png\")\nplt.figure()\nsns.countplot(x=0, data=b1)\nplt.savefig(\"validation_distribution.png\")\n\n'''\n类别可能存在一定的不平衡问题\n'''\n\n\n'''\n训练集和验证集有没有重复先不管了\n'''\n\nclass_sample_count = []\nfor i in range(0,80):\n class_sample_count.append(a[str(i)])","repo_name":"filick/scene","sub_path":"data/check_balence.py","file_name":"check_balence.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"39940371142","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 17 01:22:03 2019\n\n@author: yihuicui\n\"\"\"\nimport cv2\nimport numpy as np\nimport sys\nfrom queue import PriorityQueue\nimport matplotlib.pyplot as plt\ntry:\n import vrep\nexcept:\n print ('--------------------------------------------------------------')\n print ('\"vrep.py\" could not be imported. 
This means very probably that')\n print ('either \"vrep.py\" or the remoteApi library could not be found.')\n print ('Make sure both are in the same folder as this file,')\n print ('or appropriately adjust the file \"vrep.py\"')\n print ('--------------------------------------------------------------')\n print ('')\n\nimport time\nfrom IPython.display import clear_output\nfrom IPython.display import display\nimport ctypes\nfrom PIL import Image\n\n\n\nlad = 0.5 #look ahead distance\nprint ('Starting Connection')\nvrep.simxFinish(-1) # just in case, close all opened connections\nclientID=vrep.simxStart('127.0.0.1',19999,True,True,5000,5) # Connect to V-REP\nprint(clientID)\n\nif clientID!=-1:\n print ('Connected to remote API server') \n e = vrep.simxStartSimulation(clientID,vrep.simx_opmode_blocking)\n print('start',e)\n try:\n err = 10\n res,camera0_handle = vrep.simxGetObjectHandle(clientID,'top_view_camera',vrep.simx_opmode_oneshot_wait)\n \n while err != vrep.simx_return_ok:\n tick = time.time()\n err,resolution,image=vrep.simxGetVisionSensorImage(clientID,camera0_handle,0,vrep.simx_opmode_streaming) \n if err == vrep.simx_return_ok:\n img = np.array(image,dtype=np.uint8)\n img.resize([resolution[1],resolution[0],3])\n im = Image.fromarray(img)\n im.save(\"top_view.png\")\n finally:\n time.sleep(0.1)\n vrep.simxStopSimulation(clientID,vrep.simx_opmode_blocking)\n vrep.simxFinish(-1)\n\n","repo_name":"Xinyiguo2/ECE470_Project","sub_path":"image_process.py","file_name":"image_process.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36814440380","text":"import numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef recortar_cuadro_colores(imagen_tarjeta):\r\n # Cargar la imagen de la tarjeta\r\n img_tarjeta = cv2.imread(imagen_tarjeta)\r\n\r\n # Convertir la imagen a escala de grises\r\n img_gray = cv2.cvtColor(img_tarjeta, cv2.COLOR_BGR2GRAY)\r\n\r\n # Aplicar el algoritmo de detección de esquinas de Harris\r\n corners = cv2.cornerHarris(img_gray, 2, 3, 0.04)\r\n\r\n # Obtener las coordenadas de las esquinas\r\n corners = cv2.dilate(corners, None)\r\n _, corners_binary = cv2.threshold(corners, 0.01 * corners.max(), 255, 0)\r\n corners_binary = np.uint8(corners_binary)\r\n contours, _ = cv2.findContours(corners_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n contour = max(contours, key=cv2.contourArea)\r\n rect = cv2.minAreaRect(contour)\r\n box = cv2.boxPoints(rect)\r\n box = np.intp(box)\r\n\r\n # Calcular las dimensiones del cuadro de colores en función del contenido de la imagen\r\n min_x = np.min(box[:, 0])\r\n max_x = np.max(box[:, 0])\r\n min_y = np.min(box[:, 1])\r\n max_y = np.max(box[:, 1])\r\n width = max_x - min_x\r\n height = max_y - min_y\r\n\r\n # Ajustar las dimensiones del rectángulo para tener una proporción adecuada\r\n aspect_ratio = width / height\r\n target_ratio = 5 / 7\r\n if aspect_ratio > target_ratio:\r\n new_height = width / target_ratio\r\n offset = int((height - new_height) // 2)\r\n min_y += offset\r\n max_y -= offset\r\n else:\r\n new_width = height * target_ratio\r\n offset = int((width - new_width) // 2)\r\n min_x += offset\r\n max_x -= offset\r\n\r\n # Recortar la región de interés (cuadro de colores)\r\n roi_tarjeta = img_tarjeta[min_y:max_y, min_x:max_x]\r\n\r\n # Desplegar la imagen recortada utilizando plt\r\n plt.imshow(cv2.cvtColor(roi_tarjeta, cv2.COLOR_BGR2RGB))\r\n plt.title('Cuadro de Colores Recortado')\r\n 
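    # Note (added for clarity): cv2.imread returns images in BGR channel order,
    # so the cv2.cvtColor(..., cv2.COLOR_BGR2RGB) call above is what keeps
    # matplotlib from drawing the crop with red and blue swapped.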
plt.axis('off')\r\n plt.show()\r\n\r\n # Devolver la imagen recortada\r\n return roi_tarjeta\r\n\r\n\r\ndef calcular_fidelidad_color(imagen_referencia, imagen_tarjeta):\r\n # Cargar las imágenes\r\n img_referencia = cv2.imread(imagen_referencia)\r\n img_tarjeta = cv2.imread(imagen_tarjeta)\r\n\r\n # Recortar la región de interés (cuadro de colores)\r\n roi_tarjeta = recortar_cuadro_colores(imagen_tarjeta)\r\n\r\n # Redimensionar la imagen de referencia para que coincida con el tamaño del cuadro de colores de la tarjeta\r\n img_referencia = cv2.resize(img_referencia, (roi_tarjeta.shape[1], roi_tarjeta.shape[0]))\r\n\r\n # Calcular la diferencia absoluta entre los píxeles de las imágenes\r\n diferencia = cv2.absdiff(roi_tarjeta, img_referencia)\r\n\r\n # Calcular el promedio de la diferencia de color para obtener la fidelidad de color\r\n fidelidad_color = diferencia.mean()\r\n\r\n return fidelidad_color\r\n\r\n\r\ndef calcular_cdi(imagen_referencia, imagen_tarjeta):\r\n # Cargar las imágenes\r\n img_referencia = cv2.imread(imagen_referencia)\r\n img_tarjeta = cv2.imread(imagen_tarjeta)\r\n\r\n # Recortar la región de interés (cuadro de colores)\r\n roi_tarjeta = recortar_cuadro_colores(imagen_tarjeta)\r\n\r\n # Redimensionar la imagen de la tarjeta para que tenga las mismas dimensiones que la imagen de referencia\r\n img_tarjeta = cv2.resize(img_tarjeta, (img_referencia.shape[1], img_referencia.shape[0]))\r\n\r\n # Convertir las imágenes a formato CIELAB\r\n img_referencia_lab = cv2.cvtColor(img_referencia, cv2.COLOR_BGR2LAB)\r\n img_tarjeta_lab = cv2.cvtColor(img_tarjeta, cv2.COLOR_BGR2LAB)\r\n\r\n # Obtener los componentes L, A y B de las imágenes\r\n l_referencia, a_referencia, b_referencia = cv2.split(img_referencia_lab)\r\n l_tarjeta, a_tarjeta, b_tarjeta = cv2.split(img_tarjeta_lab)\r\n\r\n # Calcular la diferencia promedio entre los componentes de color recortados\r\n cdi = np.mean(np.abs(l_referencia - l_tarjeta) +\r\n np.abs(a_referencia - a_tarjeta) +\r\n np.abs(b_referencia - b_tarjeta))\r\n\r\n # Mostrar las imágenes y los pasos intermedios\r\n fig, axes = plt.subplots(2, 2, figsize=(10, 8))\r\n\r\n # Imagen de la tarjeta con el cuadro de colores recortado\r\n axes[0, 0].imshow(cv2.cvtColor(roi_tarjeta, cv2.COLOR_BGR2RGB))\r\n axes[0, 0].set_title('Región de Interés (Cuadro de Colores)')\r\n\r\n # Imagen de referencia\r\n axes[0, 1].imshow(cv2.cvtColor(img_referencia, cv2.COLOR_BGR2RGB))\r\n axes[0, 1].set_title('Imagen de Referencia')\r\n\r\n # Imagen de la tarjeta\r\n axes[1, 0].imshow(cv2.cvtColor(img_tarjeta, cv2.COLOR_BGR2RGB))\r\n axes[1, 0].set_title('Imagen de la Tarjeta')\r\n\r\n # Mostrar el CDI\r\n fig.suptitle(f'Índice de Diferencia de Color (CDI): {cdi:.2f}', fontsize=12)\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n return cdi\r\n\r\n\r\ndef detectar_tarjeta(imagen):\r\n # Convertir la imagen a escala de grises\r\n gris = cv2.cvtColor(imagen, cv2.COLOR_BGR2GRAY)\r\n\r\n # Aplicar un umbral para separar la tarjeta del fondo\r\n _, umbral = cv2.threshold(gris, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\r\n\r\n # Encontrar los contornos en la imagen umbralizada\r\n contornos, _ = cv2.findContours(umbral, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n # Encontrar el contorno más grande (asumiendo que es la tarjeta)\r\n contorno_tarjeta = max(contornos, key=cv2.contourArea)\r\n\r\n # Calcular el rectángulo delimitador de la tarjeta\r\n x, y, w, h = cv2.boundingRect(contorno_tarjeta)\r\n\r\n # Recortar la región de interés (tarjeta) de la imagen original\r\n 
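    # Note (added for clarity): NumPy indexes images as [rows, cols] = [y, x],
    # so boundingRect's (x, y, w, h) maps to the slice [y:y + h, x:x + w] below.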
tarjeta = imagen[y:y + h, x:x + w]\r\n\r\n # Dibujar el rectángulo delimitador en la imagen original\r\n cv2.rectangle(imagen, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n return imagen, tarjeta\r\n\r\n# Ruta de la imagen TIFF de referencia y la imagen JPEG de la tarjeta\r\nimagen_referencia = '/Users/nprantl/Desktop/Evaluacion_camara/AlgunasCapturas/referencia_color_rgb_a.tif'\r\nimagen_tarjeta = '/Users/nprantl/Desktop/Evaluacion_camara/AlgunasCapturas/Color_1.jpg'\r\n\r\nimagen = cv2.imread(imagen_tarjeta)\r\n\r\n# Detectar la tarjeta en la imagen y obtener la tarjeta recortada\r\nimagen_con_tarjeta, tarjeta_recortada = detectar_tarjeta(imagen)\r\n\r\n# Mostrar la imagen resultante con la tarjeta detectada\r\ncv2.imshow('Tarjeta detectada', imagen_con_tarjeta)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n# Mostrar la tarjeta recortada\r\ncv2.imshow('Tarjeta recortada', tarjeta_recortada)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n# # Calcular la fidelidad de color\r\n# fidelidad = calcular_fidelidad_color(imagen_referencia, imagen_tarjeta)\r\n#\r\n# # Calcular el Índice de Diferencia de Color (CDI)\r\n# cdi = calcular_cdi(imagen_referencia, imagen_tarjeta)\r\n#\r\n# print(f'La fidelidad de color es: {fidelidad}')\r\n# print(f'El Índice de Diferencia de Color (CDI) es: {cdi}')\r\n","repo_name":"nicolasprantl/test-image-detection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6782,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16187464011","text":"\"\"\"\r\n Esse exercício é referente a aula 07 do curso de Python do Curso em vídeo:\r\n\r\n Desafio 008: Escreva um programa que leia um valor em metros e o exiba\r\n convertido em centímetros e milímetros.\r\n\r\n Esse programa pede para o usuário digitar um valor em metros e mostra esse valor\r\n convertido em todas as medidas.\r\n\r\n By: José Brenon - 15/04/2023\r\n\"\"\"\r\nn = float(input('Digite uma distância em metros: '))\r\ndm = n * 10\r\ncm = n * 100\r\nmm = n * 1000\r\ndam = n / 10\r\nhm = n / 100\r\nkm = n / 1000\r\nprint('A medida de {:.1f}m corresponde a: \\n'\r\n '{}km \\n{}hm \\n{}dam\\n{}dm \\n{}m \\n{:.0f}dm \\n{:.0f}cm \\n{:.0f}mm'.format(n, km, hm, dam, dm, n, dm, cm, mm))\r\n","repo_name":"eibrenon/python-aulas-cursoemvideo","sub_path":"python-exercicios-cursoemvideo/ex008.py","file_name":"ex008.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13348884494","text":"from past.utils import old_div\nimport sys\nsys.path.insert(1,\"../../../\")\nimport h2o\nfrom tests import pyunit_utils\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\n\ntry: # redirect python output\n from StringIO import StringIO # for python 3\nexcept ImportError:\n from io import StringIO # for python 2\n\n# This test is used to ensured that the correct warning message is received if user tries to use \n# remove_collinear_columns with lambda_search or non-zero lambda\ndef test_GLM_RCC_warning():\n warnNumber = 1\n hdf = h2o.upload_file(pyunit_utils.locate(\"smalldata/prostate/prostate_complete.csv.zip\"))\n\n print(\"Testing for family: TWEEDIE\")\n print(\"Set variables for h2o.\")\n y = \"CAPSULE\"\n x = [\"AGE\",\"RACE\",\"DCAPS\",\"PSA\",\"VOL\",\"DPROS\",\"GLEASON\"]\n\n print(\"Create models with lambda_search\")\n buffer = StringIO() # redirect output\n sys.stderr=buffer\n model_h2o_tweedie = 
H2OGeneralizedLinearEstimator(family=\"tweedie\", link=\"tweedie\", lambda_search=True, \n remove_collinear_columns=True, solver=\"irlsm\")\n model_h2o_tweedie.train(x=x, y=y, training_frame=hdf) # this should generate a warning message\n sys.stderr=sys.__stderr__ # redirect printout back to normal path\n \n # check and make sure we get the correct warning message\n warn_phrase = \"It is used improperly here with lambda_search\"\n try: # for python 2.7\n assert len(buffer.buflist)==warnNumber\n print(buffer.buflist[0])\n assert warn_phrase in buffer.buflist[0]\n except: # for python 3.\n warns = buffer.getvalue()\n print(\"*** captured warning message: {0}\".format(warns))\n assert warn_phrase in warns\n \n print(\"Create models with non-zero lambda\")\n buffer = StringIO() # redirect output\n sys.stderr=buffer\n model_h2o_tweedie = H2OGeneralizedLinearEstimator(family=\"tweedie\", link=\"tweedie\", Lambda=0.01,\n remove_collinear_columns=True, solver=\"irlsm\")\n model_h2o_tweedie.train(x=x, y=y, training_frame=hdf) # this should generate a warning message\n sys.stderr=sys.__stderr__ # redirect printout back to normal path\n # check and make sure we get the correct warning message\n warn_phrase = \"It is used improperly here. Please set lambda=0\"\n try: # for python 2.7\n assert len(buffer.buflist)==warnNumber\n print(buffer.buflist[0])\n assert warn_phrase in buffer.buflist[0]\n except: # for python 3.\n warns = buffer.getvalue()\n print(\"*** captured warning message: {0}\".format(warns))\n assert warn_phrase in warns\n\nif __name__ == \"__main__\":\n pyunit_utils.standalone_test(test_GLM_RCC_warning)\nelse:\n test_GLM_RCC_warning()\n","repo_name":"h2oai/h2o-3","sub_path":"h2o-py/tests/testdir_algos/glm/pyunit_PUBDEV_8072_remove_collinear_columns_warnings.py","file_name":"pyunit_PUBDEV_8072_remove_collinear_columns_warnings.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":6553,"dataset":"github-code","pt":"61"} +{"seq_id":"1727689000","text":"from datetime import datetime\nfrom os import environ\nfrom os.path import join\nfrom time import sleep\nfrom tests.helpers_asserts import assert_exists_dir, assert_exists_file\nfrom tests.helpers_utils import to_asset_path\n\n\ndef test_hpc_connector_transfer_file(hpc_data_transfer):\n \"\"\"\n Testing the put_file and get_file functionality of the HPC transfer\n \"\"\"\n batch_script_id = \"test_empty.sh\"\n test_local_file_path = to_asset_path(resource_type=\"batch_scripts\", name=batch_script_id)\n assert_exists_file(test_local_file_path)\n\n test_hpc_file_path = join(hpc_data_transfer.project_root_dir, batch_script_id)\n hpc_data_transfer.put_file(\n local_src=test_local_file_path,\n remote_dst=test_hpc_file_path\n )\n sleep(2)\n test_local_received_file_path = join(environ.get(\"OPERANDI_SERVER_BASE_DIR\"), batch_script_id)\n hpc_data_transfer.get_file(\n remote_src=test_hpc_file_path,\n local_dst=test_local_received_file_path\n )\n sleep(2)\n assert_exists_file(test_local_received_file_path)\n\n\ndef test_hpc_connector_transfer_dir(hpc_data_transfer):\n \"\"\"\n Testing the put_dir and get_dir functionality of the HPC transfer\n \"\"\"\n test_local_dir_path = to_asset_path(resource_type=\"workspaces\", name=\"dummy_ws\")\n assert_exists_dir(test_local_dir_path)\n\n current_time = datetime.now().strftime(\"%Y%m%d_%H%M\")\n workspace_id = f\"test_folder_{current_time}\"\n test_hpc_dir_path = join(hpc_data_transfer.project_root_dir, workspace_id)\n hpc_data_transfer.put_dir(\n 
local_src=test_local_dir_path,\n remote_dst=test_hpc_dir_path\n )\n sleep(5)\n test_local_received_dir_path = join(environ.get(\"OPERANDI_SERVER_BASE_DIR\"), workspace_id)\n hpc_data_transfer.get_dir(\n remote_src=test_hpc_dir_path,\n local_dst=test_local_received_dir_path\n )\n sleep(2)\n assert_exists_dir(test_local_received_dir_path)\n","repo_name":"subugoe/operandi","sub_path":"tests/tests_utils/test_hpc_transfer.py","file_name":"test_hpc_transfer.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"29101669815","text":"from nfl.models import Team, Game, Season\nfrom django.core.management.base import BaseCommand\nimport xml.etree.ElementTree as et\nfrom urllib import request\nimport datetime\n\nclass Command(BaseCommand):\n def parsetime(self, date, time):\n return \"{}-{}-{} {}PM -0500\".format(date[:4],date[4:6],date[6:8],time)\n\n def handle(self, *args, **options):\n data = request.urlopen(\"http://www.nfl.com/liveupdate/scorestrip/ss.xml\").read().decode()\n tree = et.ElementTree(et.fromstring(data))\n root = tree.getroot()\n for child in root:\n week = child.attrib['w']\n sea = Season.objects.get(year=\"2017\")\n for gameelement in child:\n game = gameelement.attrib\n team1 = Team.objects.get(initials=game['v'])\n team2 = Team.objects.get(initials=game['h'])\n timeuk = datetime.datetime.strptime(self.parsetime(game['eid'], game['t']), \"%Y-%m-%d %I:%M%p %z\")\n newgame = Game(home_team=team2, away_team=team1, ko=timeuk, week=week, season=sea, home_score=game['hs'], away_score=game['vs'])\n newgame.save()","repo_name":"Karamello/Predictor","sub_path":"nfl/management/commands/gengames.py","file_name":"gengames.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20624481697","text":"#Hacer menu que cuando la persona ingrese 1, 2, 3 ó 4 diga\n#si 1, mostrar nombre\n#si 2, mostrar edad\n#si 3, mostrar notas\n#si 4, salir \n#siempre mostrar el menu\n#otro numero es invalido \n#-----------Mensajes-------------\nMENSAJE_SELECCION = \"Seleccione entre ver \\n 1. Nombres \\n 2. Edades \\n 3. Notas \\n 4. 
Salir \\n Ingrese su seleccion :\"\nMENSAJE_SALIDA = \"Salida exitosa.\"\nMENSAJE_ERROR = \"No es una opcion valida, ingrese un valor valido\"\n#-----------Listas----------------\nlista_opciones = [1,2,3,4]\nlista_nombres = [\"Juanes\", \"Marco\", \"Santiago\", \"Leslly\", \"Camila\", \"Camila\", \"Ysabella\", \"Elena\", \"Santiago\", \"Maria\", \"Susana\", \"David\", \"Daniel\", \"Daniel\"]\nlista_edades = [16, 19, 20, 19, 19, 19, 19 , 19, 19, 19, 20, 22, 19, 26]\nlista_notas = [3.0, 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 4.0, 4.1, 4.2, 4,3]\n_seleccion_usuario = int(input(MENSAJE_SELECCION))\nwhile ( _seleccion_usuario != 4 ) :\n if _seleccion_usuario == 1 :\n print(lista_nombres)\n elif _seleccion_usuario == 2 : \n print(lista_edades)\n elif _seleccion_usuario == 3 :\n print(lista_notas)\n else :\n print(MENSAJE_ERROR)\n _seleccion_usuario = int(input(MENSAJE_SELECCION))","repo_name":"danielzarateos/ProgramacionUno","sub_path":"Clases/ClaseCinco/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12206309683","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\n__all__ = ['reverse_filter', 'pretty_date_filter', 'number_format']\n\n\n# Flask Jinja2 Template filters\ndef reverse_filter(s):\n return s[::-1]\n\n\ndef pretty_date_filter(dt, default=None):\n \"\"\"\n Returns string representing \"time since\" e.g.\n 3 days ago, 5 hours ago etc.\n Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/\n \"\"\"\n\n if default is None:\n default = 'just now'\n\n now = datetime.utcnow()\n diff = now - dt\n\n periods = (\n (diff.days / 365, 'year', 'years'),\n (diff.days / 30, 'month', 'months'),\n (diff.days / 7, 'week', 'weeks'),\n (diff.days, 'day', 'days'),\n (diff.seconds / 3600, 'hour', 'hours'),\n (diff.seconds / 60, 'minute', 'minutes'),\n (diff.seconds, 'second', 'seconds'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if period == 1:\n return u'%d %s ago' % (period, singular)\n else:\n return u'%d %s ago' % (period, plural)\n\n return default\n\n\ndef number_format(value, tsep=',', dsep='.'):\n s = unicode(value)\n cnt = 0\n numchars = dsep + '0123456789'\n ls = len(s)\n while cnt < ls and s[cnt] not in numchars:\n cnt += 1\n\n lhs = s[:cnt]\n s = s[cnt:]\n if not dsep:\n cnt = -1\n else:\n cnt = s.rfind(dsep)\n if cnt > 0:\n rhs = dsep + s[cnt + 1:]\n s = s[:cnt]\n else:\n rhs = ''\n\n splt = ''\n while s != '':\n splt = s[-3:] + tsep + splt\n s = s[:-3]\n\n return lhs + splt[:-1] + rhs\n","repo_name":"VPH-Share/transmogrifier","sub_path":"transmogrifier/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"3040064682","text":"from random import randint\nfrom time import sleep\n\ndef escolhajogador():\n posicao = int(input('Digite a posicao de 1 a 9: '))\n while True:\n if p[posicao] == 'X' or p[posicao] == 'Z':\n posicao = int(input('Posição já preenchida. 
Digite a posicao de 1 a 9: '))\n else:\n p[posicao] = 'X'\n break\n\ndef escolhapc():\n posicao = randint(1, 9)\n while True: \n if p[posicao] == 'X' or p[posicao] == 'Z':\n posicao = randint(1, 9)\n else:\n p[posicao] = 'Z'\n break\n \ndef tabuleiro():\n print(f'''\n 1|2|3 {p[1]} | {p[2]} | {p[3]}\n 4|5|6 {p[4]} | {p[5]} | {p[6]}\n 7|8|9 {p[7]} | {p[8]} | {p[9]}\n''')\n\ndef conferevencedor():\n jogadores = [['X','jogador'], ['Z','pc']]\n pvencedor = [[1,2,3], [1,4,7], [1,5,9], [4,5,6], [7,8,9], [7,5,3], [2,5,8], [3,6,9]]\n for t, c in jogadores:\n for c1 in pvencedor:\n if p[c1[0]] == t and p[c1[1]] == t and p[c1[2]] == t:\n print(f'{c} GANHOU!')\n global jogando\n jogando = False\n\np = [None, 0, 0, 0, 0, 0, 0, 0, 0, 0]\njogadas = 0\njogando = True\n\ntabuleiro()\n\nwhile jogando:\n \n escolhajogador()\n jogadas +=1\n if jogadas >= 9:\n print('Empate!')\n break\n \n tabuleiro()\n conferevencedor()\n if jogando == False:\n break\n \n escolhapc()\n print('pensando...')\n jogadas += 1\n if jogadas >= 9:\n print('Empate!')\n break\n sleep(1)\n tabuleiro()\n conferevencedor()\n if jogando == False:\n break\n","repo_name":"gabrielsalesls/jogo-da-velha-python","sub_path":"jogodavelha1.py","file_name":"jogodavelha1.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72654445314","text":"from .whois import *\nimport verifica_site\nfrom django.shortcuts import render\nimport os\nimport logging\n\nfrom django.http import HttpResponse, HttpRequest, HttpResponseRedirect\nfrom django.urls import reverse\n\nlogger = logging.getLogger(\"CONSULTAS\")\nlogger.setLevel(logging.INFO)\nhandler = logging.FileHandler('static/logs.html')\nformatter = logging.Formatter(\\\n '
\\\n ',\n datefmt='%d/%b/%Y %H:%M:%S'\\\n)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\ndef index(request):\n if not request.method == 'POST':\n return render(request, 'dominio/index.html')\n\n dominio = request.POST['dominio']\n dominio = clean_dominio(dominio)\n cliente = request.META.get('REMOTE_ADDR')\n\n nserver = request.POST['nserver']\n whois = consulta_whois(dominio)\n enderecos = consulta_host(dominio, nserver)\n\n logger.info('[ %s ] consultou o dominio ( %s )', cliente, dominio)\n\n return render(request, 'dominio/index.html', {\n 'dominio': dominio,\n 'enderecos': enderecos,\n 'whois': whois\n })\n\ndef site(request):\n dominio = request.GET['dominio']\n return HttpResponse(verifica_site.requisicao(dominio))\n","repo_name":"josiasjuniorx/consulta-dominio","sub_path":"dominio/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28499459389","text":"import sys\nimport os\nfrom Bio import SeqIO\n\n#################################################\nusage = \"circos_plot.py fasta gff3 links_file cuffdiff_file misa_file snp_file\"\n#################################################\n\ntry:\n script,fasta,gff,links_file,cuffdiff_file,misa_file,snp_file=sys.argv\nexcept: sys.exit(\"Correct usage is:\\n\"+usage)\n\n\nclass Circos:\n def __init__(self, fasta, gffs, gff_keys, links_file, limit_size, \n cuffdiff_file, misa_file, snp_file,\n out_dir, \n chrs='Chr'):\n self.fasta = fasta\n self.chrs = chrs\n self.out = out_dir\n self.gffs = {\"GFF%d\"%(x+1): [gffs[x], gff_keys[x]] for x in range(len(gffs))}\n self.links = links_file\n self.limit_size = limit_size\n self.cuffdiff_file = cuffdiff_file\n self.misa_file = misa_file\n self.snp = snp_file\n \n def prepare(self):\n print(\" [1/5] importando fasta ...\")\n self.fasta_dict = SeqIO.to_dict(SeqIO.parse(self.fasta, 'fasta'))\n \n print(\" [2/5] preparando arquivos em %s ...\" % self.out)\n os.mkdir(self.out)\n os.symlink(self.snp, self.out + '/snp.hist')\n os.symlink(self.fasta, self.out + '/genome.fa')\n for k, v in self.gffs.items():\n os.link(v[0], self.out + '/' + k)\n self.misa2gff3(self.misa_file, self.out + '/misa.gff3')\n self.geneateConfig(chrs=self.chrs)\n \n print(\" [3/5] importando gffs \" + self.out)\n for k, v in self.gffs.items():\n self.gffdensity(self.out + '/' + k, v[1], self.out + '/' + k + '.bars', self.chrs)\n self.gffdensity(self.out + '/misa.gff3', 'SSR', self.out + '/misa.bars', self.chrs)\n \n print(\" [4/5] importando links \" + self.out)\n self.minimize_links(self.links, self.out + '/links', self.limit_size)\n \n print(\" [5/5] importando gene expression \" + self.out)\n for f in self.importGeneExp(self.cuffdiff_file, self.out, self.chrs):\n self.normWindow(f, f + \".norm\")\n print(\"run circos --config circos.conf ... \")\n os.system(\"cd \" + self.out + \" && circos -conf circos.conf\")\n \n def karyotype(self, out, chrs='Chr'):\n print(\"file\" + out + ' ... 
OK!')\n with open(out, 'w') as o:\n for c in [c for c in self.fasta_dict if c.startswith(chrs)]:\n o.write('chr\\t-\\t%s\\t%s\\t0\\t%d\\tgreen\\n' % (c, c, len(self.fasta_dict[c])))\n return out\n \n def geneateConfig(self, chrs='Chr', rewrite=False):\n self.karyotype(self.out + '/genome.karyotype', chrs)\n \n conf = \"\"\"\n<>\n<>\n<>\n<>\n<>\n\n <>\n\nchromosomes_units = 1000000\nchromosomes_display_default = yes\nchromosomes_color = /.*/:piyg-3-div-3\nkaryotype = genome.karyotype\n\n \n file = links\n radius = 0.2r\n color = piyg-3-div-1\n bezier_radius = 0.1r\n thickness = 5\n #ribbon = yes\n \n \n condition = var(intrachr) && abs(var(pos1)-var(pos2)) > 50Kb\n color = piyg-3-div-2\n \n \n condition = var(intrachr) && abs(var(pos1)-var(pos2)) > 60Kb\n color = piyg-3-div-3\n \n \n \n\n\n \n type = scatter\n file = misa.bars\n r0 = 0.21r\n r1 = 0.26r\n color = piyg-4-div\n \n \n type = heatmap\n file = flor_folha.heatmap.norm\n r0 = 0.27r\n r1 = 0.30r\n color = piyg-4-div\n stroke_thickness = 1\n stroke_color = black\n \n \n type = heatmap\n file = flor_fruto.heatmap.norm\n r0 = 0.31r\n r1 = 0.34r\n color = piyg-4-div\n stroke_thickness = 1\n stroke_color = black\n \n \n type = heatmap\n file = folha_fruto.heatmap.norm\n r0 = 0.35r\n r1 = 0.38r\n color = piyg-4-div\n stroke_thickness = 1\n stroke_color = black\n \n \n type = histogram\n file = GFF1.bars\n r0 = 0.39r\n r1 = 0.45r\n stroke_type = outline\n thickness = 4\n color = lgrey\n fill_color = lgrey\n extend_bin = yes\n \n \n show = no\n type = line\n file = GFF2.bars\n r0 = 0.39r\n r1 = 0.45r\n stroke_type = outline\n thickness = 4\n extend_bin = yes\n color = piyg-3-div-2\n \n \n condition = var(value) > 50\n color = piyg-3-div-3\n \n \n condition = var(value) < 25\n color = piyg-3-div-1\n \n \n \n \n type = line\n file = snp.hist\n r0 = 0.46r\n r1 = 0.52r\n stroke_type = outline\n thickness = 4\n extend_bin = yes\n color = piyg-3-div-2\n \n \n condition = var(value) > 200\n color = piyg-3-div-1\n \n \n condition = var(value) < 100\n color = piyg-3-div-3\n \n \n \n\n<>\n \"\"\"\n\n ideogram = \"\"\"\n\n \n default = 0.0025r\n break = 0.5r\n \n<>\n<>\n\n \"\"\"\n \n label = \"\"\"\nshow_label = yes\nlabel_font = default\nlabel_radius = 0.63r\nlabel_with_tag = yes\nlabel_size = 36\nlabel_parallel = yes\n#label_case = lower\nlabel_format = eval(sprintf(\"%s\", replace(var(label), \"Chr\", \"LG\") ))\n \"\"\"\n \n position = \"\"\"\nradius = 1.5r\nthickness = 30p\nfill = yes\nstroke_thickness = 2\nstroke_color = black\n \"\"\"\n ticks = \"\"\"\nshow_ticks = no\nshow_tick_labels = no\n\n skip_first_label = no\n skip_last_label = no\n radius = dims(ideogram,radius_outer)\n tick_separation = 2p\n label_separation = 5p\n multiplier = 1e-6\n color = black\n thickness = 4p\n size = 20p\n \n spacing = 1u\n show_label = no\n thickness = 2p\n color = dgrey\n \n \n spacing = 5u\n show_label = no\n thickness = 3p\n color = vdgrey\n \n \n spacing = 10u\n show_label = yes\n label_size = 20p\n label_offset = 10p\n format = %d\n grid = yes\n grid_color = dgrey\n grid_thickness = 1p\n grid_start = 0.5r\n grid_end = 0.999r\n \n\n \"\"\"\n\n colors = \"\"\"\n\n chrs = 254,158,218\n\"\"\"\n\n def persist(file, var):\n if not rewrite and os.path.exists(file) and os.path.isfile(file):\n raise BaseException('ERROR: file ' + file + ' EXISTIS! call with REWRITE arg!')\n with open(file, 'w') as o:\n o.write(var)\n print('file %s ... 
OK' % file)\n\n persist(self.out + '/circos.conf', conf)\n persist(self.out + '/ideogram.conf', ideogram)\n persist(self.out + '/ideogram.label.conf', label)\n persist(self.out + '/ideogram.position.conf', position)\n persist(self.out + '/ticks.conf', ticks)\n persist(self.out + '/colors.conf', colors)\n \n def minimize_links(self, file, out, limitMIN=10):\n seg_dup = [l.strip().split('\\t') for l in open(file).readlines() if not l.startswith('#')]\n ss = [(x[0], int(x[1]), int(x[2]), int(x[2]) - int(x[1]), x[3], x[4], x[5]) for x in seg_dup if len(x) == 6]\n links = [x for x in ss if x[3] > limitMIN * 1000]\n with open(out, 'w') as o:\n for l in links:\n o.write(\"%s\\t%d\\t%d\\t%s\\t%s\\t%s\\n\" % (l[0], l[1], l[2], l[4], l[5], l[6]))\n print(\"%d writed in %s ....\" % (len(links), out))\n \n \n def run(self):\n print('iniciando ...')\n self.prepare()\n \n\n def gffdensity(self, gff, keys, out, chrs='Chr', window=100000):\n print(\"importando %s ...\" % keys)\n genes = [l.strip().split('\\t') for l in open(gff).readlines() if l.count(\"\\t\" + keys + \"\\t\") > 0]\n print(\"parseando %s ...\" % keys)\n chr2genes = {x: [(int(y[3]),int(y[4])) for y in genes if y[0] == x] for x in set([x[0] for x in genes])}\n print(\"salvando em \" + out + ' ...')\n with open(out, 'w') as o:\n for c in [c for c in self.fasta_dict if c.startswith(chrs) and c in chr2genes]:\n ranges = list(sorted(set(list(range(1, len(self.fasta_dict[c]), window)) + [len(self.fasta_dict[c])+1])))\n for i in range(1, len(ranges)):\n o.write('%s\\t%d\\t%d\\t%d\\n' % (c, ranges[i-1], ranges[i]-1, len([x for x in chr2genes[c] if \n (x[0] >= ranges[i-1] and x[0] <= ranges[i]-1) or \n (x[1] >= ranges[i-1] and x[1] <= ranges[i]-1)])))\n \n def misa2gff3(self,file, out):\n k = [l.strip().split(\"\\t\") for l in open(file).readlines() if l.count('\\t') > 0]\n if k[0] == 'ID\\tSSR nr.\\tSSR type\\tSSR\\tsize\\tstart\\tend'.split(\"\\t\"):\n with open(out, 'w') as o:\n o.write('\\n'.join(['\\t'.join([\n x[0], \n 'misa', \n 'SSR', \n x[5], \n x[6], \n '.', '.', '.', \n 'ID=' + x[2] + '.' + x[0] + '.' + x[1]]) for x in k[1:]]) +'\\n')\n else:\n raise BaseException('File ' + file + ' not of misa output!')\n\n \n def importGeneExp(self, file, out_dir='./', chrs=None):\n header = 'test_id\\tgene_id\\tgene\\tlocus\\tsample_1\\tsample_2\\tstatus\\tvalue_1\\tvalue_2\\tlog2(fold_change)\\ttest_stat\\tp_value\\tq_value\\tsignificant'.split('\\t')\n lines = [x.strip().split(\"\\t\") for x in open(file).readlines() if x.count(\"\\t\") > 3]\n if lines[0] != header:\n raise BaseException('Header NOT OK, verify if file is from CUFFDIFF %s' % file)\n\n ls = [(x[3],'_'.join(sorted([x[4],x[5]])),float(x[9]),float(x[12]), x[13] == 'yes') for x in lines[1:]]\n condicoes = {x: [y for y in ls if y[1] == x] for x in set([z[1] for z in ls])}\n files = []\n for k, v in condicoes.items():\n with open(out_dir + '/' + k + '.heatmap', 'w') as o:\n ls = [(x[0].replace(':', '\\t').replace('-', '\\t'), str(x[2])) for x in v if x[4] and (chrs is None or x[0].startswith(chrs))]\n o.write('\\n'.join(['\\t'.join(x) for x in ls]) + '\\n')\n print('file %s ... 
OK' % (out_dir + '/' + k + '.heatmap'))\n files.append(out_dir + '/' + k + '.heatmap')\n return files\n \n def normWindow(self, file, out):\n fasta = self.fasta_dict\n ls = [l.strip().split(\"\\t\") for l in open(file).readlines() if l.count(\"\\t\") > 0]\n chr2exp = {x: [(int(z[1]), int(z[2]), abs(float(z[3]))) for z in ls if z[0] == x] for x in set([y[0] for y in ls])}\n window = 10 * 10000\n with open(out, 'w') as o:\n for c in [c for c in fasta if c in chr2exp]:\n ranges = list(sorted(set(list(range(1, len(fasta[c]), window)) + [len(fasta[c])+1])))\n for i in range(1, len(ranges)):\n t = [x[2] for x in chr2exp[c] if \n (x[0] >= ranges[i-1] and x[0] <= ranges[i]-1) or \n (x[1] >= ranges[i-1] and x[1] <= ranges[i]-1)]\n o.write('%s\\t%d\\t%d\\t%f\\n' % (c, ranges[i-1], ranges[i]-1, sum(t)/len(t) if len(t) > 0 else 0))\n \n\ncircos = Circos(fasta=fasta, gffs=[gff,gff], gff_keys=['gene', 'CDS'], links_file=links_file, limit_size=40,cuffdiff_file=cuffdiff_file, misa_file=misa_file,snp_file=snp_file,out_dir='circos_out')\ncircos.run()\n\nprint('by mikeias.net')\n","repo_name":"MiqueiasFernandes/bioinformatics","sub_path":"circos_plot.py","file_name":"circos_plot.py","file_ext":"py","file_size_in_byte":12698,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"72069623874","text":"\"\"\"\nНапишите программу, которая решает квадратные уравнения даже если дискриминант отрицательный.\nИспользуйте комплексные числа для извлечения квадратного корня.\n\"\"\"\nimport doctest\nfrom math import sqrt\n\n\ndef quadratic_equation(a, b, c):\n \"\"\"решает квадратные уравнения даже если дискриминант отрицательный.\n >>> quadratic_equation(-18, 60, 100)\n 'Корни уравнения: x1 = -1.220; x2 = 4.553'\n >>> quadratic_equation(5, -10, 5)\n 'Корень уравнения: x = 1.000'\n >>> quadratic_equation(5, 10, 15)\n 'Корни уравнения: x1 = (-1+1.4142j); x2 = (-1-1.4142j)'\n \"\"\"\n d = b ** 2 - 4 * a * c\n if d > 0:\n x1 = (-b + sqrt(d)) / (2 * a)\n x2 = (-b - sqrt(d)) / (2 * a)\n return (f'Корни уравнения: x1 = {x1:.3f}; x2 = {x2:.3f}')\n elif d == 0:\n x1 = -b / (2 * a)\n return (f'Корень уравнения: x = {x1:.3f}')\n else:\n real = round(-b / (2 * a), 4)\n imaginary = round(sqrt(abs(d)) / (2 * a), 4)\n x1 = complex(real, imaginary)\n x2 = complex(real, -imaginary)\n return (f'Корни уравнения: x1 = {x1}; x2 = {x2}')\n\n\nif __name__ == '__main__':\n print(quadratic_equation(-18, 60, 100))\n # print(quadratic_equation(5, -10, 5))\n # print(quadratic_equation(5, 10, 15))\n doctest.testmod(verbose=True)\n","repo_name":"TerekhinSergei/immersion_to_python_homework","sub_path":"HW14/task14_QuadraticEquation_doctest.py","file_name":"task14_QuadraticEquation_doctest.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34091076304","text":"import sys\nimport re\n##CHROM POS ID REF ALT QUAL FILTER INFO FORMAT /archive/data/amed_snt/WORK/james/NA19240_SV_calling_results/20201203_minimap2_v2.17_alignments/NA19240_all.fastq.0.minimap2.sort.bam\n#chr1 66239 0 ATTATATTATATAATATATAATATAAATATAATATAA N . 
PASS PRECISE;SVMETHOD=Snifflesv1.0.12;CHR2=chr1;END=66276;STD_quant_start=6.164414;STD_quant_stop=5.412947;Kurtosis_quant_start=0.221510;Kurtosis_quant_stop=1.767848;SVTYPE=DEL;SUPTYPE=AL;SVLEN=-37;STRANDS=+-;STRANDS2=9,7,9,7;RE=16;REF_strand=11,13;Strandbias_pval=0.54037;AF=0.666667 GT:DR:DV 0/1:8:16\n\n\nf = open(sys.argv[1])\nfor line in f:\n\tline = line.replace(\"\\n\", \"\")\n\tline_l = line.split(\"\\t\")\n\t\n\tif line[0] == \"#\":\n\t\tcontinue\n\n\tend_pos, length, SV_type = \"\", \"\", \"\"\n\tinfo_l = line_l[7].split(\";\")\n\n#SVTYPE=DEL\t\n\tfor tmp in info_l:\n#\t\tprint(tmp)\n\t\tif tmp.startswith(\"END=\"):\n\t\t\tend_pos = int(tmp.replace(\"END=\", \"\"))\n\t\tif tmp.startswith(\"SVLEN=\"):\n\t\t\tlength = abs(int(tmp.replace(\"SVLEN=\", \"\")))\n\t\tif tmp.startswith(\"SVTYPE=\"):\n\t\t\tSV_type = tmp.replace(\"SVTYPE=\", \"\")\n\n\tif SV_type != \"INS\" and SV_type != \"DEL\":\n\t\tcontinue\n\n\tif SV_type == \"DEL\":\n\t\tprint(line_l[0], line_l[1], line_l[0], end_pos, SV_type, length, sep=\"\\t\")\n\telse:\n\t\tprint(line_l[0], line_l[1], line_l[0], int(line_l[1]), SV_type, length, sep=\"\\t\")\n","repo_name":"afujimoto/CAMPHOR","sub_path":"CAMPHOR/src/change_vcf_format_Sniffles.py","file_name":"change_vcf_format_Sniffles.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"1110834465","text":"import sys\r\n\r\nimport numpy\r\nimport numpy as np\r\nimport csv\r\n\r\nfrom models import createCLSTM, createLSTM\r\nfrom utils import create_datasetLSTM\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef fileLen(fileName):\r\n with open(fileName) as f:\r\n return sum(1 for line in f)\r\n\r\n\r\ndef unscale(predMat, scaleMat):\r\n idx = 0\r\n for pred in predMat:\r\n predMat[idx][0] = pred[0] * scaleMat[1] + scaleMat[0]\r\n predMat[idx][1] = pred[1] * scaleMat[3] + scaleMat[2]\r\n # predMat[idx][2] = pred[2] * scaleMat[5] + scaleMat[4] # Added this for 3 inputs\r\n # predMat[idx][3] = pred[3] * scaleMat[7] + scaleMat[6] # Added this for 4 inputs\r\n idx += 1\r\n return predMat\r\n\r\n\r\ndef stripDate(dateIn):\r\n try:\r\n month, day, year = dateIn.split('/')\r\n return day, month\r\n except ValueError:\r\n print(dateIn)\r\n\r\n\r\n\r\n\r\n\r\nnumLines = fileLen('weatherFull.csv') - 1 # Remove Header Line\r\nwith open('weatherFull.csv') as file:\r\n weatherArr = np.zeros([numLines - 1, 2])\r\n\r\n weatherCSV = csv.reader(file)\r\n line = next(weatherCSV)\r\n idx = 0\r\n tempMaxTotal = 0\r\n tempMinTotal = 0\r\n while idx < numLines - 1:\r\n line = next(weatherCSV)\r\n\r\n # day, month = stripDate(line[1])\r\n tempMax = line[2]\r\n tempMin = line[3]\r\n seaLevelPressure = line[19]\r\n humidity = line[9]\r\n\r\n weatherArr[idx] = [float(tempMax), float(tempMin)]\r\n # weatherArr[idx] = [float(tempMax), float(tempMin), float(seaLevelPressure)]\r\n # weatherArr[idx] = [float(tempMax), float(tempMin), float(seaLevelPressure), float(humidity)]\r\n\r\n idx += 1\r\n\r\n\r\n# print(weatherArr)\r\n\r\n# Standard Normalization\r\nmaxMean = np.average(weatherArr[:, 0])\r\nmaxSTD = np.std(weatherArr[:, 0])\r\nweatherArr[:, 0] = (weatherArr[:, 0] - maxMean) / maxSTD\r\n\r\nminMean = np.average(weatherArr[:, 1])\r\nminSTD = np.std(weatherArr[:, 1])\r\nweatherArr[:, 1] = (weatherArr[:, 1] - minMean) / minSTD\r\n\r\n# # added this for 3 inputs\r\n# pressureMean = np.average(weatherArr[:, 2])\r\n# pressureSTD = np.std(weatherArr[:, 2])\r\n# weatherArr[:, 2] = (weatherArr[:, 2] - pressureMean) 
/ pressureSTD\r\n#\r\n# # added this for 4 inputs\r\n# humidityMean = np.average(weatherArr[:, 3])\r\n# humiditySTD = np.std(weatherArr[:, 3])\r\n# weatherArr[:, 3] = (weatherArr[:, 3] - humidityMean) / humiditySTD\r\n\r\nscaleArr = [maxMean, maxSTD, minMean, minSTD]\r\nstringArr = str(maxMean) + str(maxSTD) + str(minMean) + str(minSTD)\r\n\r\n# scaleArr = [maxMean, maxSTD, minMean, minSTD, pressureMean, pressureSTD]\r\n# stringArr = str(maxMean) + str(maxSTD) + str(minMean) + str(minSTD) + str(pressureMean) + str(pressureSTD)\r\n\r\n# scaleArr = [maxMean, maxSTD, minMean, minSTD, pressureMean, pressureSTD, humidityMean, humiditySTD]\r\n# stringArr = str(maxMean) + str(maxSTD) + str(minMean) + str(minSTD) + str(pressureMean) + str(pressureSTD) + str(humidityMean) + str(humiditySTD)\r\n\r\nwith open('scaleMat.txt', 'w') as file:\r\n file.write(\"maxMean maxSTD minMean minSTD\\n\")\r\n file.write(stringArr)\r\ntrainX, trainY, testX, testY = create_datasetLSTM(weatherArr, 10)\r\n\r\n# clstmModel = createCLSTM(filters=64, denseNodes=[50, 2], num_dense_layers=2)\r\n\r\n\r\nlstm = createLSTM()\r\nhistory = lstm.fit(trainX, trainY, validation_split=.2, epochs=25, batch_size=32)\r\n\r\n# clstm = createCLSTM()\r\n# history = clstm.fit(trainX, trainY, validation_split=.2, epochs=25, batch_size=32)\r\n\r\nplt.figure(0)\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\n\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['MSE loss','Validation loss'], loc='upper left')\r\nplt.savefig(\"loss.png\")\r\nplt.show()\r\n\r\n\r\npredY = lstm.predict(testX)\r\n# predY = clstm.predict(testX)\r\nunscaledPred = unscale(predY, scaleMat=scaleArr)\r\nunscaledTest = unscale(testY, scaleMat=scaleArr)\r\n\r\n# plt.figure(1)\r\n# plt.plot(unscaledTest[:, 1])\r\n# plt.plot(unscaledPred[:, 1])\r\n# plt.title('Minimum Temperatures')\r\n# plt.legend(['True', 'Pred'], loc='upper left')\r\n# plt.ylabel('Min Temp')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"mintemp.png\")\r\n# plt.show()\r\n#\r\n#\r\n# plt.figure(2)\r\n# plt.plot(unscaledTest[:, 0])\r\n# plt.plot(unscaledPred[:, 0])\r\n# plt.title('Maximum Temperatures')\r\n# plt.legend(['True', 'Pred'], loc='upper left')\r\n# plt.ylabel('Max Temp')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"maxtemp.png\")\r\n# plt.show()\r\n#\r\n# plt.figure(3)\r\n# plt.plot(np.abs((unscaledTest[:, 1] - unscaledPred[:, 1])/unscaledTest[:, 1]) * 100)\r\n#\r\n# plt.title('Minimum Temperature Accuracy')\r\n# plt.ylabel('Min Temp')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"minAcc.png\")\r\n# plt.show()\r\n#\r\n#\r\n# plt.figure(4)\r\n# plt.plot(np.abs((unscaledTest[:, 0] - unscaledPred[:, 0])/unscaledTest[:, 0]) * 100)\r\n#\r\n# plt.title('Maximum Temperature Accuracy')\r\n# plt.ylabel('Accuracy Percentage')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"maxAcc.png\")\r\n# plt.show()\r\n#\r\n#\r\n#\r\n# plt.figure(5)\r\n# plt.plot(np.abs((unscaledTest[:, 1] - unscaledPred[:, 1])))\r\n# plt.title('Minimum Temperature Difference')\r\n# plt.ylabel('Difference (degrees)')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"minDiff.png\")\r\n# plt.show()\r\n#\r\n# plt.figure(6)\r\n# plt.plot(np.abs((unscaledTest[:, 0] - unscaledPred[:, 0])))\r\n#\r\n# plt.title('Maximum Temperature Difference')\r\n# plt.ylabel('Difference (degrees)')\r\n# plt.xlabel('Day')\r\n# plt.savefig(\"maxDiff.png\")\r\n# plt.show()\r\n#\r\n# print(\"Average Humidity Difference = %.2f\" % (np.average(np.absolute(unscaledTest[:, 3] - unscaledPred[:, 3]))))\r\n# print(\"Average Sea 
Level Pressure Difference = %.2f\" % (np.average(np.absolute(unscaledTest[:, 2] - unscaledPred[:, 2]))))\r\nprint(\"Average Min Difference = %.2f\" % (np.average(np.absolute(unscaledTest[:, 1] - unscaledPred[:, 1]))))\r\nprint(\"Average Max Difference = %.2f\" % (np.average(np.absolute(unscaledTest[:, 0] - unscaledPred[:, 0]))))\r\n\r\n# print(unscaledTest[:, 1])\r\n# print(\"----------------\")\r\n# print(unscaledTest[:, 0])\r\n# print(\"----------------\")\r\n# print(unscaledTest[:, 2])\r\n# print(\"----------------\")\r\n# print(unscaledTest[:, 3])\r\n\r\n\r\n","repo_name":"avornhagen2/Temperature-Forecasting","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74366935875","text":"import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\n\ndef geophys_plots(plot_filter=\"ORE_READING_DIST\"):\n filters = [\"HOLEID\", # 0\n \"EAST\", # 1\n \"NORTH\", # 2\n \"RL\", # 3\n \"DRILL_DEPTH\", # 4\n \"LAS_READING_DEPTH\", # 5\n \"LAS_STOP_DEPTH\", # 6\n \"TOO_OREZONE_DRILL\", # 7\n \"ORE_READING_DIST\", # 8\n \"DATE_DRILLED\", # 9\n \"REGION\", # 10\n \"REGIONPIT\", # 11\n \"SUBREGIONPIT\", # 12\n \"OVERBURDEN\", # 13\n \"DATE_SURVEYED\", # 14\n \"DAYS_OUTSTANDING\", # 15\n \"SHORT_LAS\", # 16\n \"OVERBURDEN_CORRECTED\", # 17\n \"IS_OUTSTANDING\", # 18\n \"SURVEY_STATUS\"] # 19\n\n dframe = pd.read_excel(\"LAS file.xlsx\", engine=\"openpyxl\")\n dframe = dframe[dframe[filters[5]].notna()] # Filter out rows which do not have a LAS_READING_DEPTH value, as these rows are useless.\n\n # Plot-filter-specific data transformations and filters\n\n # Filter initially negative values from the \"TOO_Ore_zone_DRILL\" column.\n if (plot_filter == filters[7]): dframe = dframe.loc[dframe[plot_filter] > 0]\n\n # For a to-scale direction plot, we want to \"reverse\" the z-axes signs. Since Plotly doesn't have functionality for doing so in a 3D plot, we'll flip the signs in the dataframe instead.\n dframe[plot_filter] = dframe[plot_filter].mul(-1)\n\n # [Plot 2 (Bottom / Right)] This trace is colour-coded based on the survey status.\n survey_fig = px.scatter_3d(\n dframe,\n x=filters[1], y=filters[2], z=plot_filter, color=filters[19],\n title=\"{} - Survey Status\".format(plot_filter)\n ).update_traces( marker=dict(size=2) )\n\n # This is the default color palette that Plotly uses. 
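(this is the sequence plotly also exposes as px.colors.qualitative.Plotly, so\n    # hard-coding it keeps region colors stable across plotly versions).\n    # 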
These traces are colour-coded based on the region.\n colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']\n\n # Get all the unique regions in the dataframe.\n regions = []\n for region in dframe[\"REGION\"]:\n if (region not in regions): regions.append(region)\n\n # Push a plot for each region.\n traces = []\n current_color = 0\n for region in regions:\n this_region = dframe.loc[dframe[filters[10]] == region] # Get the rows which correspond to this particular region\n traces.append(go.Scatter3d(x=this_region[filters[1]], y=this_region[filters[2]], z=this_region[plot_filter],\n mode=\"markers\", marker=dict(color=colors[current_color], size=2), name=\"{} ({} points)\".format(region, this_region.size),\n showlegend=True, legendgroup=region))\n current_color = (current_color + 1) % len(colors)\n\n # For each polygon point, locate its general region based on its region_name, and add it to the corresponding point set.\n points = pd.read_csv(\"points.csv\").dropna()\n sorted_points = points.sort_values([\"region_name\", \"point_order\"])\n current_region = \"\"\n x, y, z = [], [], []\n for row in points.iterrows():\n if (row[1].region_name != current_region):\n if (current_region != \"\"):\n # Push the current scatter graph, reset the point lists, and update the current_region\n over_region = list(filter(lambda x: x[0] == current_region[0], regions))\n traces.append(go.Scatter3d(name=current_region, x = x, y = y, z = z, mode=\"markers+lines\", marker=dict(color=\"black\", size=2), showlegend=False, legendgroup=over_region[0]))\n x, y, z = [], [], []\n current_region = row[1].region_name\n x.append(row[1].X)\n y.append(row[1].Y)\n z.append(0)\n\n # Push the last scatter graph\n over_region = list(filter(lambda x: x[0] == current_region[0], regions))\n traces.append(go.Scatter3d(name=current_region, x = x, y = y, z = z, mode=\"markers+lines\", marker=dict(color=\"black\", size=2), showlegend=False, legendgroup=over_region[0]))\n\n # [Plot 1 (Top / Left)]\n layout = go.Layout(title=\"{} - Coloured by Region\".format(plot_filter), legend=dict(title=plot_filter))\n region_fig = go.Figure(data = traces, layout = layout)\n region_fig.update_layout( scene = dict(xaxis_title = filters[1], yaxis_title = filters[2], zaxis_title = plot_filter) )\n\n return (region_fig, survey_fig)","repo_name":"ForsakenIdol/ExcelDash","sub_path":"geophys.py","file_name":"geophys.py","file_ext":"py","file_size_in_byte":4611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73223979395","text":"import serial\nimport time\n\nclass PGVCommunication():\n \n def __init__(self) -> None:\n # self.usb_port = \"/dev/ttyUSB1\"\n self.usb_port = \"/dev/pgv_sensor\"\n self.baud_rate = 115200\n self.check_connection()\n self.initialize_message()\n self.trigger_sensor()\n\n def check_connection(self):\n try:\n self.serial_channel = serial.Serial(self.usb_port, self.baud_rate, timeout=3, parity=serial.PARITY_EVEN)\n print(\"Serial details params: \", self.serial_channel)\n except:\n print(\"Couldn't connect to usb port\")\n\n def initialize_message(self):\n self.position_value = [0xC8, 0x37]\n self.request_blue = [0xC4, 0x3B]\n self.request_green = [0x88, 0x77]\n self.request_red = [0x90, 0x6F]\n self.follow_left_lane = [0xE8, 0x17]\n\n def send_message(self, message):\n \"\"\"Send message to the PGV\n\n Args:\n message (hex): the message that will be sent to the PGV\n \"\"\"\n self.result_write = 
self.serial_channel.write(message)\n # print(f'result_write: {self.result_write}')\n\n def read_message(self):\n # print(\"reading message\")\n self.result_read = self.serial_channel.read(21)\n # self.result_read_hex =self.result_read.hex()\n # print(f'result_read: {self.result_read}')\n # print(f'hex access index 0: {repr(chr(self.result_read[0]))}')\n # print(f'hex access index 1: {self.result_read[1]}')\n # print(f'hex access index -1: {self.result_read[-1]}')\n # print(f'length: {len(self.result_read_hex)}')\n \n def print_result_read(self):\n counter = 1\n for _ in self.result_read:\n print(f'{counter} = {_}')\n if counter == 2:\n print(f\"binary representation: {bin(_)[2:].zfill(8)}\")\n counter += 1\n\n def calculate(self):\n \"\"\"Calculate the angle value, y_position, and number of lanes detected.\n \"\"\"\n multiplier = 0x80\n byte_2_binary = bin(self.result_read[1])[2:].zfill(8)\n # print(f\"len binary = {len(byte_2_binary)}\")\n # print(f\"index_2 = {byte_2_binary[2]}\")\n # print(f\"index_2 = {byte_2_binary[3]}\")\n self.number_of_lanes = int(byte_2_binary[2]) * 2 + int(byte_2_binary[3])\n # print(f'number_of_lanes: {self.number_of_lanes}')\n self.angle_value = self.result_read[10] * multiplier + self.result_read[11]\n if self.angle_value > 180 :\n self.angle_value = - (360-self.angle_value)\n y_position_unsigned = self.result_read[6] * multiplier + self.result_read[7]\n if y_position_unsigned > 0x2000:\n self.y_position = -1 * int(0x4000 - y_position_unsigned)\n else:\n self.y_position = int(y_position_unsigned)\n self.tracking_result = [self.number_of_lanes, self.angle_value, self.y_position]\n # print(self.tracking_result)\n \n def trigger_sensor(self):\n try:\n self.send_message(self.request_blue)\n time.sleep(0.1)\n self.send_message(self.follow_left_lane)\n time.sleep(1)\n except Exception as error:\n print(error)\n finally:\n self.serial_channel.close()\n time.sleep(1)\n self.check_connection()\n time.sleep(1)\n\n def stream_value(self):\n \"\"\"View all the tracking lane values information (angle_value, y_position, number_of_lanes)\n \"\"\"\n try:\n # self.send_message(self.request_blue)\n # time.sleep(0.1)\n # self.send_message(self.follow_left_lane)\n # time.sleep(3)\n while True:\n self.send_message(self.position_value)\n self.read_message()\n self.calculate()\n # self.print_result_read()\n # print(f'angle_value: {self.angle_value} \\t y_position: {self.y_position} \\t number_of_lanes: {self.number_of_lanes}') #TODO: comment this on deployment\n time.sleep(0.1) #TODO: check this if it is enough to have a good movement result\n if __name__ == \"__main__\":\n print(f'number_of_lanes: {self.number_of_lanes} \\t angle_value: {self.angle_value} \\t y_position: {self.y_position}')\n except Exception as error:\n print (error)\n finally:\n self.serial_channel.close()\n\n def update_value(self):\n try:\n self.send_message(self.position_value)\n self.read_message()\n self.calculate()\n # time.sleep(0.1) #removedelay\n except Exception as error:\n print(error)\n \n def loop_update_value(self):\n while True:\n self.update_value()\n \nif __name__ == \"__main__\":\n PGVCommunicationObject = PGVCommunication()\n PGVCommunicationObject.stream_value()\n \n","repo_name":"simbolonmartin/AGV_line_follower","sub_path":"pgv_sensor/read_write_serial_hex.py","file_name":"read_write_serial_hex.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23560950381","text":"t=int(input())\nr=[]\nfor i in range(t):\n n=int(input())\n for j in range(n,-1,-1):\n b=str(j)\n dobar=True\n for k in range(len(b)-1):\n if b[k]>b[k+1]:\n dobar=False\n break\n if dobar:\n r.append(j)\n break\n \nfor i in range(len(r)):\n print('Case #',i+1,':',' ',r[i],sep='') \n \n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/4227.py","file_name":"4227.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34277455777","text":"#!/usr/bin/env python3\n\n#file_name = '/Users/sarah/Docs Local/aoc2021/AoC2021/Test/Day15.txt'\nfile_name = '/Users/sarah/Docs Local/aoc2021/AoC2021/Data/Day15.txt'\n\nf = open(file_name, 'r')\ndata = f.read().splitlines()\n\nclass Point:\n\tdef __init__(self, row, col, cost):\n\t\tself.row = row\n\t\tself.col = col\n\t\tself.cost = int(cost)\n\t\tself.pathCost = 9999999999\n\t\tself.connects = []\n\t\t\n\tdef __str__(self):\n\t\treturn f'Row: {self.row}, Col: {self.col}, Cost: {self.cost}, Path Cost: {self.pathCost}'\n\t\n\tdef getID(self):\n\t\treturn f'{self.row},{self.col}'\n\t\t\n\tdef getConnects(self, pointsByLoc):\n\t\tconnects = []\n\t\tleft = f'{self.row},{self.col - 1}'\n\t\tright = f'{self.row},{self.col + 1}'\n\t\tabove = f'{self.row - 1},{self.col}'\n\t\tbelow = f'{self.row + 1},{self.col}'\n\t\tif left in pointsByLoc:\n\t\t\tconnects.append(pointsByLoc[left])\n\t\tif right in pointsByLoc:\n\t\t\tconnects.append(pointsByLoc[right])\n\t\tif above in pointsByLoc:\n\t\t\tconnects.append(pointsByLoc[above])\n\t\tif below in pointsByLoc:\n\t\t\tconnects.append(pointsByLoc[below])\n\t\treturn connects\n\n\t\ndef findPath(startPoint, unvisited):\n\thaveFoundEnd = False\n\twhile True:\t\t\n\t\tpathCost = startPoint.pathCost\n\t\tnexts = startPoint.connects\n\t\tfor p in nexts:\n\t\t\tnewPathCost = pathCost + p.cost\n\t\t\tif newPathCost < p.pathCost:\n\t\t\t\tp.pathCost = newPathCost\n\t\t\tif p.getID() == endPointId:\n\t\t\t\treturn p\n\t\tunvisited.remove(startPoint)\n\t\t\n\t\tunvisited.sort(key=lambda x: x.pathCost)\n\t\tstartPoint = unvisited[0]\n\t\t\n\t\t\nunvisited = []\npointsByLoc = {}\n\nstartPoint = None\n\nfor rowNum in range(len(data)):\n\tfor colNum in range(len(data[0])):\n\t\tpoint = Point(rowNum, colNum, data[rowNum][colNum])\n\t\tif rowNum == 0 and colNum == 0:\n\t\t\tpoint.cost = 0\n\t\t\tpoint.pathCost = 0\n\t\t\tstartPoint = point\n\t\tunvisited.append(point)\n\t\tpointsByLoc[f'{rowNum},{colNum}'] = point\nendPointId = point.getID()\n\nfor p in unvisited:\n\tp.connects = p.getConnects(pointsByLoc)\n\t\n# Part 1\nend = findPath(startPoint, unvisited)\nprint(end)\n\n\n# Part 2\n\nnums = []\nfor row in data:\n\trowNums = [int(x) for x in row]\n\tnums.append(rowNums)\n\nfor row in nums:\n\tstartNums = row\n\tfor block in range(4):\n\t\tnextBatch = [x + 1 if x < 9 else 1 for x in startNums]\n\t\trow += nextBatch\n\t\tstartNums = nextBatch\n\t\t\nrequiredRows = len(nums) * 5\nrowIndex = 0\n\nwhile len(nums) < requiredRows:\n\tnewRow = [x + 1 if x < 9 else 1 for x in nums[rowIndex]]\n\tnums.append(newRow)\n\trowIndex += 1\n\t\nunvisited = []\npointsByLoc = {}\n\nstartPoint = None\n\nfor rowNum in range(len(nums)):\n\tfor colNum in range(len(nums[0])):\n\t\tpoint = Point(rowNum, colNum, nums[rowNum][colNum])\n\t\tif rowNum == 0 and colNum == 0:\n\t\t\tpoint.cost = 0\n\t\t\tpoint.pathCost = 0\n\t\t\tstartPoint = 
point\n\t\tunvisited.append(point)\n\t\tpointsByLoc[f'{rowNum},{colNum}'] = point\nendPointId = point.getID()\n\nfor p in unvisited:\n\tp.connects = p.getConnects(pointsByLoc)\n\t\nend = findPath(startPoint, unvisited)\nprint(end)\n\n\n#for p in unvisited:\n#\tprint(p)","repo_name":"trozware/aoc2021","sub_path":"Day15.py","file_name":"Day15.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34542471761","text":"from grafo import Grafo\nimport utils\nfrom random import shuffle\nimport sys\nimport time\nfrom heapq import heappop, heappush, heapify\nfrom collections import deque\n\nCOLOR_VERDE = '\\033[92m'\nCOLOR_ROJO = '\\033[91m'\nCOLOR_NORMAL = '\\033[0m'\n\nclass Net:\n def __init__(self, datos, debug = False):\n '''\n Inicializa una instancia de Netstats. \n Recibe por parámetro una lista de listas, con los datos del programa.\n '''\n self.grafo = Grafo(True)\n self.debug = debug\n self.comandos = { # Comandos del programa con sus respectivos argumentos\n # \"comando\": (tuple) (cantidad minima de parametros, cantidad maxima)\n \"listar_operaciones\": [0, 0],\n \"camino\": [2, 2],\n \"mas_importantes\": [1, 1],\n \"conectados\": [1, 1],\n \"ciclo\": [2, 2],\n \"lectura\": [1, \"INF\"],\n \"diametro\": [0, 0],\n \"rango\": [2, 2],\n \"comunidad\": [1, 1],\n \"navegacion\": [1 ,1],\n \"clustering\": [0, 1]\n }\n \n # Diccionarios de cache\n self.__conectados_cache = set()\n self.__pr_cache = {}\n self.__diametro_chache = []\n \n for linea in datos: # Se procesa la linea (lista).\n for item in linea: # Cada elemento de la lista se agrega al grafo\n self.grafo.agregar_vertice(item)\n\n for i in range(0, len(linea)): # Se agregan las conexiones entre los vertices agregados\n if i != len(linea) - 1:\n self.grafo.agregar_arista(linea[0], linea[i+1])\n\n def procesar_comando(self, entrada):\n '''\n Se encarga de procesar el comando de entrada y lo dirige a su correspondiente método.\n '''\n entrada = entrada.split()\n comando = entrada.pop(0)\n if comando == \"\":\n raise KeyboardInterrupt\n elif comando in self.comandos:\n self.params = (\" \".join(entrada)).split(\",\")\n if self.params[0] == \"\": self.params.pop(0)\n\n actual = self.comandos[comando]\n if actual[1] == \"INF\": actual[1] = len(self.grafo)\n\n if len(self.params) < actual[0] or len(self.params) > actual[1]:\n print(f\"{COLOR_ROJO}ERROR {COLOR_NORMAL} - parámetros incorrectos para comando {comando}.\")\n else:\n try:\n if self.debug: \n print(f\"Comando: {comando}\")\n t0 = round(time.time() * 1000)\n getattr(self, comando)()\n if self.debug: \n t1 = round(time.time() * 1000)\n print(f\"{COLOR_VERDE}PASS{COLOR_NORMAL}: {comando} tardó {str(t1 - t0)}ms\")\n except KeyError:\n print(f\"{COLOR_ROJO}ERROR{COLOR_NORMAL} - No existe el/los datos.\")\n else:\n print(f\"{COLOR_ROJO}ERROR{COLOR_NORMAL} - Comando incorrecto\")\n\n '''\n Métodos de cada comando.\n Hay un método por cada item de self.comandos.\n La validacion de parámetros básica se realiza en self.procesar_comando().\n '''\n\n def listar_operaciones(self):\n for comando in self.comandos:\n if comando == \"listar_operaciones\": continue\n print(comando)\n \n def camino(self):\n origen = self.params[0]\n destino = self.params[1]\n padres, orden = utils.bfs(self.grafo, origen)\n\n lista = []\n actual = destino\n while actual != origen:\n if actual == None: \n print(\"No se encontro recorrido\")\n return\n else:\n lista.insert(0, actual)\n actual = padres[actual]\n \n 
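# Worked example (hypothetical values): with padres = {'B': 'A', 'C': 'B'},\n        # origen = 'A' and destino = 'C', the loop above leaves lista == ['B', 'C'],\n        # so this prints \"A -> B -> C\" and \"Costo: 2\" (orden[destino]).\n        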
resultado = origen + \" -> \"\n for elemento in lista:\n if elemento == destino: continue\n resultado += elemento + \" -> \"\n print(resultado + destino)\n print(f\"Costo: {orden[destino]}\")\n\n def diametro(self):\n if self.__diametro_chache:\n print(self.__diametro_chache[0])\n print(\"Costo: \" + str(self.__diametro_chache[1]))\n return\n\n grafo = self.grafo\n diametro = 0\n for v in grafo:\n padre, distancia = utils.bfs(grafo, v)\n for w in grafo:\n if diametro < distancia[w]:\n inicio = v\n fin = w\n diametro = distancia[w]\n padre_definitivo = padre\n \n lista = []\n actual = fin\n while actual != inicio:\n lista.insert(0, actual)\n actual = padre_definitivo[actual]\n \n camino = inicio + \" -> \"\n for pagina in lista:\n if pagina == fin: continue\n camino += pagina + \" -> \" \n camino += fin\n \n self.__diametro_chache.append(camino)\n self.__diametro_chache.append(diametro)\n print(camino)\n print(\"Costo: \" + str(diametro))\n\n def rango(self):\n origen = self.params[0]\n n = int(self.params[1])\n\n print(utils.vertices_en_radio_n(self.grafo, origen, n))\n \n def comunidad(self):\n grafo = self.grafo\n pagina = self.params[0]\n if not grafo.vertice_pertenece(pagina): return None\n\n iteraciones = 50 # Condición de corte que decidimos\n label = {} \n padres_adyacentes = utils.padres(grafo)\n\n indice = 0\n for v in grafo:\n label[v] = indice\n indice += 1\n\n orden_aleatorio = list(grafo.obtener_vertices())\n label_nuevo = label.copy()\n for i in range(0,iteraciones): \n shuffle(orden_aleatorio)\n for v in orden_aleatorio:\n if len(padres_adyacentes[v]) == 0: continue\n label_nuevo[v] = utils.label_mayor_frecuencia(padres_adyacentes[v], label) # Usamos el label de la iteración anterior!\n \n label = label_nuevo.copy() # Para actualizar asincrónicamente\n \n resultado = []\n comunidad_pag = label_nuevo[pagina]\n for v in grafo:\n if label_nuevo[v] == comunidad_pag:\n resultado.append(v)\n print(\", \".join(resultado))\n\n def navegacion(self):\n actual = self.params[0]\n resultado = []\n if actual not in self.grafo:\n print(f\"ERROR - {actual} no se encuentra en el sistema\") \n else:\n for _ in range(0, 21):\n resultado.append(actual)\n adyacentes = self.grafo.obtener_adyacentes(actual)\n if(len(adyacentes) == 0): break\n actual = adyacentes[0]\n print(\" -> \".join(resultado))\n\n def lectura(self):\n grafo_aux = Grafo(True)\n \n for param in self.params:\n grafo_aux.agregar_vertice(param)\n\n for param in self.params:\n for w in self.grafo.obtener_adyacentes(param):\n if w in self.params:\n grafo_aux.agregar_arista(w, param)\n\n grados = utils.grado_entrada(grafo_aux)\n \n q = deque()\n\n for v in grafo_aux:\n if grados[v] == 0:\n q.append(v)\n\n res = []\n\n while len(q):\n v = q.popleft()\n res.append(v)\n for w in grafo_aux.obtener_adyacentes(v):\n grados[w] -= 1\n if grados[w] == 0:\n q.append(w)\n\n if len(res) == len(grafo_aux):\n print(\", \".join(res)) \n else:\n print(\"No existe forma de leer las paginas en orden\")\n\n def clustering(self):\n grafo = self.grafo\n pagina = None\n if self.params:\n pagina = self.params[0]\n \n if pagina:\n print(\"%1.3f\" % round(utils.obtener_clustering_v(grafo, pagina),3))\n return\n\n clustering_total = 0\n for v in grafo:\n clustering_total += utils.obtener_clustering_v(grafo, v)\n print(\"%1.3f\" % round(clustering_total / len(grafo),3))\n\n def conectados(self):\n pagina = self.params[0]\n\n # Si esta consulta ya se realizó antes \n if pagina in self.__conectados_cache:\n utils.imprimir_json(\"conectados_\"+pagina)\n 
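# cache hit: the JSON stored by an earlier call was just reprinted above,\n            # so we can skip re-running Tarjan's algorithm and exit early.\n            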
return\n\n # Vamos a tocar un poco la recursión en esta ejecución porque se puede poner peluda\n recursion_limit = sys.getrecursionlimit()\n sys.setrecursionlimit(100000)\n\n visitados = set()\n pila = []\n mas_bajo = {}\n orden = {}\n apilados = set()\n componentes = []\n\n orden[pagina] = 0\n visitados, pila, apilados, orden, componentes, mas_bajo, indice = utils.tarjan(self.grafo, pagina, visitados, pila, apilados, orden, mas_bajo, componentes, 0)\n resultado = []\n \n\n for c in componentes:\n if pagina not in c: continue\n for elemento in c:\n resultado.append(elemento)\n \n utils.guardar_json(\"conectados_\"+pagina, resultado)\n self.__conectados_cache.add(\"conectados_\"+pagina)\n utils.imprimir_json(\"conectados_\"+pagina)\n \n # Volvemos a la normalidad, no queremos heridos\n sys.setrecursionlimit(recursion_limit)\n\n def mas_importantes(self):\n n = int(self.params[0])\n\n if self.__pr_cache:\n print(list(self.__pr_cache.keys())[:n])\n return\n\n precision = 6 # Cuanto mayor este número, más preciso es pero más tarda\n grafo = self.grafo\n\n padres_adyacentes = utils.padres(grafo)\n vertices = list(grafo.obtener_vertices())\n shuffle(vertices)\n grado_salida = utils.grado_salida(grafo)\n\n d = 0.85 # Coeficiente de amortiguación\n primer_termino = (1 - d) / len(vertices)\n pr = {} # Pagerank\n\n for v in grafo:\n pr[v] = primer_termino # Inicializamos así los pr\n\n pr_nueva_iteracion = pr.copy()\n seguir_iterando = True\n while seguir_iterando:\n seguir_iterando = False\n for v in vertices:\n if not padres_adyacentes[v]: continue\n suma_prs_adyacentes_sobre_grado = 0\n for w in padres_adyacentes[v]:\n suma_prs_adyacentes_sobre_grado += pr[w] / grado_salida[w]\n nuevo_pr = primer_termino + d * suma_prs_adyacentes_sobre_grado\n if round(nuevo_pr,precision) != round(pr[v],precision): # Si no redondeamos tarda mucho en converger\n pr_nueva_iteracion[v] = nuevo_pr\n seguir_iterando = True\n\n pr = pr_nueva_iteracion.copy()\n #Ordenamos y printeamos los más importantes\n aux = sorted(pr.items(), key=lambda item: item[1], reverse = True)\n res = {k: v for k, v in aux}\n self.__pr_cache = res # Guardamos los resultados para usarlos en futuras llamadas\n print(list(res.keys())[:n]) \n \n def ciclo(self):\n # Vamos a tocar un poco la recursión en esta ejecución porque se puede poner peluda\n recursion_limit = sys.getrecursionlimit()\n sys.setrecursionlimit(100000)\n\n pagina = self.params[0]\n n = int(self.params[1])\n ciclo = utils.ciclo_de_vertices(self.grafo, pagina, n)\n if ciclo:\n print(\" -> \".join(ciclo))\n else:\n print(\"No se encontro recorrido\")\n \n # Volvemos a la normalidad, no queremos heridos\n sys.setrecursionlimit(recursion_limit)","repo_name":"juancebarberis/algo2","sub_path":"tp3/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":10073,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31559431225","text":"print(\"The List Manager\")\n\n\ndef my_fuc():\n\timport sys\n\timport os\n\timport time\n\n\tprint(\"Choose from the following options\")\n\tprint(\"\")\n\n\ttodo_list = []\n\n\twhile True:\n\t\ttime.sleep(3)\n\t\tos.system('cls' if os.name == 'nt' else 'clear')\n\n\t\tprint(\"1. View Items\")\n\t\tprint(\"2. Add Item\")\n\t\tprint(\"3. Remove Item\")\n\t\tprint(\"4. Edit Item\")\n\t\tprint(\"5. 
Clear List\")\n\t\toption = input(\"Enter your option: \")\n\n\t\t#view Items\n\t\tif option == \"1\":\n\t\t\tif not todo_list:\n\t\t\t\tprint(\"No items to view\")\n\t\t\telse:\n\t\t\t\tfor i, item in enumerate(todo_list):\n\t\t\t\t\tprint(f\"{i+1}. {item}\")\n\n\t\t#add Item\n\n\t\telif option == \"2\":\n\t\t\titem = input(\"Enter the item to add: \")\n\t\t\tif item in todo_list:\n\t\t\t\tprint(\"Item already exists\")\n\t\t\telse:\n\t\t\t\ttodo_list.append(item)\n\t\t\t\tprint(\"Item added\")\n\n\t\t#remove Item\n\t\telif option == \"3\":\n\t\t\titem = input(\"Enter the item to remove: \")\n\t\t\tif not item in todo_list:\n\t\t\t\tprint(\"Item does not exist\")\n\t\t\telse:\n\t\t\t\tconfirmation = input(\"Are you sure you want to remove this item? (y/n) \")\n\t\t\t\tif confirmation == \"y\":\n\t\t\t\t\ttodo_list.remove(item)\n\t\t\t\t\tprint(\"Item removed\")\n\n\t\t#edit Item\n\t\telif option == \"4\":\n\t\t\titem = input(\"Enter the item to edit: \")\n\t\t\tif not item in todo_list:\n\t\t\t\tprint(\"Item does not exist\")\n\t\t\telse:\n\t\t\t\tconfirmation = input(\"Are you sure you want to edit this item? (y/n) \")\n\t\t\t\tif confirmation == \"y\":\n\t\t\t\t\tnew_item = input(\"Enter the new item: \")\n\t\t\t\t\tif new_item in todo_list:\n\t\t\t\t\t\tprint(\"Item already exists\")\n\t\t\t\t\telse:\n\t\t\t\t\t\ttodo_list[todo_list.index(item)] = new_item\n\t\t\t\t\t\tprint(\"Item edited\")\n\n\t\t\t#clear List\n\t\t\t\telif option == \"5\":\n\t\t\t\t\ttodo_list = []\n\t\t\t\t\tprint(\"List cleared\")\n\n\t\telse:\n\t\t\tprint(\"Invalid option\")\n\n\nmy_fuc()\n","repo_name":"MyCool-DJ/Day-35-Lists","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"74441042434","text":"# Jiayao Gu\n# 260830725 \n\nimport random\nimport matplotlib.pyplot as plt\n\nclass Animal:\n \n # Initializer method\n def __init__(self, my_species, row, column):\n \"\"\" Constructor method\n Args:\n self (Animal): the object being created\n my_species (str): species name (\"Lion\" or \"Zebra\")\n row (int): row for the new animal\n column (int): column for the new animal\n Returns:\n Nothing\n Behavior:\n Initializes a new animal, setting species to my_species\n \"\"\" \n self.species = my_species\n self.row = row\n self.col = column\n self.age = 0\n self.time_since_last_meal = 0\n\n def __str__(self):\n \"\"\" Creates a string from an object\n Args:\n self (Animal): the object on which the method is called\n Returns:\n str: String summarizing the object\n \"\"\"\n s= self.species+\" at position (\"+str(self.row)+\",\"+str(self.col)+\"):, age=\"+str(self.age)+\", time_since_last_meal=\"+\\\n str(self.time_since_last_meal)\n return s\n \n def can_eat(self, other):\n \"\"\" Checks if self can eat other\n Args:\n self (Animal): the object on which the method is called\n other (Animal): another animal\n Returns:\n boolean: True if self can eat other, and False otherwise\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 3 HERE\n if self.species == \"Lion\" and other.species == \"zebra\":\n return True\n else:\n return False\n\n def time_passes(self):\n \"\"\" Increases age and time_since_last_meal\n Args:\n self (Animal): the object on which the method is called\n Returns:\n Nothing\n Behavior:\n Increments age and time_since_last_meal\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 4 HERE\n self.age += 1\n self.time_since_last_meal += 1\n return \n\n def dies_of_old_age(self):\n \"\"\" 
Determines if an animal dies of old age\n Args:\n self (Animal): the object on which the method is called\n Returns:\n boolean: True if animal dies of old age, False otherwise\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 5 HERE\n if (self.species == \"Lion\" and self.age >= 18) or (self.species == \"Zebra\" and self.age >= 7):\n return True\n else:\n return False\n\n def dies_of_hunger(self):\n \"\"\" Determines if an animal dies of hunger\n Args:\n self (Animal): the object on which the method is called\n Returns:\n boolean: True if animal dies of hunger, False otherwise\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 6 HERE \n if self.species == \"Lion\" and self.time_since_last_meal >= 6:\n return True\n else: \n return False \n \n def will_reproduce(self):\n \"\"\" Determines if an animal will reproduce this month\n Args:\n self (Animal): the object on which the method is called\n Returns:\n boolean: True if ready to reproduce, False otherwise\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 7 HERE\n if self.species==\"Zebra\":\n if self.age==3 or self.age==6 and self.dies_of_old_age()==False:\n return True\n else:\n return False\n elif self.species==\"Lion\":\n if self.age==7 or self.age==14 and self.age<18 and self.dies_of_hunger()==False: \n return True\n else:\n return False\n\n # end of Animal class\n\ndef initialize_population(grid_size):\n \"\"\" Initializes the grid by placing animals onto it.\n Args:\n grid_size (int): The size of the grid\n Returns:\n list of animals: The list of animals in the ecosystem\n \"\"\" \n all_animals=[]\n all_animals.append(Animal(\"Lion\",3,5))\n all_animals.append(Animal(\"Lion\",7,4))\n all_animals.append(Animal(\"Zebra\",2,1)) \n all_animals.append(Animal(\"Zebra\",5,8))\n all_animals.append(Animal(\"Zebra\",9,2))\n all_animals.append(Animal(\"Zebra\",4,4))\n all_animals.append(Animal(\"Zebra\",4,8))\n all_animals.append(Animal(\"Zebra\",1,2))\n all_animals.append(Animal(\"Zebra\",9,4))\n all_animals.append(Animal(\"Zebra\",1,8))\n all_animals.append(Animal(\"Zebra\",5,2))\n \n return all_animals\n \n\ndef print_grid(all_animals, grid_size):\n \"\"\" Prints the grid\n Args:\n all_animals (list of animals): The animals in the ecosystem\n grid_size (int): The size of the grid\n Returns:\n Nothing\n Behavior:\n Prints the grid\n \"\"\" \n \n #get the set of tuples where lions and zebras are located\n lions_tuples = { (a.row,a.col) for a in all_animals if a.species==\"Lion\"}\n zebras_tuples = { (a.row,a.col) for a in all_animals if a.species==\"Zebra\"}\n\n print(\"*\"*(grid_size+2))\n for row in range(grid_size):\n print(\"*\",end=\"\")\n for col in range(grid_size):\n if (row,col) in lions_tuples:\n print(\"L\",end=\"\")\n elif (row,col) in zebras_tuples:\n print(\"Z\",end=\"\")\n else:\n print(\" \",end=\"\")\n print(\"*\")\n print(\"*\"*(grid_size+2))\n\n\ndef sort_animals(all_animals):\n \"\"\" Sorts the animals, left to right and top to bottom\n Args:\n all_animals (list of animals): The animals in the ecosystem\n Returns:\n Nothing\n Behavior:\n Sorts the list of animals\n \"\"\" \n def get_key(a):\n return a.row+0.001*a.col\n all_animals.sort(key=get_key)\n \n \ndef my_random_choice(choices):\n \"\"\" Picks ones of the elements of choices\n Args:\n choices (list): the choices to choose from\n Returns:\n One of elements in the list\n \"\"\"\n if not choices:\n return None\n \n # for debugging purposes, we use this fake_random_choice function\n def getKey(x):\n return x[0]+0.001*x[1]\n return min(choices, key=getKey) \n\n # for actual random selection, 
replace the above this:\n #return random.choice(choices)\n\n\ndef list_neighbors(current_row, current_col, grid_size):\n \"\"\" Produces the list of neighboring positions\n Args:\n current_row (int): Current row of the animal\n current_col (int): Current column of the animal\n grid_size (int): The size of the gride\n Returns:\n list of tuples of two ints: List of all position tuples that are \n around the current position, without \n including positions outside the grid\n \"\"\"\n # WRITE YOUR CODE FOR QUESTION 1 HERE\n i = current_row\n j = current_col\n List = []\n for row_increment in range(-1,2):\n new_row = i + row_increment\n for col_increment in range(-1,2):\n new_col = j + col_increment\n if new_col == j and new_row == i:\n pass\n elif (new_row > 0 and new_row < grid_size) and (new_col > 0 and new_col < grid_size):\n List.append((new_row, new_col))\n return List\n \n \ndef random_neighbor(current_row, current_col, grid_size, only_empty=False, animals=[]):\n \"\"\" Chooses a neighboring positions from current position\n Args:\n current_row (int): Current row of the animal\n current_col (int): Current column of the animal\n size (int): Size of the grid\n only_empty (boolean): keyword argument. If True, we only consider \n neighbors where there is not already an animal\n animals (list): keyword argument. List of animals present in the ecosystem\n Returns:\n tuple of two int: A randomly chosen neighbor position tuple\n\n \"\"\" \n # WRITE YOUR CODE FOR QUESTION 2 HERE\n all_neighbors = [n for n in list_neighbors(current_row, current_col, grid_size)]\n next_cell = my_random_choice(all_neighbors)\n i = next_cell[0]\n j = next_cell[1]\n\n if not(only_empty):\n return next_cell\n else:\n for animal in animals:\n if animal.row == i and animal.col == j:\n all_neighbors.remove((animal.row,animal.col))\n if len(all_neighbors) == 0:\n return None\n return next_cell #next_cell does not have an animal\n\n\ndef one_step(all_animals, grid_size):\n \"\"\" simulates the evolution of the ecosystem over 1 month\n Args:\n all_animals (list of animals): The animals in the ecosystem\n grid_size (int): The size of the grid\n Returns:\n list fo str: The events that took place\n Behavior:\n Updates the content of animal_grid by simulating one time step\n \"\"\" \n sort_animals(all_animals) # ensures that the animals are listed \n # from left to right, top to bottom \n \n # run time_passes on all animals\n for animal in all_animals:\n animal.time_passes()\n # make animals die of old age\n events = []\n dead = []\n for animal in all_animals:\n if(animal.dies_of_old_age()):\n dead.append(animal)\n events.append(\"{} dies of old age at position {} {}\".format(animal.species, animal.row, animal.col)) \n # make animals die of hunger\n elif(animal.dies_of_hunger()):\n dead.append(animal)\n events.append(\"{} dies of hunger at position {} {}\".format(animal.species,animal.row, animal.col))\n\n for a in all_animals:\n if a in dead:\n all_animals.remove(a)\n \n # move animals\n new_row, new_col = random_neighbor(animal.row, animal.col, grid_size, False, all_animals)\n eaten = []\n for animal in all_animals: \n # search for animal that on the new cell\n present_animal = [n for n in all_animals if n.row == new_row and n.col == new_col and n not in eaten]\n #there is an animal in the new cell\n if present_animal != []:\n # search for the animal at the new_position\n #3 cases:\n # if a lion moves to a cell contains a zebra, lion eats zebra and lion moves to new_position, zebra is removed from the cel\n present_animal = 
present_animal[0]\n if animal.can_eat(present_animal):\n events.append(animal.species+\" moves from \"+str(animal.row)+\" \"+str(animal.col)+\" to \"+str(new_row)+\" \"+str(new_col)+\" and eats a zebra\")\n animal.row = new_row #update position\n animal.col = new_col\n animal.time_since_last_meal = 0 #update the time since last meal\n eaten.append(present_animal)\n # if a zebra moves to a cell contains a lion, zebra is eaten and removed from its position\n elif present_animal.can_eat(animal):\n events.append(animal.species+\" moves from \"+str(animal.row)+\" \"+str(animal.col)+\" to \"+str(new_row)+\" \"+str(new_col)+ \" and is eaten by a lion\")\n eaten.append(animal)\n present_animal.time_since_last_meal = 0\n # if the same animals present, animals stay, nothing happens\n else: # two animals are the same species\n events.append(animal.species+\" moves from \"+str(animal.row)+\" \"+str(animal.col)+\" to \"+str(new_row)+\" \"+str(new_col)+ \" but there is an animal of the same species\")\n\n #new cell does not contain an animal\n else:\n events.append(animal.species+\" moves from \"+str(animal.row)+\" \"+str(animal.col)+\" to \"+str(new_row)+\" \"+str(new_col))\n animal.row = new_row\n animal.col = new_col\n \n for a in all_animals:\n if a in eaten:\n all_animals.remove(a)\n # since animals have moved, we sort the list of animals again, so that\n # we consider them for reproduction in the right order \n sort_animals(all_animals)\n\n babies = []\n # reproduce animals\n for animal in all_animals:\n if animal.will_reproduce():\n result = random_neighbor(animal.row, animal.col, grid_size, False, all_animals)\n if result == None:\n return;\n else: # new cell is empty\n baby_row, baby_col = result\n all_animals.append(Animal(animal.species, baby_row, baby_col)) #add baby to all_animals list\n events.append(\"A new baby {} is born at {} {}\".format(animal.species, baby_row, baby_col))\n\ndef run_whole_simulation(grid_size = 10, simulation_duration = 20, image_file_name=\"population.png\"):\n \"\"\" Executes the entire simulation\n Args:\n grid_size (int): Size of the grid\n simulation_duration (int): Number of steps of the simulation\n image_file_name (str): name of image to be created.\n Returns:\n Nothing\n Behavior:\n Simulates the evolution of an animal grid\n Generates graph of species abundance and saves it to populations.png\n \"\"\" \n # Do not change this; this initializes the animal population\n all_animals = initialize_population(grid_size)\n # WRITE YOUR CODE FOR QUESTION 9 HERE\n lions = []\n zebras = []\n for time in range(simulation_duration):\n num_of_lions = 0\n num_of_zebras = 0\n for animal in all_animals:\n if animal.species == \"Lion\":\n num_of_lions += 1\n elif animal.species == \"Zebra\":\n num_of_zebras += 1\n lions.append(num_of_lions)\n zebras.append(num_of_zebras)\n one_step(all_animals, grid_size)\n\n plt.plot(range(simulation_duration), lions, \"b\", label=\"Lions\")\n plt.plot(range(simulation_duration), zebras, \"r\", label = \"zebras\")\n plt.xlabel(\"time\")\n plt.ylabel(\"Number of individuals\")\n plt.legend(loc = \"best\")\n plt.savefig(image_file_name)\n\n\n\n \n ","repo_name":"jgu13/Miscellaneous","sub_path":"Python/ecosys_simulator/ecosystem_simulator.py","file_name":"ecosystem_simulator.py","file_ext":"py","file_size_in_byte":13857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7906500638","text":"\"\"\"\nЕсть список из случайных чисел и строк. 
Создайте цикл, итерирующийся до тех пор, пока не встретится число \"777\".\nЕсли в течении 100 попыток число не будет найдено — остановить цикл и вызвать ошибку с соответсвующим сообщением.\n\"\"\"\n\nfrom random import randint\n\nlst = [randint(500, 999) for _ in range(100 + 1)]\n\nind = 0\nwhile not (isinstance(lst[ind], int) and lst[ind] == 777):\n if ind > 99:\n raise Exception(\"Maximum number of iterations exceeded\")\n ind += 1\n\nprint(ind, lst[ind])\n","repo_name":"Kyrylo-Kotelevets/NIX_python","sub_path":"Beginner/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10462285903","text":"import os\nimport sys\nimport time\nimport math\nimport logging\nimport dateutil.parser\nfrom gsuite_exporter import auth\n\nlogger = logging.getLogger(__name__)\n\nclass AdminReportsAPIFetcher(object):\n \"\"\"Fetch Admin SDK Reports API records and streams them.\n\n Args:\n api (`googleapiclient.discovery.Resource`): The Admin SDK API to fetch\n records from.\n version (str): The Admin SDK API version.\n credentials_path (str): The path to the GSuite Admin credentials.\n scopes (list): A list of scopes to grant the API requests.\n \"\"\"\n SCOPES = [\n 'https://www.googleapis.com/auth/admin.reports.audit.readonly',\n ]\n REPORTS_API_VERSION = 'v1'\n def __init__(self, gsuite_admin, credentials_path=None):\n self.api_name = 'reports_{}'.format(\n AdminReportsAPIFetcher.REPORTS_API_VERSION)\n\n logger.debug(\"Initializing Admin API '{}' ...\".format(\n self.api_name))\n self.api = auth.build_service(\n api='admin',\n version=self.api_name,\n credentials_path=credentials_path,\n user_email=gsuite_admin,\n scopes=AdminReportsAPIFetcher.SCOPES)\n\n def fetch(self, application, start_time, user_key='all', item_key='items'):\n \"\"\"Fetch records from Admin API based on a query.\n\n Args:\n api_query (dict): The request arguments as as dict.\n\n Yields:\n list: A list of entries in each response\n \"\"\"\n activities = self.api.activities()\n req = activities.list(\n applicationName=application,\n userKey=user_key,\n startTime=start_time\n )\n while req is not None:\n res = req.execute()\n items = res.get(item_key, [])\n logger.debug(\"Retrieved {} new Admin API records from '{}.{}' app since {}\".format(\n len(items),\n self.api_name,\n application,\n start_time))\n yield items\n req = activities.list_next(\n previous_request=req,\n previous_response=res)\n","repo_name":"cleibl/terraform-google-gsuite-exporter-cfn","sub_path":"gsuite-exporter-cloudfunction/gsuite_exporter/collectors/reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"13727547723","text":"from stop import Stop\n\n\nclass OfficerId(Stop):\n def __init__(self, stop_filepath=None, acs=None, chunk=None, chunksize=1000000,\n groupby_columns=['county_fips', 'state_officer_id','driver_race'], output_directory='data/summaries'):\n super().__init__(stop_filepath, acs, chunk, chunksize, groupby_columns, output_directory)\n\n def create_single_columns_from_summary_table(self):\n summary = self.summary.reset_index()\n melt = summary.melt(id_vars=self.groupby_columns, value_vars=['stops', 'stop_percentage'])\n pivot = melt.pivot_table(index=['county_fips','state_officer_id'], columns=['driver_race', 'variable'], values='value')\n pivot.columns = ['_'.join(col).strip() for col in 
pivot.columns.values]\n pivot.reset_index(inplace=True)\n return pivot\n\n def export_file(self):\n export_filename = self.filepath.split('/')[-1]\n export_path = self.output_directory + '/' + export_filename\n self.summary.to_csv(export_path)\n\n def create_stop_percentage(self):\n stop_percentage_label = 'stop_percentage'\n stops_ = self.summary['stops']\n summary_groupby = self.summary.groupby(['state_officer_id', 'county_fips'])\n self.summary[stop_percentage_label] = stops_ / summary_groupby.transform(sum)['stops']\n return True\n","repo_name":"shawnveltman/policing_racism","sub_path":"src/officerid.py","file_name":"officerid.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23642379371","text":"def\treadint():\n\treturn\t(list(map(int,\tsys.stdin.readline().strip().split(\" \"))))\ndef\treadstr():\n\treturn\t(list(map(str,\tsys.stdin.readline().strip().split(\"\t\"))))\ndef\trline():\n\treturn\tsys.stdin.readline().strip()\ndef\tmain():\n\tpass\n\nif\t__name__=='__main__':\n\timport\tsys\n\ttestcase\t=\tint(rline())\n\tfor\ti\tin\trange(testcase):\n\t\tl\t=\treadint()\n\t\t#print (l)\n\t\tn1\t=\tl[0]\n\t\tn2\t=\tl[1]\n\t\tli = []\n\t\t#print (n1, n2)\n\t\tfor a in range(n1, n2+1):\n\t\t\tli.append(a)\n\t\t#print(li)\n\t\tdict = {}\n\t\tcount = 0\n\t\tfor ll in li:\n\t\t\ts = str(ll)\n\t\t\tlis = []\n\t\t\tfor k in range(1, len(s)):\n\t\t\t\t\ts1 = s[len(s)-k:] + s[0:len(s)-k]\n\t\t\t\t\tif int(s1) in li and ll != int(s1):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tt = dict[ll]\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tt = []\n\t\t\t\t\t\tt.append(int(s1))\n\t\t\t\t\t\tdict[ll] = t\n\t\t\t\t\t\tli.remove(int(s1))\n\t\tfor e in dict:\n\t\t\tnn = len(dict[e])\n\t\t\tif nn == 1:\n\t\t\t\tcount += 1\n\t\t\telse:\n\t\t\t\tcount += nn * (nn+1) / 2\n\t\tprint\t('Case\t#%d: %d'\t%\t((i+1),count))\n\tmain()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/1051.py","file_name":"1051.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22297831164","text":"from mode import *\nimport pymongo\nimport os\nimport json\nfrom read_blockchain import get_votes_from_blochchain\n\n\n# Connection String\nclient = pymongo.MongoClient(mongosettings[URL])\ndb = client[mongosettings[MONGODB]]\nresults_collection = db[mongosettings[RESULTSCOLLECTION]]\nballotspecs_collection = db[mongosettings[BALLOTSPECSCOLLECTION]]\nvotes_collection = db[mongosettings[VOTESCOLLECTION]]\nbills_collection = db[mongosettings[BILLSCOLLECTION]]\nissues_collection = db[mongosettings[ISSUESCOLLECTION]]\nCONSTITUENCY = \"Australia\"\n\n# dummy function, waiting for votes to be counted on the blockchain.\n\n\ndef get_votes(spec_hash):\n try:\n (yes, no) = get_votes_from_blochchain(spec_hash)\n except Exception as e:\n (yes, no) = (1, 1)\n if yes == 0:\n yes = 1\n if no == 0:\n no = 1\n return(yes, no)\n\n\ndef run(event, context):\n # print(all_results)\n for ballot in ballotspecs_collection.find():\n result_doc = {}\n result_doc[\"_id\"] = ballot[\"_id\"]\n result_doc[\"constituency\"] = CONSTITUENCY\n (result_doc[\"yes\"], result_doc[\"no\"]) = get_votes(\"0x\"+ballot[\"ballotspec_hash\"])\n # print(result_doc[\"_id\"], result_doc[\"yes\"], result_doc[\"no\"])\n results_collection.replace_one({'_id': result_doc[\"_id\"]}, {'data': result_doc}, True)\n if result_doc[\"constituency\"] == 
CONSTITUENCY:\n if result_doc[\"_id\"][0] == \"i\":\n issues_collection.update_one({'_id': result_doc[\"_id\"]},\n {\"$set\": {\"data.yes\": result_doc[\"yes\"]}})\n issues_collection.update_one({'_id': result_doc[\"_id\"]},\n {\"$set\": {\"data.no\": result_doc[\"no\"]}})\n elif result_doc[\"_id\"][0] == \"s\" or result_doc[\"_id\"][0] == \"r\":\n bills_collection.update_one({'_id': result_doc[\"_id\"]},\n {\"$set\": {\"data.yes\": result_doc[\"yes\"]}})\n bills_collection.update_one({'_id': result_doc[\"_id\"]},\n {\"$set\": {\"data.no\": result_doc[\"no\"]}})\n","repo_name":"voteflux/voting-app-api","sub_path":"update_results_db.py","file_name":"update_results_db.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"18542768836","text":"# Past charts are rendered in the browser using javascript(Single Page Application:SPA)\n# Instead of using popular modules such as 'Selenium' or 'PhantomJS', I found the URL of the AJAX request.\n# Check this blog for detailed explanation\n# https://blog.hartleybrody.com/web-scraping-cheat-sheet/#more-advanced-topics\n# section: Javascript Heavy Websites\n# -------------------- challenges encountered --------------------\n# 1. Without 'Cookie' in header, the server returned 406 error which I believe was due to an authorization issue\n# 2. Request GET using params parameter somehow did not work\n# request.get(host_url, headers=header, params=payload)\n# So I constructed a payload substring and appended to the host_url\n# 3. Python by default is pointer reference, use .copy() to make another copy of the data\n# ----------------------------------------------------------------\n\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport time\nimport datetime\nfrom dateutil.relativedelta import *\n\nhost_url = 'https://www.melon.com/chart/search/list.htm'\nlikes_url = 'https://www.melon.com/commonlike/getSongLike.json'\n\nheader = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n 'Referer': 'https://www.melon.com/chart/search/index.htm',\n 'X-Requested-With': 'XMLHttpRequest'\n}\n\n\ndef get_chart(payload):\n sub_string = ''\n for idx, t in enumerate(payload.items()):\n if idx == 0:\n sub_string += '?'\n else:\n sub_string += '&'\n sub_string += t[0] + '=' + t[1]\n r = requests.get(host_url + sub_string, headers=header)\n html = r.text\n soup = BeautifulSoup(html, 'html.parser')\n ret_dict = dict()\n ret_dict['year'] = payload['year']\n ret_dict['month'] = payload['mon']\n ret_dict['startDay'] = payload['startDay']\n ret_dict['endDay'] = payload['endDay']\n ret_dict['chart'] = list()\n chart_list = soup.find_all('tr', class_='lst50') + soup.find_all('tr', class_='lst100')\n for idx, tag in enumerate(chart_list):\n a_dict = dict()\n song_name = tag.find('div', class_='ellipsis rank01').text.strip()\n song_id = tag.find('div', class_='wrap pd_none left').find('input')['value']\n artist_name = tag.find('span', class_='checkEllipsis').text.strip()\n pattern = tag.find('span', class_='checkEllipsis').find('a')['href'].strip()\n artist_id = re.search(r'(?<=\\')\\d*(?=\\')', pattern)[0]\n album_name = tag.find('div', class_='ellipsis rank03').text.strip()\n pattern = tag.find('div', class_='ellipsis rank03').find('a')['href'].strip()\n album_id = re.search(r'(?<=\\')\\d*(?=\\')', pattern)[0]\n rank = tag.find('div', class_='wrap 
right_none').find_all('span')[0].text.strip()\n pattern = tag.find('span', class_='wrap_rank')['title']\n status = ''\n delta = '0'\n if '단계 상승' in pattern:\n status = 'up'\n delta = pattern.strip('단계 상승')\n elif '단계 하락' in pattern:\n status = 'down'\n delta = pattern.strip('단계 하락')\n elif '순위 집입' in pattern:\n status = 'new'\n elif '순위 동일' in pattern:\n status = 'static'\n else:\n status = pattern\n a_dict['rank'] = int(rank)\n a_dict['song_name'] = song_name\n a_dict['artist_name'] = artist_name\n a_dict['album_name'] = album_name\n a_dict['song_id'] = int(song_id)\n a_dict['artist_id'] = int(artist_id)\n a_dict['album_id'] = int(album_id)\n a_dict['status'] = status\n a_dict['delta'] = int(delta)\n ret_dict['chart'].append(a_dict)\n return ret_dict\n\n\ndef get_search_range(data, payload):\n url = 'https://www.melon.com/chart/search/cond.json'\n r = requests.post(url, headers=header, data=data)\n payload['year'] = data['year']\n payload['mon'] = data['mon']\n ret_list = list()\n print(r.json()['itemData'])\n for item in r.json()['itemData']:\n payload['startDay'] = item['STARTDAY']\n payload['endDay'] = item['ENDDAY']\n payload['day'] = item['ITEMVAL'].replace('^', '%5E')\n ret_list.append(payload.copy())\n return ret_list\n\n\nif __name__ == \"__main__\":\n date = datetime.date(2016, 2, 22)\n until = datetime.date(2013, 10, 22)\n\n payload = {\n 'chartType': 'WE',\n 'age': '2010',\n 'year': '2013',\n 'mon': '06',\n 'day': '20130603%5E20130609',\n 'classCd': 'GN0000',\n 'startDay': '20130603',\n 'endDay': '20130609',\n 'moved': 'Y'\n }\n\n data = {\n 'chartType': 'WE',\n 'searchDepth': '3',\n 'age': '2010',\n 'year': '2018',\n 'mon': '10'\n }\n\n while date > until:\n try:\n data['year'] = str(date.year)\n date_str = str(date.year*10000 + date.month*100 + date.day)\n data['mon'] = date_str[4:6]\n iter_list = get_search_range(data, payload)\n for pl in iter_list:\n w_dict = get_chart(pl)\n if len(w_dict['chart']) == 0:\n pl2 = pl.copy()\n pl2['classCd'] = 'DP0000'\n w_dict = get_chart(pl2)\n with open('./melon_chart_data/melon_chart' + pl['startDay'] + '-' + pl['endDay'] + '.json', 'w', encoding='utf-8-sig') as f:\n f.write(json.dumps(w_dict, indent=2, ensure_ascii=False))\n print('{} complete'.format(pl['startDay']))\n time.sleep(2)\n except:\n print('{} error'.format(date))\n time.sleep(3)\n date = date - relativedelta(months=+1)\n","repo_name":"HakkyuKim/Song_top100_survival_analysis","sub_path":"03.melon_crawler(dynamic).py","file_name":"03.melon_crawler(dynamic).py","file_ext":"py","file_size_in_byte":5735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20600583800","text":"from flask import Flask\nfrom flask import render_template\nfrom extensions import db\nfrom extensions import mail\nimport base64\nimport os\nfrom flask import request\nfrom flask import redirect\nimport datetime\n\nfrom models.employee import Employee\nfrom models.manager import Manager\nimport utils\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:root@127.0.0.1/aig_prd_form'\napp.config['SQLALCHEMY_POOL_SIZE'] = 5\napp.config['SQLALCHEMY_POOL_TIMEOUT'] = 120\napp.config['SQLALCHEMY_POOL_RECYCLE'] = 280\n\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = r'kmt.aigbusiness@gmail.com'\napp.config['MAIL_PASSWORD'] = r'atul123@#'\napp.config['MAIL_USE_TLS'] = 
False\napp.config['MAIL_USE_SSL'] = True\n\ndb.init_app(app)\nmail.init_app(app)\n\nwith app.app_context():\n db.create_all()\n\n@app.route(\"/\")\ndef main():\n return render_template('main.html')\n\n@app.route(\"/employee\" , methods=['POST'])\ndef save_data():\n emp_name = request.form.get('emp_name')\n emp_code = request.form.get('emp_code')\n emp_email= request.form.get('emp_email')\n job_function = request.form.get('job_function')\n date = request.form.get('date')\n reviewer_name = request.form.get('reviewer_name')\n reviewer_code = request.form.get('reviewer_code')\n self_assessment1 = request.form.get('self_assessment1')\n self_assessment1_comment1 = request.form.get('self_assessment1_comment1')\n self_assessment2 = request.form.get('self_assessment2')\n self_assessment2_comment2 = request.form.get('self_assessment2_comment2')\n self_assessment3 = request.form.get('self_assessment3')\n self_assessment3_comment3 = request.form.get('self_assessment3_comment3')\n self_assessment4 = request.form.get('self_assessment4')\n self_assessment4_comment4 = request.form.get('self_assessment4_comment4')\n self_assessment5 = request.form.get('self_assessment5')\n self_assessment5_comment5 = request.form.get('self_assessment5_comment5')\n self_assessment6 = request.form.get('self_assessment6')\n self_assessment6_comment6 = request.form.get('self_assessment6_comment6')\n rev_email = request.form.get('rev_email')\n\n\n employee_form = Employee(\n emp_code=emp_code,\n emp_name=emp_name,\n emp_email=emp_email,\n job_function=job_function,\n date=date,\n reviewer_name=reviewer_name,\n reviewer_code=reviewer_code,\n self_assessment1=self_assessment1,\n self_assessment1_comment1=self_assessment1_comment1,\n self_assessment2=self_assessment2,\n self_assessment2_comment2=self_assessment2_comment2,\n self_assessment3=self_assessment3,\n self_assessment3_comment3=self_assessment3_comment3,\n self_assessment4=self_assessment4,\n self_assessment4_comment4=self_assessment4_comment4,\n self_assessment5=self_assessment5,\n self_assessment5_comment5=self_assessment5_comment5,\n self_assessment6=self_assessment6,\n self_assessment6_comment6=self_assessment6_comment6,\n rev_email = rev_email,\n\n IP_addr=request.remote_addr,\n Location=request.form.get('location'),\n UserAgent=request.user_agent.browser,\n OperatingSystem=request.user_agent.platform,\n Time=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n\n )\n\n db.session.add(employee_form)\n db.session.commit()\n utils.send_link_as_mail(\n emp_name=emp_name,\n # emp_email=emp_email,\n rev_email=rev_email,\n reviewer_code=reviewer_code,\n emp_code=emp_code,\n\n )\n return redirect('/success')\n\n@app.route(\"/success\")\ndef success():\n return render_template('thankyou.html')\n\n@app.route(\"/document/<emp_code>/<reviewer_code>\")\ndef document(emp_code,reviewer_code):\n the_document = Employee.query.filter(Employee.emp_code == emp_code,Employee.reviewer_code==reviewer_code).order_by(\"id desc\").first()\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n #return str(BASE_DIR)\n\n return render_template('document.html', the_document=the_document, base_dir=BASE_DIR)\n\n\n@app.route(\"/manager\",methods=['POST'])\ndef save_managerdata():\n\n emp_code1 = request.form.get('emp_code1')\n reviewer_name1 = request.form.get('reviewer_name1')\n reviewer_code1 = request.form.get('reviewer_code1')\n\n manager_assessment1 = request.form.get('manager_assessment1')\n manager_assessment1_comment1 = request.form.get('manager_assessment1_comment1')\n total_score1 = 
request.form.get('total_score1')\n achieved_score1 = request.form.get('achieved_score1')\n\n manager_assessment2 = request.form.get('manager_assessment2')\n manager_assessment2_comment2 = request.form.get('manager_assessment2_comment2')\n total_score2 = request.form.get('total_score2')\n achieved_score2 = request.form.get('achieved_score2')\n\n manager_assessment3 = request.form.get('manager_assessment3')\n manager_assessment3_comment3 = request.form.get('manager_assessment3_comment3')\n total_score3 = request.form.get('total_score3')\n achieved_score3 = request.form.get('achieved_score3')\n\n manager_assessment4 = request.form.get('manager_assessment4')\n manager_assessment4_comment4 = request.form.get('manager_assessment4_comment4')\n total_score4 = request.form.get('total_score4')\n achieved_score4 = request.form.get('achieved_score4')\n\n manager_assessment5 = request.form.get('manager_assessment5')\n manager_assessment5_comment5 = request.form.get('manager_assessment5_comment5')\n total_score5 = request.form.get('total_score5')\n achieved_score5 = request.form.get('achieved_score5')\n\n manager_assessment6 = request.form.get('manager_assessment6')\n manager_assessment6_comment6 = request.form.get('manager_assessment6_comment6')\n total_score6 = request.form.get('total_score6')\n achieved_score6 = request.form.get('achieved_score6')\n\n rev_email1 = request.form.get('rev_email1')\n\n manager_form = Manager(\n\n emp_code1 = emp_code1,\n reviewer_name1 = reviewer_name1,\n reviewer_code1 = reviewer_code1,\n manager_assessment1 = manager_assessment1,\n manager_assessment1_comment1 = manager_assessment1_comment1,\n total_score1 = total_score1,\n achieved_score1 = achieved_score1,\n\n manager_assessment2=manager_assessment2,\n manager_assessment2_comment2=manager_assessment2_comment2,\n total_score2=total_score2,\n achieved_score2=achieved_score2,\n\n manager_assessment3=manager_assessment3,\n manager_assessment3_comment3=manager_assessment3_comment3,\n total_score3=total_score3,\n achieved_score3=achieved_score3,\n\n manager_assessment4=manager_assessment4,\n manager_assessment4_comment4=manager_assessment4_comment4,\n total_score4=total_score4,\n achieved_score4=achieved_score4,\n\n manager_assessment5=manager_assessment5,\n manager_assessment5_comment5=manager_assessment5_comment5,\n total_score5=total_score5,\n achieved_score5=achieved_score5,\n\n manager_assessment6=manager_assessment6,\n manager_assessment6_comment6=manager_assessment6_comment6,\n total_score6=total_score6,\n achieved_score6=achieved_score6,\n\n\n\n IP_addr=request.remote_addr,\n Location=request.form.get('location'),\n UserAgent=request.user_agent.browser,\n OperatingSystem=request.user_agent.platform,\n Time=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n\n )\n\n\n db.session.add(manager_form)\n db.session.commit()\n utils.send_manager_link_as_mail(\n\n rev_email1=rev_email1,\n reviewer_code1=reviewer_code1,\n emp_code1=emp_code1,\n\n )\n return redirect('/success')\n\n@app.route(\"/final_form/<emp_code1>/<reviewer_code1>\")\ndef final_document(emp_code1,reviewer_code1):\n the_final_document = Manager.query.filter(Manager.emp_code1 == emp_code1,Manager.reviewer_code1==reviewer_code1).order_by(\"id desc\").first()\n the_document = Employee.query.filter(Employee.emp_code == emp_code1,Employee.reviewer_code == reviewer_code1).order_by(\"id desc\").first()\n\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n #return str(BASE_DIR)\n\n return render_template('finaldocument.html', 
the_document=the_document,the_final_document=the_final_document, base_dir=BASE_DIR)\n","repo_name":"pawan0410/PRD","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24573511986","text":"__author__ = 'Quantik'\n\nDAILY_INTAKES = {\n 'KCAL': {'name':'Calories', 'value':\tu'2000 cal'},\n 'FAT': {'name':'Total Fat', 'value':\tu'65 g'},\n\t'FATSAT': {'name':'Saturated Fatty Acids', 'value':u'20 g'},\n 'CHOLE': {'name':'Cholesterol', 'value':u'300 mg'},\n\t'NA': {'name':'Sodium', 'value': u'2400 mg'},\n\t'K': {'name':'Potassium', 'value': u'4700 mg'},\n\t'CHOCDF': {'name':'Total Carbohydrate', 'value': u'300 g'},\n\t'FIBTG':{'name':'Dietary Fiber', 'value': u'25 g'},\n\t'PROCNT': {'name':'Protein', 'value': u'50 g'},\n\t'VITA_RAE':{'name':'Vitamin A', 'value': u'900 \\u03bcg'},\n 'VITC': {'name':'Ascorbic Acid Vitamin C', 'value': u'60 mg'},\n 'CA': {'name':'Calcium', 'value': u'1000 mg'},\n 'FE':{'name':'Iron', 'value': u'18 mg'},\n 'VITD-': {'name':'Cholecalciferol Vitamin D', 'value': u'400 IU'},\n 'TOCPHA':{'name':'Tocopherol Vitamin E', 'value': u'30 IU'},\n 'VITK':{'name':'Vitamin K', 'value': u'80 \\u03bcg'},\n 'THIA':{'name':'Thiamin Vitamin B1', 'value': u'1.5 mg'},\n 'RIBF':{'name':'Riboflavin Vitamin B2', 'value': u'1.7 mg'},\n 'NIA':{'name':'Niacin Vitamin B3', 'value':\tu'20 mg'},\n 'VITB6A':{'name':'Pyridoxine Vitamin B6', 'value':\tu'2 mg'},\n 'FOLDFE':{'name':'Folate', 'value':\tu'400 \\u03bcg'},\n 'VITB12':{'name':'Cobalamine Vitamin B12', 'value':\tu'6 \\u03bcg'},\n 'P':{'name':'Phosphorus', 'value':\tu'1000 mg'},\n 'MG':{'name':'Magnesium', 'value':\tu'400 mg'},\n 'ZN':{'name':'Zinc', 'value': u'15 mg'},\n 'SE':{'name':'Selenium', 'value': u'70 \\u03bcg'},\n 'CU':{'name':'Copper', 'value': u'2000 \\u03bcg'},\n 'MN':{'name':'Manganese', 'value': u'2 mg'}\n}\n\n\n# Biotin\t300 \\u03bcg\t30 \\u03bcg\n# Pantothenic acid Vitamin B5\t10 mg\t5 mg\n# Iodine\t150 \\u03bcg\t150 \\u03bcg\n# Chromium\t120 \\u03bcg\t35 \\u03bcg\n# Molybdenum\t75 \\u03bcg\t45 \\u03bcg\n# Chloride\t3400 mg\t2300 mg\n","repo_name":"quantumlicht/menu_builder","sub_path":"src/recipe/lib/daily_intake.py","file_name":"daily_intake.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25169390095","text":"import pandas as pd\nimport pytest\n\nfrom superset.exceptions import InvalidPostProcessingError\nfrom superset.utils import pandas_postprocessing as pp\nfrom tests.unit_tests.fixtures.dataframes import categories_df\n\n\ndef test_rename_should_not_side_effect():\n _categories_df = categories_df.copy()\n pp.rename(\n df=_categories_df,\n columns={\n \"constant\": \"constant_newname\",\n \"category\": \"category_namename\",\n },\n )\n assert _categories_df.equals(categories_df)\n\n\ndef test_rename():\n new_categories_df = pp.rename(\n df=categories_df,\n columns={\n \"constant\": \"constant_newname\",\n \"category\": \"category_newname\",\n },\n )\n assert list(new_categories_df.columns.values) == [\n \"constant_newname\",\n \"category_newname\",\n \"dept\",\n \"name\",\n \"asc_idx\",\n \"desc_idx\",\n \"idx_nulls\",\n ]\n assert not new_categories_df.equals(categories_df)\n\n\ndef test_should_inplace_rename():\n _categories_df = categories_df.copy()\n _categories_df_inplaced = pp.rename(\n df=_categories_df,\n columns={\n \"constant\": \"constant_newname\",\n \"category\": \"category_namename\",\n 
},\n inplace=True,\n )\n assert _categories_df_inplaced.equals(_categories_df)\n\n\ndef test_should_rename_on_level():\n iterables = [[\"m1\", \"m2\"], [\"a\", \"b\"], [\"x\", \"y\"]]\n columns = pd.MultiIndex.from_product(iterables, names=[None, \"level1\", \"level2\"])\n df = pd.DataFrame(index=[0, 1, 2], columns=columns, data=1)\n \"\"\"\n m1 m2\n level1 a b a b\n level2 x y x y x y x y\n 0 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1\n 2 1 1 1 1 1 1 1 1\n \"\"\"\n post_df = pp.rename(\n df=df,\n columns={\"m1\": \"new_m1\"},\n level=0,\n )\n assert post_df.columns.get_level_values(level=0).equals(\n pd.Index(\n [\n \"new_m1\",\n \"new_m1\",\n \"new_m1\",\n \"new_m1\",\n \"m2\",\n \"m2\",\n \"m2\",\n \"m2\",\n ]\n )\n )\n\n\ndef test_should_raise_exception_no_column():\n with pytest.raises(InvalidPostProcessingError):\n pp.rename(\n df=categories_df,\n columns={\n \"foobar\": \"foobar2\",\n },\n )\n\n\ndef test_should_raise_exception_duplication():\n with pytest.raises(InvalidPostProcessingError):\n pp.rename(\n df=categories_df,\n columns={\n \"constant\": \"category\",\n },\n )\n\n\ndef test_should_raise_exception_duplication_on_multiindx():\n iterables = [[\"m1\", \"m2\"], [\"a\", \"b\"], [\"x\", \"y\"]]\n columns = pd.MultiIndex.from_product(iterables, names=[None, \"level1\", \"level2\"])\n df = pd.DataFrame(index=[0, 1, 2], columns=columns, data=1)\n \"\"\"\n m1 m2\n level1 a b a b\n level2 x y x y x y x y\n 0 1 1 1 1 1 1 1 1\n 1 1 1 1 1 1 1 1 1\n 2 1 1 1 1 1 1 1 1\n \"\"\"\n\n with pytest.raises(InvalidPostProcessingError):\n pp.rename(\n df=df,\n columns={\n \"m1\": \"m2\",\n },\n level=0,\n )\n pp.rename(\n df=df,\n columns={\n \"a\": \"b\",\n },\n level=1,\n )\n\n\ndef test_should_raise_exception_invalid_level():\n with pytest.raises(InvalidPostProcessingError):\n pp.rename(\n df=categories_df,\n columns={\n \"constant\": \"new_constant\",\n },\n level=100,\n )\n pp.rename(\n df=categories_df,\n columns={\n \"constant\": \"new_constant\",\n },\n level=\"xxxxx\",\n )\n\n\ndef test_should_return_df_empty_columns():\n assert pp.rename(\n df=categories_df,\n columns={},\n ).equals(categories_df)\n","repo_name":"apache/superset","sub_path":"tests/unit_tests/pandas_postprocessing/test_rename.py","file_name":"test_rename.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","stars":55269,"dataset":"github-code","pt":"61"} +{"seq_id":"43144260610","text":"class Solution:\n def kthDistinct(self, arr: List[str], k: int) -> str:\n s = set()\n l = []\n for i in arr:\n if arr.count(i)==1:\n l.append(i)\n \n print(l)\n if len(l)>=k:\n return l[k-1]\n return \"\"\n ","repo_name":"hardik302001/leetcode","sub_path":"problems/kth_distinct_string_in_an_array/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34781755328","text":"#手把手写神经网络\nimport math\n\nimport matplotlib\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tf_utils import load_dataset,random_mini_batches,convert_to_one_hot,predict\n\nnp.random.seed(1)\n\nX_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()\n\nindex = 0\nplt.imshow(X_train_orig[index])\nprint(\"y = \" + str(np.squeeze(Y_train_orig[:,index])))\n\nX_train_flatten = X_train_orig.reshape(X_train_orig.shape[0], -1).T\nX_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], 
-1).T\n# 简单的归一化\nX_train = X_train_flatten / 255.\nX_test = X_test_flatten / 255.\n# one hot编码\nY_train = convert_to_one_hot(Y_train_orig, 6)\nY_test = convert_to_one_hot(Y_test_orig, 6)\n\nprint(\"number of training examples = \" + str(X_train.shape[1]))\nprint(\"number of test examples = \" + str(X_test.shape[1]))\nprint(\"X_train shape: \" + str(X_train.shape))\nprint(\"Y_train shape: \" + str(Y_train.shape))\nprint(\"X_test shape: \" + str(X_test.shape))\nprint(\"Y_test shape: \" + str(Y_test.shape))\n\n# print(\"total is :\"+str(X_test))\n\ndef create_placeholders(n_x,n_y):\n X = tf.placeholder(tf.float32, [n_x, None], name=\"X\")\n Y = tf.placeholder(tf.float32, [n_y, None], name=\"Y\")\n\n return X,Y\n\nX,Y = create_placeholders(12288,6)\nprint(\"X = \" + str(X))\nprint(\"Y = \" + str(Y))\n\ndef initialize_parameters():\n tf.set_random_seed(1)\n W1 = tf.get_variable(\"W1\",[25,12288],initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b1 = tf.get_variable(\"b1\",[25,1],initializer=tf.zeros_initializer())\n W2 = tf.get_variable(\"W2\",[12,25],initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b2 = tf.get_variable(\"b2\",[12,1],initializer=tf.zeros_initializer())\n W3 = tf.get_variable(\"W3\",[6,12],initializer=tf.contrib.layers.xavier_initializer(seed=1))\n b3 = tf.get_variable(\"b3\", [6, 1], initializer=tf.zeros_initializer())\n\n parameters = {\n \"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3\n }\n return parameters\ntf.reset_default_graph()\nwith tf.Session() as sess:\n parameters = initialize_parameters()\n print(\"W1 = \" + str(parameters[\"W1\"]))\n print(\"b1 = \" + str(parameters[\"b1\"]))\n print(\"W2 = \" + str(parameters[\"W2\"]))\n print(\"b2 = \" + str(parameters[\"b2\"]))\n\n\ndef forward_propagation(X,parameters):\n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n\n # 计算第一层的z\n Z1 = tf.add(tf.matmul(W1, X), b1)\n # 在第一层的z上面执行relu激活操作,得到第一层的a。\n # 注意,tensorflow中已经帮我们实现了relu函数。\n # 之前是我们自己写不少python代码才能实现relu操作的。\n A1 = tf.nn.relu(Z1)\n Z2 = tf.add(tf.matmul(W2, A1), b2)\n A2 = tf.nn.relu(Z2)\n Z3 = tf.add(tf.matmul(W3, A2), b3)\n\n return Z3\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n X, Y = create_placeholders(12288, 6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X, parameters)\n print(\"Z3 = \" + str(Z3))\n\ndef computer_cost(Z3,Y):\n logits = tf.transpose(Z3)\n labels = tf.transpose(Y)\n\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits,labels=labels))\n return cost\n\ntf.reset_default_graph()\nwith tf.Session() as sess:\n X,Y = create_placeholders(12288,6)\n parameters = initialize_parameters()\n Z3 = forward_propagation(X,parameters)\n cost = computer_cost(Z3,Y)\n print(\"cost = \" + str(cost))\n\n\ndef model(X_train, Y_train, X_test, Y_test, learning_rate=0.0001,\n num_epochs=1500, minibatch_size=32, print_cost=True):\n ops.reset_default_graph() # 将计算图返回到默认空状态\n tf.set_random_seed(1)\n seed = 3\n (n_x, m) = X_train.shape # (n_x: 特征数量, m : 训练集中的样本数)\n n_y = Y_train.shape[0]\n costs = []\n\n # 创建占位符\n X, Y = create_placeholders(n_x, n_y)\n\n # 初始化参数\n parameters = initialize_parameters()\n\n # 构建前向传播操作\n Z3 = forward_propagation(X, parameters)\n\n # 构建成本计算操作\n cost = computer_cost(Z3, Y)\n\n # 构建反向传播,为反向传播指定优化算法和学习率以及成本函数,这里我们使用adam算法,\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n\n # 定义初始化操作\n init = 
tf.global_variables_initializer()\n\n # 开始一个tensorflow的session\n with tf.Session() as sess:\n\n # 执行初始化操作\n sess.run(init)\n\n # 执行epochs指定的训练次数,一个epoch就是完整的向整个数据集学习一次\n for epoch in range(num_epochs):\n\n epoch_cost = 0.\n num_minibatches = int(m / minibatch_size) # 计算有多少个子训练集\n seed = seed + 1\n # 将数据集分成若干子训练集\n minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)\n\n # 循环遍历每一个子训练集\n for minibatch in minibatches:\n (minibatch_X, minibatch_Y) = minibatch\n\n # 这行代码会使整个计算图被执行,从前向传播操作到反向传播操作,最后到参数更新操作。\n _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})\n\n epoch_cost += minibatch_cost / num_minibatches\n\n if print_cost == True and epoch % 100 == 0:\n print(\"Cost after epoch %i: %f\" % (epoch, epoch_cost))\n if print_cost == True and epoch % 5 == 0:\n costs.append(epoch_cost)\n\n # 画出cost成本的走势图\n plt.plot(np.squeeze(costs))\n plt.ylabel('cost')\n plt.xlabel('iterations (per tens)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n # 从计算图中获取训练好了的参数,后面我们就可以用这些参数来识别手语了!\n parameters = sess.run(parameters)\n print(\"Parameters have been trained!\")\n\n # 分别计算一下在训练集和测试集上面的预测精准度\n correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n print(\"Train Accuracy:\", accuracy.eval({X: X_train, Y: Y_train}))\n print(\"Test Accuracy:\", accuracy.eval({X: X_test, Y: Y_test}))\n\n return parameters\nparameters = model(X_train, Y_train, X_test, Y_test)\n\n\n","repo_name":"a13483685/AiStudyOrigin","sub_path":"AppOne.py","file_name":"AppOne.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32691921294","text":"from django import forms\nfrom .models import Message\nfrom django.contrib.auth.models import User\nfrom accounts.models import Profile\n\n\n# Message Form - for the message system; this is what the user fills out\n# user can select who to send to (receiver), reason for message, and type what he wants to say (msg_content)\nclass MessageForm(forms.ModelForm):\n class Meta:\n model = Message\n fields = ['receiver', 'reason', 'msg_content']\n labels = {\n 'msg_content': 'Message content'\n }\n\n # checks if user needs to contact a user that can process complaints (OU for docs and SU for general)\n # e.g, reporting someone: valid receivers are SUs only.\n def __init__(self, *args, **kwargs):\n is_contacting_authority = kwargs.pop('is_contacting_authority')\n super(MessageForm, self).__init__(*args, **kwargs)\n if is_contacting_authority:\n users = User.objects.all()\n self.fields['receiver'].queryset = User.objects.filter(profile__cohort__gte=Profile.ORDINARY_USER)\n else:\n self.fields['receiver'].queryset = User.objects.all()\n","repo_name":"Kamide/DSS","sub_path":"msg_system/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"75146957314","text":"import sys\nsys.path.append('./src/')\nfrom process.putils import load_file_from_local, process\nfrom process.build import build_dim_date\nfrom pandas import DataFrame\nfrom pandas import testing as pdtesting\nfrom pandas import to_datetime as to_datetime\nfrom numpy import equal\nimport numpy as np\n\ndef test_returns_dataframe():\n df = build_dim_date('2020/01/01', '2022/01/01')\n assert isinstance(df, DataFrame)\n \ndef 
test_returned_dataframe_has_expected_index_and_columns():\n df = build_dim_date('2020/01/01', '2022/01/01')\n expected_cols = ['date_id', 'year', 'month', 'day', 'day_of_week', 'day_name', 'month_name', 'quarter']\n actual_cols = df.columns.values\n assert equal(expected_cols, actual_cols).all()\n \ndef test_returned_data_columns_have_expected_data_types():\n df = build_dim_date('2020/01/01', '2022/01/01')\n cols = ['year', 'month', 'day', 'day_of_week', 'day_name', 'month_name', 'quarter']\n expected_dtypes = ['int64', 'int64', 'int64', 'int64', 'O', 'O', 'int64']\n types = df.dtypes\n for i in range(len(cols)):\n assert types[cols[i]] == expected_dtypes[i]\n \ndef test_returned_data_is_correct():\n df = build_dim_date('2023/03/20', '2023/03/26')\n expected_first_row = [to_datetime('2023-03-20'), 2023, 3, 20, 0, 'Monday', 'March', 1]\n actual_first_row = df.iloc[0]\n for i in range(len(expected_first_row)):\n assert expected_first_row[i] == actual_first_row[i]","repo_name":"rexhao362/de-jan-23-project","sub_path":"test/lambdas/process/build/test_build_dim_date.py","file_name":"test_build_dim_date.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14106323864","text":"class Solution:\n def pruneTree(self, root: TreeNode) -> TreeNode:\n def recursion(node):\n if not node: return 0\n l = recursion(node.left)\n if l == 0: node.left = None\n r = recursion(node.right)\n if r == 0: node.right = None\n return max(l, r, node.val)\n \n if recursion(root) == 0:return None\n return root","repo_name":"Sol-cito/LeetCoding","sub_path":"binary-tree-pruning/binary-tree-pruning.py","file_name":"binary-tree-pruning.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34871647122","text":"import pandas as pd\n\nimport json\nimport importlib\nimport os\nfrom pathlib import Path\n\nfrom tessif.analyze import ComparativeResultier\nfrom tessif import parse\nfrom tessif.frused.paths import doc_dir\nimport tessif.examples.data.tsf.py_hard as tsf_examples\nimport tessif.simulate as optimize\nfrom tessif.transform.es2mapping import compile_result_data_representation\nimport tessif.visualize.dcgrph as dcv\nfrom tessif.frused.hooks.tsf import reparameterize_components\n\n# PERIODS = 3\nPERIODS = 8760\n# EXPANSION = False\nEXPANSION = True\n\n\nFOLDER = \"commitment_results\"\n# FOLDER = \"expansion_results\"\nFOLDER = \"modified_expansion_results\"\n\nFOLDER = \"trivia_results\"\n# FOLDER = \"avs_results\"\n# FOLDER = \"test_results\"\n\n# define the softwares to be used\nSOFTWARES = ['cllp', 'fine', 'omf', 'ppsa', ]\n# use this in case you are just testing out the water\nSOFTWARES = ['ppsa', ]\nSOFTWARES = ['omf', ]\nHOOK_PYPSA = True\n\nCYTOSCAPE_ADVANCED_GRAPH = True\nMATPLOTLIB_ADVANCED_GRAPH = False\nADVANCED_GRAPH_ON = \"omf\"\n\nPARENT = os.path.join(\n doc_dir,\n \"source\",\n \"getting_started\",\n \"examples\",\n \"application\",\n \"phd\",\n \"field_study\",\n \"CompCnE\",\n)\n\n\ndef reparam_ppsa(tessif_es):\n reparameterized_es = reparameterize_components(\n es=tessif_es,\n components={\n 'Hard Coal CHP': {\n 'flow_emissions': {'Hard_Coal': 0, 'electricity': 0, 'hot_water': 0},\n },\n 'Hard Coal Supply': {\n 'flow_emissions': {'Hard_Coal': 0.8 * 0.4 + 0.06 * 0.4},\n },\n 'Biogas CHP': {\n 'flow_emissions': {'biogas': 0, 'electricity': 0, 'hot_water': 0},\n },\n 'Biogas Supply': {\n 'flow_emissions': 
{'biogas': 0.25 * 0.4 + 0.01875 * 0.5},\n },\n\n },\n\n )\n return reparameterized_es\n\n\n# create dispatch problem aka TransC or TransE combination\ncreation_module_path = os.path.join(PARENT, \"creation.py\")\n\ncreation_module = parse.python_file(creation_module_path)\ntessif_CompCnE = creation_module.create_compcne_es(\n periods=PERIODS, expansion=EXPANSION,)\n\n\n# dynamically access the tessif transform utilities based on requested\n# softwares above. Store them in a dictionairy for\n# ease of access.\ntransformers = {}\nfor software in SOFTWARES:\n transformers[software] = importlib.import_module(\n '.'.join(['tessif.transform.es2es', software]))\n\n# Do the tessif -> software transformations and store them in a dictionairy for\n# ease of access\ntransformed_CompCnE_combinations = {}\nfor software in SOFTWARES:\n # local copy of the tessif es:\n copied_es = tessif_CompCnE.duplicate(suffix='')\n if software == 'ppsa' and HOOK_PYPSA:\n copied_es = reparam_ppsa(copied_es)\n\n transformed_CompCnE_combinations[software] = transformers[\n software].transform(copied_es)\n\n\n# Perform the software specific optimizations\noptimized_CompCnE_combinations = {}\nfor software in SOFTWARES:\n optimizer = getattr(optimize, \"_\".join([software, \"from_es\"]))\n optimized_CompCnE_combinations[software] = optimizer(\n transformed_CompCnE_combinations[software])\n\n# post process the allresultiers:\nall_resultiers = {}\nfor software in SOFTWARES:\n post_processor = importlib.import_module(\n '.'.join(['tessif.transform.es2mapping', software]))\n all_resultiers[software] = post_processor.AllResultier(\n optimized_CompCnE_combinations[software])\n\n# post process the comparative results using the constructed all-resultiers:\ncomparatier = ComparativeResultier(all_resultiers)\n\n\ndata_storage_path = os.path.join(PARENT, FOLDER)\n\n# store the all_loads results\nfor software in SOFTWARES:\n\n result_id = f\"{software}_all_loads\"\n storage_location = os.path.join(data_storage_path, result_id)\n result_df = comparatier.all_loads[software]\n result_df.to_csv(\".\".join([storage_location, \"csv\"]))\n\n# store the all_socs results\nresult_id = f\"all_socs\"\nstorage_location = os.path.join(data_storage_path, result_id)\nresult_df = comparatier.all_socs\nresult_df.to_csv(\".\".join([storage_location, \"csv\"]))\n\n# store the rest of the all_* results:\nfor rtype in [\n \"all_capacities\",\n \"all_original_capacities\",\n \"all_net_energy_flows\",\n \"all_costs_incurred\",\n \"all_emissions_caused\",\n]:\n result_id = rtype\n storage_location = os.path.join(data_storage_path, result_id)\n result_df = getattr(comparatier, rtype)\n result_df.to_csv(\".\".join([storage_location, \"csv\"]))\n\n\nresult_types = ['Load', 'Capacity', 'IntegratedGlobal', ]\npost_processed_data = {}\nfor software in SOFTWARES:\n post_processor = importlib.import_module(\n '.'.join(['tessif.transform.es2mapping', software]))\n post_processed_data[software] = {}\n for result_type in result_types:\n post_processed_data[software][result_type] = getattr(\n post_processor, \"\".join([result_type, \"Resultier\"]))(\n optimized_CompCnE_combinations[software])\n\nwanted_results = {\n 'Load': 'node_load',\n 'Capacity': 'node_installed_capacity',\n 'IntegratedGlobal': 'global_results',\n}\nnodes_of_interest = {\n 'Load': ['Powerline', 'Heatline'],\n 'Capacity': [],\n 'IntegratedGlobal': [],\n}\n\n\nfor software in SOFTWARES:\n for rtype in result_types:\n if nodes_of_interest[rtype]:\n for node in nodes_of_interest[rtype]:\n result_id = 
f\"{software}_{rtype}_{node}\"\n storage_location = os.path.join(data_storage_path, result_id)\n res = getattr(post_processed_data[software][rtype], wanted_results[rtype])[\n node]\n if rtype == 'Load':\n # store timeseries results\n res.name = f\"{software}_timeseries_{rtype}_{node}\"\n timeseries_storage_location = storage_location.replace(\n rtype, \"timeseries_\"+rtype)\n res.to_json(\n \".\".join([timeseries_storage_location, \"json\"]),\n orient=\"split\",\n )\n\n # store regular loads as summed loads\n res = res.sum()\n res.name = result_id\n res.to_json(\n \".\".join([storage_location, \"json\"]), orient=\"split\")\n\n else:\n result_id = f\"{software}_{rtype}\"\n storage_location = os.path.join(data_storage_path, result_id)\n res = getattr(\n post_processed_data[software][rtype], wanted_results[rtype])\n res = pd.Series(res.values(), index=res.keys())\n res.name = result_id\n res.to_json(\n \".\".join([storage_location, \"json\"]), orient=\"split\")\n\n\n# draw advanced graphs\nadvanced_graphs = {}\nif CYTOSCAPE_ADVANCED_GRAPH:\n if ADVANCED_GRAPH_ON in SOFTWARES:\n\n reference_capacity = all_resultiers[\n ADVANCED_GRAPH_ON].node_installed_capacity[\"El Demand\"]\n reference_net_energy_flow = all_resultiers[\n ADVANCED_GRAPH_ON].edge_net_energy_flow[(\"Powerline\", \"El Demand\")]\n reference_emissions = all_resultiers[\n ADVANCED_GRAPH_ON].edge_specific_emissions[(\"Solar Panel\", \"Powerline\")]\n\n app = dcv.draw_advanced_graph(\n optimized_es=optimized_CompCnE_combinations[ADVANCED_GRAPH_ON],\n layout='cose',\n # layout_nodeDimensionsIncludeLabels=True,\n node_shape=\"circle\",\n node_color={\n 'Hard Coal Supply': '#666666',\n 'Hard Coal Supply Line': '#666666',\n 'Hard Coal PP': '#666666',\n 'Hard Coal CHP': '#666666',\n 'Solar Panel': '#FF7700',\n 'Heat Storage': '#cc0033',\n 'Heat Demand': 'Red',\n 'Heat Plant': '#cc0033',\n 'Heatline': 'Red',\n 'Power To Heat': '#cc0033',\n 'Biogas CHP': '#006600',\n 'Biogas Line': '#006600',\n 'Biogas Supply': '#006600',\n 'Onshore Wind Turbine': '#99ccff',\n 'Offshore Wind Turbine': '#00ccff',\n 'Gas Station': '#336666',\n 'Gas Line': '#336666',\n 'Combined Cycle PP': '#336666',\n 'El Demand': '#ffe34d',\n 'Battery': '#ffe34d',\n 'Powerline': '#ffcc00',\n 'Lignite Supply': '#993300',\n 'Lignite Supply Line': '#993300',\n 'Lignite Power Plant': '#993300',\n },\n reference_node_width=reference_capacity,\n reference_edge_width=reference_net_energy_flow/13,\n reference_edge_blackness=reference_emissions,\n node_border_width=0.1,\n node_fill_border_width=1.5,\n # edge_minimum_grey=0.15,\n nodes_to_remove=[\n \"Hard Coal Supply\",\n \"Hard Coal Supply Line\",\n \"Hard Coal PP\",\n \"Hard Coal CHP\",\n\n \"Lignite Supply\",\n \"Lignite Supply Line\",\n \"Lignite Power Plant\",\n\n \"Biogas Supply\",\n \"Biogas Line\",\n\n \"Gas Station\",\n \"Gas Line\",\n\n \"Heat Plant\"\n ],\n )\n\n app.run_server()\n\n\nif MATPLOTLIB_ADVANCED_GRAPH:\n if ADVANCED_GRAPH_ON in SOFTWARES:\n from tessif.transform.es2mapping import omf as tomf\n from tessif.transform import nxgrph as nxt\n import matplotlib.pyplot as plt\n import tessif.visualize.nxgrph as nxv\n\n es = optimized_CompCnE_combinations[software]\n formatier = tomf.AllFormatier(es, cgrp='all')\n grph = nxt.Graph(tomf.FlowResultier(es))\n\n for key, value in formatier.edge_data()['edge_width'].items():\n formatier.edge_data()['edge_width'][key] = 4 * value\n\n nxv.draw_graphical_representation(\n formatier=formatier, colored_by='sector')\n\n figure = plt.gcf()\n figure.show()\n\n# Use this in case 
you want to see the result representation of certain nodes\n# rdrs = {}\n# for software in SOFTWARES:\n# rdrs[software] = compile_result_data_representation(\n# optimized_es=optimized_CompCnE_combinations[software],\n# software=software,\n# node=\"Heatline\",\n# )\n# # print(rdrs['fine'])\n","repo_name":"tZ3ma/tessif-phd","sub_path":"docs/source/getting_started/examples/application/phd/field_study/CompCnE/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":10365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"12111936414","text":"import os\nfrom PIL import Image\nFILE_SIZE_LIMIT = 7*1024*1024\nAUTHOR = \"20020570007\"\n\n\ndef convert(images, ques_number):\n n = len(images)\n if AUTHOR:\n final_file = f'Question_{ques_number}_RollNo_{AUTHOR}.pdf'\n else:\n final_file = f'Question_{ques_number}.pdf'\n try:\n im1 = Image.open(images[0]).convert('RGB')\n extra_images = []\n for i in range(1, n):\n extra_images.append(Image.open(images[i]).convert('RGB'))\n im1.save(final_file, save_all=True, append_images=extra_images)\n file_size = \"%.2f\" % (os.path.getsize(final_file)/(1024*1024))\n print(f\"{final_file} created! (size = {file_size} Mb)\")\n except Exception as e:\n print(e)\n\n\ndef main(questions, pages_per_question, cover_page=None, question_numbers = None):\n assert len(pages_per_question) == questions # valid set of inputs\n\n files = sorted(\n [i for i in os.listdir() if (len(i) > 4 and i != cover_page) and (i[-4:] == \".png\" or (i[-5:] == \".jpeg\" or i[-4:] == \".jpg\"))]\n )\n\n if cover_page:\n assert cover_page in os.listdir()\n\n assert sum(pages_per_question) == len(files) # else missing pages\n\n if question_numbers:\n assert len(pages_per_question) == len(question_numbers)\n\n ctr = 0\n ques_count = 1\n for i in pages_per_question:\n if cover_page:\n current_set = [cover_page]+files[ctr:ctr+i]\n else:\n current_set = files[ctr:ctr+i]\n if question_numbers:\n question_number = question_numbers[ques_count-1] if question_numbers else ques_count\n\n convert(current_set, question_number)\n ctr += i\n ques_count += 1\n\n\nif __name__ == '__main__':\n main(3, [2,3,2], cover_page=\"cover.jpeg\", question_numbers=[1,2,6])\n","repo_name":"abhinav1912/Images-to-PDF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30885836521","text":"#!/usr/bin/env python3\r\n\r\n'''\r\n===============================================================================\r\nENGR 133 Program Description \r\n\treplace this text with your program description as a comment\r\n\r\nAssignment Information\r\n\tAssignment: Py1_CFU\r\n\tAuthor: Zachary Williams, will2051@purdue.edu\r\n\tTeam ID: 001-01\r\n\t\r\nContributor: Name, login@purdue [repeat for each]\r\n\tMy contributor(s) helped me:\t\r\n\t[ ] understand the assignment expectations without\r\n\t\ttelling me how they will approach it.\r\n\t[ ] understand different ways to think about a solution\r\n\t\twithout helping me plan my solution.\r\n\t[ ] think through the meaning of a specific error or\r\n\t\tbug present in my code without looking at my code.\r\n\tNote that if you helped somebody else with their code, you\r\n\thave to list that person as a contributor here as well.\r\n===============================================================================\r\n'''\r\nfrom math import pi, pow, sqrt\r\n\r\n#Radius, height, and 
calculated surface area of cone\r\nr = 3\r\nh = 4\r\nsurfArea = (pi * pow(r,2)) + (pi * r * sqrt(pow(r,2) + pow(h,2)))\r\n\r\n#Outputs radius, height, and surface area\r\nprint(f\"Given a radius of {r}[cm] and a height of {h}[cm], the surface area of a cone is {surfArea}[cm^2]\")\r\n\r\n'''\r\n===============================================================================\r\nACADEMIC INTEGRITY STATEMENT\r\n I have not used source code obtained from any other unauthorized\r\n source, either modified or unmodified. Neither have I provided\r\n access to my code to another. The project I am submitting\r\n is my own original work.\r\n===============================================================================\r\n'''","repo_name":"ztwilliams197/ENGR-133","sub_path":"Python/Python 1/CFU/Py1_CFU_will2051.py","file_name":"Py1_CFU_will2051.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23586919641","text":"from math import pi\nwith open(\"input.txt\",\"r\") as reader, open(\"output.txt\",\"w\") as writer:\n cases = int(reader.readline())\n for cs in range(cases):\n n,k = map(int,reader.readline().split())\n cakes = []\n for _ in range(n):\n cakes.append(tuple(map(int,reader.readline().split())))\n cakes = sorted(cakes,reverse=True)\n dp = [[0 for _ in range(n)] for _ in range(k+1)]\n for j in range(n):\n dp[1][j] = (cakes[j][0] ** 2 + 2 * cakes[j][0] * cakes[j][1]) * pi\n for i in range(2,k+1):\n for j in range(n):\n for h in range(j):\n if cakes[h][0] >= cakes[j][0]:\n dp[i][j] = max(dp[i][j],dp[i-1][h]+ 2 * cakes[j][0] * cakes[j][1] * pi)\n answer = 0\n for j in range(n):\n answer = max(dp[k][j],answer)\n writer.write(\"Case #{}: {}\\n\".format(cs+1,answer))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/687.py","file_name":"687.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74335374275","text":"\"\"\"Pipeline for experiments with Oja's Rule with added noise.\"\"\"\nfrom tasks.oja_net_small import Factory as AF\n\n\nclass Factory(AF):\n def __init__(\n self,\n path_to_fit_conf=\"./tasks/oja_net_noise_small/\",\n path_to_sim_conf=\"../data/oja_net_noise_small/\",\n resume=False,\n resume_dir=None) -> None:\n \"\"\"Set up factory for Oja's rule and small net with noise pipeline.\"\"\"\n super(Factory, self).__init__(\n path_to_fit_conf, path_to_sim_conf, resume, resume_dir)\n","repo_name":"mackelab/synapsegan","sub_path":"tasks/oja_net_noise_small/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23581736011","text":"import math\r\n\r\nt = int(input())\r\nfor i in range(1, t + 1):\r\n d, n = input().split(\" \")\r\n d = int(d)\r\n n = int(n)\r\n\r\n max_time = -math.inf\r\n\r\n for j in range(0, n):\r\n k, s = input().split(\" \")\r\n k = int(k)\r\n s = int(s)\r\n max_time = max(max_time, (d - k) / s)\r\n\r\n print(\"Case #\" + str(i) + \": \" + str(\"{0:.6f}\".format(d / max_time)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/813.py","file_name":"813.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42713276876","text":"\nimport sys\n\n\ndef read():\n return 
sys.stdin.readline().rstrip()\n\n\ndef similar_palendrome(s, head_remove):\n removed = False\n head = 0\n tail = len(s)-1\n while head <= tail:\n if s[head] == s[tail]:\n head += 1\n tail -= 1\n else:\n if removed == True:\n return 2\n removed = True\n if head_remove == True:\n head += 1\n else:\n tail -= 1\n result = 1 if removed else 0\n return result\n\n\nT = int(read())\n\nfor _ in range(T):\n s = input()\n head_removed = similar_palendrome(s, True)\n tail_removed = similar_palendrome(s, False)\n if head_removed == 0:\n print(0)\n elif head_removed == 1 or tail_removed == 1:\n print(1)\n else:\n print(2)\n","repo_name":"SJ0000/PS","sub_path":"BOJ/BOJ_17609.py","file_name":"BOJ_17609.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2493297972","text":"from flask import Flask,render_template,request\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'\ndb = SQLAlchemy(app)\n\nclass User(db.Model):\n id = db.Column(db.Integer , primary_key = True)\n customerName = db.Column(db.String(20), nullable = True)\n rate = db.Column(db.Integer ,nullable = True)\n def __repr__(self) :\n return f\"User('{self.id}','{self.customerName}','{self.rate}')\"\n\nsum=0\ndata = User.query.all()\nfor i in data:\n sum = sum+i.rate\n\n@app.route('/')\ndef home():\n return render_template(\"index.html\",data=data,sum=sum)\n\n\nif __name__=='__main__':\n app.run(debug=True)","repo_name":"kunal9168397624/feedback","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31988736464","text":"# S盒子\r\nSBox_1 = [[1, 0, 3, 2],\r\n [3, 2, 1, 0],\r\n [0, 2, 1, 3],\r\n [3, 1, 0, 2]]\r\n\r\nSBox_2 = [[0, 1, 2, 3],\r\n [2, 3, 1, 0],\r\n [3, 0, 1, 2],\r\n [2, 1, 0, 3]]\r\n\r\n\r\n# 置换P10\r\ndef P10(key):\r\n return key[2] + key[4] + key[1] + key[6] + key[3] + key[9] + key[0] + key[8] + key[7] + key[5]\r\n\r\n\r\n# 左移\r\ndef Shift(key, n):\r\n left_part = key[:n]\r\n right_part = key[n:]\r\n return right_part + left_part\r\n\r\n\r\n# 置换P8\r\ndef P8(key):\r\n return key[5] + key[2] + key[6] + key[3] + key[7] + key[4] + key[9] + key[8]\r\n\r\n\r\n# 初始置换IP\r\ndef IP(value):\r\n return value[1] + value[5] + value[2] + value[0] + value[3] + value[7] + value[4] + value[6]\r\n\r\n\r\n# 最终置换逆IP\r\ndef IP_re(value):\r\n return value[3] + value[0] + value[2] + value[4] + value[6] + value[1] + value[7] + value[5]\r\n\r\n\r\n# 置换P4\r\ndef P4(value):\r\n return value[1] + value[3] + value[2] + value[0]\r\n\r\n\r\n# 映射F\r\ndef F(value, K):\r\n value_EP = value[3] + value[0] + value[1] + value[2] + value[1] + value[2] + value[3] + value[0]\r\n result = bin(int(value_EP, 2) ^ int(K, 2))[2:].rjust(8, '0')\r\n result_L = result[:4]\r\n result_R = result[4:]\r\n PL_row = int(result_L[0] + result_L[3], 2)\r\n PL_col = int(result_L[1] + result_L[2], 2)\r\n PL = bin(SBox_1[PL_row][PL_col])[2:].rjust(2, '0')\r\n PR_row = int(result_R[0] + result_R[3], 2)\r\n PR_col = int(result_R[1] + result_R[2], 2)\r\n PR = bin(SBox_2[PR_row][PR_col])[2:].rjust(2, '0')\r\n F_result = P4(PL + PR)\r\n return F_result\r\n\r\n\r\n# 复合函数Fk\r\ndef Fk(L, R, SK):\r\n F_result = F(R, SK)\r\n L = bin(int(L, 2) ^ int(F_result, 2))[2:].rjust(4, '0')\r\n Fk_result = L + R\r\n return Fk_result\r\n\r\n\r\n# 交换\r\ndef SW(value):\r\n return value[4:] + 
value[:4]\r\n\r\n\r\n# 加密\r\ndef EncryptASCII(plaintext, key):\r\n plaintext_ASCII = \"\".join([bin(ord(c))[2:].rjust(8, '0') for c in plaintext])\r\n key_ASCII = \"\".join([bin(ord(c))[2:].rjust(8, '0') for c in key])\r\n plaintext_IP = IP(plaintext_ASCII)\r\n K1 = P8(Shift(P10(key_ASCII), 1))\r\n K2 = P8(Shift(P10(key_ASCII), 3))\r\n plaintext_Fk1 = Fk(plaintext_IP[:4], plaintext_IP[4:], K1)\r\n plaintext_Fk1 = SW(plaintext_Fk1)\r\n plaintext_Fk2 = Fk(plaintext_Fk1[:4], plaintext_Fk1[4:], K2)\r\n ciphertext_ASCII = IP_re(plaintext_Fk2)\r\n ciphertext = \"\"\r\n for i in range(0, len(ciphertext_ASCII), 8):\r\n ciphertext += chr(int(ciphertext_ASCII[i:i+8], 2))\r\n return ciphertext\r\n\r\n\r\n# 解密\r\ndef DecryptASCII(ciphertext, key):\r\n ciphertext_ASCII = \"\".join([bin(ord(c))[2:].rjust(8, '0') for c in ciphertext])\r\n key_ASCII = \"\".join([bin(ord(c))[2:].rjust(8, '0') for c in key])\r\n ciphertext_IP = IP(ciphertext_ASCII)\r\n K1 = P8(Shift(P10(key_ASCII), 1))\r\n K2 = P8(Shift(P10(key_ASCII), 3))\r\n ciphertext_Fk1 = Fk(ciphertext_IP[:4], ciphertext_IP[4:], K2)\r\n ciphertext_Fk1 = SW(ciphertext_Fk1)\r\n ciphertext_Fk2 = Fk(ciphertext_Fk1[:4], ciphertext_Fk1[4:], K1)\r\n plaintext_ASCII = IP_re(ciphertext_Fk2)\r\n plaintext = \"\"\r\n for i in range(0, len(plaintext_ASCII), 8):\r\n plaintext += chr(int(plaintext_ASCII[i:i+8], 2))\r\n return plaintext\r\n# # 测试代码\r\n# plaintext=input(\"输入明文:\")\r\n# key=input(\"输入密钥:\")\r\n# print(\"密文为:\"+Encrypt(plaintext,key))\r\n# ciphertext=input(\"输入密文:\")\r\n# key2=input(\"输入密钥:\")\r\n# print(\"明文为:\"+Decrypt(ciphertext,key2))","repo_name":"wangzhenyun/SDES","sub_path":"S-DES/ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12676075074","text":"\r\nimport sys\r\nimport re \r\nimport array as arr\r\n \r\n\r\nclass DayCount: \r\n def __init__(self, d, m, y): \r\n self.d = d \r\n self.m = m \r\n self.y = y \r\n \r\nmonthDays = [31, 28, 31, 30, 31, 30, \r\n 31, 31, 30, 31, 30, 31 ] \r\n \r\ndef countLeapYears(d): \r\n \r\n years = d.y \r\n \r\n if (d.m <= 2) : \r\n years-= 1\r\n \r\n return int(years / 4) - int(years / 100) + int(years / 400)\r\n \r\n \r\ndef getDifference(dt1, dt2) : \r\n \r\n \r\n n1 = dt1.y * 365 + dt1.d \r\n \r\n for i in range(0, dt1.m - 1) : \r\n n1 += monthDays[i] \r\n \r\n \r\n n1 += countLeapYears(dt1) \r\n \r\n n2 = dt2.y * 365 + dt2.d \r\n for i in range(0, dt2.m - 1) : \r\n n2 += monthDays[i] \r\n n2 += countLeapYears(dt2) \r\n \r\n return (n2 - n1) \r\n \r\n\r\narglen = len(sys.argv)\r\na=arr.array('i', [1, 2, 3])\r\ncount = 0\r\ninputFile = open('date_calculator.txt','r')\r\nLines = inputFile.readlines() \r\nfor line in Lines:\r\n if line.startswith('D'):\r\n line = line[7:]\r\n k = 0 \r\n s = line.strip()\r\n\r\n lis = re.split('th |rd |st |, | |/|-|\\.',s)\r\n \r\n if lis[1] == 'Jan' or lis[1] == 'January':\r\n lis[1] = 1\r\n elif lis[1] == 'Feb' or lis[1] == 'February':\r\n lis[1] = 2 \r\n elif lis[1] == 'Mar' or lis[1] == 'March':\r\n lis[1] = 3\r\n elif lis[1] == 'Apr' or lis[1] == 'April':\r\n lis[1] = 4 \r\n elif lis[1] == 'May':\r\n lis[1] = 5\r\n elif lis[1] == 'Jun' or lis[1] == 'June':\r\n lis[1] = 6 \r\n elif lis[1] == 'Jul' or lis[1] == 'July':\r\n lis[1] = 7\r\n elif lis[1] == 'Aug' or lis[1] == 'August':\r\n lis[1] = 8\r\n elif lis[1] == 'Sep' or lis[1] == 'September':\r\n lis[1] = 9\r\n elif lis[1] == 'Oct' or lis[1] == 'October':\r\n lis[1] = 10\r\n elif lis[1] == 
'Nov' or lis[1] == 'November':\r\n lis[1] = 11\r\n elif lis[1] == 'Dec' or lis[1] == 'December':\r\n lis[1] = 12 \r\n\r\n for i in lis:\r\n a[k] = int(i)\r\n k = k + 1\r\n\r\n if arglen > 1:\r\n if sys.argv[1][1] == 'm' or sys.argv[1][1] == 'M':\r\n temp = a[0]\r\n a[0] = a[1]\r\n a[1] = temp \r\n\r\n if count == 0: \r\n dt1 = DayCount(a[0], a[1], a[2])\r\n else:\r\n dt2 = DayCount(a[0], a[1], a[2])\r\n count = count + 1 \r\n\r\noutputFile = open('output.txt', 'w')\r\noutputFile.write('Date Difference: {} Day'.format(abs(getDifference(dt1, dt2))))\r\n\r\noutputFile = open('output.txt', 'r')\r\nreadFile = outputFile.read()\r\nprint(readFile)\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n\r\n \r\n ","repo_name":"sayan-dey/Fun-with-Python","sub_path":"q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7166479007","text":"import string, re, time, os \nfrom urllib import unquote\nfrom TreeViewItem import *\nimport declare\nfrom ThreadLib import *\n\n\"\"\"Parse and tree functions for giFT's interface protocol.\n\nThis module is meant for giFT client developers. It provides a set\nof basic function that they'll definately need. The purpose of this\nmodule is the provide functions to modify data, without having to\ninterpreting it. There are two sets of functions and a couple of\nvariables:\n\nParsing functions:\nparse_server_object() -- Parse data as recieved from giFT to an object.\nparse_client_object() -- Parse an object to a string, to send to giFT.\n\nTree functions:\ntree_lookup() -- Look key arguments up in a tree object.\ntree_lookup_mod() -- Look key modifiers up in a tree object.\ntree_insert() -- Insert keys into a tree object.\n\nVariables:\nYou'll only need these if you want to work with tree objects\ndirectly. Using tree_lookup() is recommended.\nnodename -- contains the dictionary key used for the name of the node\nargument -- contains the dictionary key used for arguments\nmodifier -- contains the dictionary key used for modifiers\nchildren -- contains the dictionary key used for child nodes\n\nSee the interface protocol documentation for more information about\ngiFT's interface protocol.\n\"\"\"\n\n# Basicly C's enum\n(TOKEN_TEXT,\n TOKEN_SPACE,\n TOKEN_PAREN_OPEN,\n TOKEN_PAREN_CLOSE,\n TOKEN_BRACK_OPEN,\n TOKEN_BRACK_CLOSE,\n TOKEN_BRACE_OPEN,\n TOKEN_BRACE_CLOSE,\n TOKEN_SEMICOLON) = range(9)\n\n# These will be used for the names of the dictonary keys, use these, not their\n# values!\nargument = 'a'\nmodifier = 'm'\nchildren = 'c'\nnodename = 'n'\n\n# Link tokens and the element they precede\nabrevs = { TOKEN_PAREN_OPEN : argument,\n TOKEN_BRACK_OPEN : modifier,\n TOKEN_BRACE_OPEN : children,\n TOKEN_TEXT : nodename }\n\ncontextlist = [ [ '(', TOKEN_PAREN_OPEN ],\n [ ')', TOKEN_PAREN_CLOSE ],\n [ '[', TOKEN_BRACK_OPEN ],\n [ ']', TOKEN_BRACK_CLOSE ],\n [ '{', TOKEN_BRACE_OPEN ],\n [ '}', TOKEN_BRACE_CLOSE ],\n [ ';', TOKEN_SEMICOLON] ]\n\ntoken_chars = zip(*contextlist)[0]\n\nescape_chrs = '()[]{};\\\\'\n\ndef parse_server_object(tree_string):\n \"\"\"parse_server_object(tree_string) -> tree object \n \n A function to parse a string that has been sent by the giFT server\n\n The returned object will look like this (think 'tree'):\n { nodename: 'COMMAND', children:\n [ { nodename: 'key', ... },\n { nodename: 'key', ... },\n { nodename: 'SUBCOMMAND', children:\n [ { nodename: 'key', ... 
},\n ...\n ]\n }\n ]\n }\n \n Besides the nodename and children there can be two other keys\n (argument and modifier). Please see the Interface Protocol\n documentation for more information, it'll make you understand the\n tree structure better.\n \"\"\"\n\n # Why one big messy function? Because it's waaay faster than a few small\n # and clean ones.\n\n context = TOKEN_TEXT # Default context, in this context a token of\n # the type TOKEN_TEXT will become a key\n token_type = TOKEN_TEXT\n parsed = []\n keyindex = -1 # Dirty\n\n while len(tree_string):\n token = '' # This is waaay faster than a special function\n # to get the token\n\n if context != TOKEN_PAREN_OPEN and context != TOKEN_BRACK_OPEN:\n tree_string = string.lstrip(tree_string)\n\n # Find out if we're dealing with a special token, cut it off the total\n # string and return the type if we do.\n for contexttest in contextlist:\n if tree_string[0] == contexttest[0]:\n token = tree_string[0]\n token_type = contexttest[1]\n tree_string = tree_string[1:]\n break\n\n # No special tokens? Then get a text token. \n if not token:\n token_type = TOKEN_TEXT\n while len(tree_string):\n c = tree_string[0]\n # Unescaping, the right way (not just replacing '\\\\')\n if c == '\\\\':\n shift = 1\n elif c in token_chars or (context == TOKEN_TEXT and c in string.whitespace):\n break\n else:\n shift = 0\n token = token + tree_string[shift]\n tree_string = tree_string[shift+1:]\n\n abrev = abrevs[context]\n\n if token_type == TOKEN_TEXT:\n if context == TOKEN_TEXT:\n # Make a new key (assignment, not comparison)\n keyindex = keyindex + 1\n parsed.append({ abrev : token })\n elif context == TOKEN_PAREN_OPEN or context == TOKEN_BRACK_OPEN:\n # Append or create with a token the argument or the modifier\n if parsed[keyindex].has_key(abrev):\n parsed[keyindex][abrev] = parsed[keyindex][abrev] + ' ' + token\n else:\n parsed[keyindex][abrev] = token\n elif token_type == TOKEN_PAREN_OPEN or token_type == TOKEN_BRACK_OPEN:\n # Next token will be info in the argument or modifier\n context = token_type\n elif token_type == TOKEN_PAREN_CLOSE or token_type == TOKEN_BRACK_CLOSE:\n context = TOKEN_TEXT\n elif token_type == TOKEN_BRACE_OPEN:\n # Ooh, recursion. What's left of the tree_string will be returned too\n parsed[keyindex][abrevs[token_type]], tree_string = parse_server_object(tree_string)\n elif token_type == TOKEN_BRACE_CLOSE:\n return parsed, tree_string\n elif token_type == TOKEN_SEMICOLON:\n # Hackity-hack\n tree = parsed.pop(0)\n tree[abrevs[TOKEN_BRACE_OPEN]] = parsed\n return tree\n\ndef parse_client_object(tree, depth = 0):\n \"\"\"parse_client_object(tree) -> tree_string\n \n A function to parse a special 'tree' object into a string that\n can be sent to the giFT server. See parse_server_object.__doc__\n for a definition of the tree object.\n\n The generated tree_string attribute will look like this:\n \n command [modifier] (argument)\n key [modifier] (argument)\n subcommand [modifier] (argument) {\n key [modifier] (argument)\n }\n ;\n\n Tabs are used for indentation. 
See the documentation on the giFT\n Interface Protocol for more information about this format.\n \"\"\"\n \n parsed = ''\n indent = depth * '\\t'\n\n if type(tree) == dict:\n # If it's a list, it can be parsed recursivly\n tree = [tree]\n # The COMMAND doesn't use 'child-delimiters'\n child_start = '\\n'\n child_end = ';'\n else:\n child_start = ' {\\n'\n child_end = depth * '\\t' + '}\\n'\n\n for node in tree:\n # Escaping\n for element in node.keys():\n if element == argument or element == modifier:\n token_list = list(node[element])\n for i in range(len(token_list)):\n if token_list[i] in escape_chrs:\n token_list[i] = '\\\\' + token_list[i]\n node[element] = ''.join(token_list)\n\n name = node[abrevs[TOKEN_TEXT]]\n arg, mod, children = ('',) * 3 # Will create 3 empty variables\n\n # Give it up for readability! :/\n if node.has_key(abrevs[TOKEN_PAREN_OPEN]):\n arg = ' (' + node[abrevs[TOKEN_PAREN_OPEN]] + ')'\n if node.has_key(abrevs[TOKEN_BRACK_OPEN]):\n mod = ' [' + node[abrevs[TOKEN_BRACK_OPEN]] + ']'\n if node.has_key(abrevs[TOKEN_BRACE_OPEN]):\n # And another lovely case of recursion.\n children = child_start + parse_client_object(node[abrevs[TOKEN_BRACE_OPEN]], depth+1) + child_end\n else:\n # I confess, this is an ugly solution. \n children = '\\n'\n\n # Construct the block\n parsed = parsed + indent + name + mod + arg + children\n\n return parsed\n\ndef _tree_lookup(tree, lookup, key=argument):\n \"\"\"[internal]\"\"\"\n \n lookup = list(lookup.split('/'))\n result = list()\n # We need a list for iteration purposes\n if type(tree) != list: tree = [tree]\n \n for node in tree:\n if node[nodename] == lookup[0]:\n # Are we there yet?\n if len(lookup) > 1:\n for a in _tree_lookup(node[children], \"/\".join(lookup[1:])):\n result.append(a)\n elif node.has_key(key):\n # Yes, we are...\n result.append(node[key])\n \n return result\n\ndef tree_lookup(tree, *lookups):\n \"\"\"tree_lookup(tree, [lookup[, ...]]) -> result list\n\n Returns a list of arguments looked up in tree. The values are\n looked up in the children of the main node, unless you didn't\n specify a lookup. If needed, specify levels, seperated by '/'.\n If more than one lookup is specified a list of tuples will be\n returned.\n\n Examples: \n tree_lookup(tree, 'ITEM/availabilty') ->\n ['-1'] \n tree_lookup(tree, 'DOWNLOADS/DOWNLOAD', 'DOWNLOADS/DOWNLOAD/transmit') ->\n [('1', '1048576'), ('2', '349525')]\n \"\"\"\n \n result = []\n for lookup in lookups:\n result.append(_tree_lookup(tree[children], lookup))\n \n if len(result) == 1:\n return result[0]\n else:\n if lookups:\n # Elegant and hackish at the same time ;)\n return apply(zip, result)\n else:\n # Only the latter applies to this... :/\n result = _tree_lookup(tree, tree[nodename])\n if result: return result\n\ndef tree_lookup_mod(tree, *lookups):\n \"\"\"tree_lookup(tree, [lookup[, ...]]) -> result list\n\n Returns a list of modifiers looked up in tree. See\n tree_lookup.__doc__ for further documentation.\n \"\"\"\n # Aargh! So... much... double... code...! :(\n \n result = []\n for lookup in lookups:\n result.append(_tree_lookup(tree[children], lookup, modifier))\n \n if len(result) == 1:\n return result[0]\n else:\n if lookups:\n # Elegant and hackish at the same time ;)\n return apply(zip, result)\n else:\n # Only the latter applies to this... 
:/\n result = _tree_lookup(tree, tree[nodename], modifier)\n if result: return result\n\ndef _tree_insert(tree, insert, *args):\n \"\"\"[internal]\"\"\"\n \n if type(args[0]) == tuple: args = args[0]\n if len(tree) == 0: tree = list()\n # We need a list for iteration purposes\n if type(tree) != list: tree = [tree]\n \n insert = list(insert.split('/'))\n\n # Every node must have a name, so don't worry about the index being shifted\n nodelist = [node[nodename] for node in tree if node.has_key(nodename)]\n if insert[0] in nodelist: \n tree_index = nodelist.index(insert[0])\n else:\n tree.append({nodename:insert[0]})\n tree_index = -1\n \n if len(insert) > 1:\n # Make sure there are children ;)\n tree[tree_index][children] = ((tree[tree_index].has_key(children) and tree[tree_index][children]) or list())\n tree[tree_index][children] = _tree_insert(tree[tree_index][children], \"/\".join(insert[1:]), args)\n else:\n for input in zip((argument, modifier),args): \n if input[1]: tree[tree_index][input[0]] = input[1]\n\n return tree\n\ndef tree_insert(insert_dict, tree={}):\n \"\"\"tree_insert(insert_dict, tree={}) -> tree object\n \n Insert nodes, with arguments and modifiers, into a tree object,\n or create a new one. See parse_server_object.__doc__ for a\n definition of the tree object.\n \n A few examples of insert_dict elements and the command they'll\n create:\n insert_dict = { \n 'COMMAND' : argument, # COMMAND (argument);\n 'COMMAND/key' : argument, # COMMAND key (argument);\n 'COMMAND/key' : (argument,), # COMMAND key (argument);\n 'COMMAND/key' : (argument, modifier), # COMMAND key [modifier] (argument);\n 'COMMAND/key' : '', # COMMAND key;\n 'COMMAND/SUBCOMMAND/key : argument # COMMAND SUBCOMMAND { key (argument) };\n }\n \n Note that adding multiple keys is not possible. They'll be\n overwritten. Also note that you can make an object without a\n main command, if you don't specify a 'root'. Use this in your\n advantage :) \n \"\"\"\n \n for insert in insert_dict.keys():\n tree = _tree_insert(tree, insert, insert_dict[insert])\n\n if len(tree) > 1:\n return tree\n else:\n return tree[0]\n\n######\n######\n## MAIN PARSELIB STARTS HERE! 
##\n######\n######\n\nclass ParseLib:\n\tdef __init__(self):\n\t\tself.win = declare.win\n\t\tself.ListFull = 0 ## Max nr of items displayed?\n\t\tself.SearchItemURL = declare.SearchItemURL ## Contains the OpenFT:// url\n\t\tself.SearchItemHash = declare.SearchItemHash ## Contains the hashes of the items\n\t\tself.SearchItemUser = declare.SearchItemUser ## Contains username (ip) of user\n\t\tself.SearchItemSize = declare.SearchItemSize ## Filesize of item\n\t\tself.SearchItemSaveName = declare.SearchItemSaveName ## Name that it will be saved as\n\t\tself.CurrentSearchID = declare.CurrentSearchID ## ID of current search\n\t\tself.SItemCount = 1 ## count of Search items\n\t\tself.SItemList = [] ## Contains the actual search items\n\t\tself.DItemCount = 1 ## count of Downloading items\n\t\tself.DItemList = {} ## Contains the actual downloading items\n\t\tself.DItemList[0] = {} ## { 0 = {}, 1 = {'url' : \"OpenFT://...,...}, ...}\n\t\t\n\tdef SortTags(self, sock, args): ## This will sort the arguments we get, and adjust the GUI and such\n\t\t## Make the item's SearchItem vars..\n\t\tself.SearchItemURL[str(self.SItemCount)] = \"\"\n\t\tself.SearchItemHash[str(self.SItemCount)] = \"\"\n\t\tself.SearchItemUser[str(self.SItemCount)] = \"\"\n\t\tself.SearchItemSize[str(self.SItemCount)] = \"\"\n\t\tself.SearchItemSaveName[str(self.SItemCount)] = \"\"\n\t\t\n\t\t## - ATTACH function - ##\n\t\tif args[nodename] == \"ATTACH\":\n\t\t\tself.win.lblgiFTVersion.setText(tree_lookup(args, 'version')[0])\n\n\t\t## - STATS function - ##\n\t\telif args[nodename] == \"STATS\":\n\t\t\tif self.win.lblOpenFTStatus.text != \"Connected\":\n\t\t\t\tself.win.lblOpenFTStatus.setText(\"Connected\") ## Put us as online when we get the STATS\n\t\t\t\t\n\t\t\tself.win.lblOpenFTUsers.setText(tree_lookup(args, 'OpenFT/users')[0]) ## Display nr of OpenFT users\n\t\t\tself.win.lblOpenFTFiles.setText(tree_lookup(args, 'OpenFT/files')[0]) ## Display nr of OpenFT shared files\n\t\t\tself.win.lblOpenFTSize.setText(tree_lookup(args, 'OpenFT/size')[0]) ## Display total OpenFT share size\n\t\t\n\t\t## - ITEM function - ##\n\t\telif args[nodename] == \"ITEM\":\n\t\t\tself.CurrentSearchID = tree_lookup(args)[0]\n\t\t\t\n\t\t\tif len(args[children]) == 0: ## No results found\n\t\t\t\tif self.SItemCount == 0:\n\t\t\t\t\tqApp.lock()\n\t\t\t\t\tTreeViewItem(self.win.lstSearch, (\" \", \"No results found\", \" \", \" \", \" \"))\n\t\t\t\t\tqApp.unlock()\n\t\t\t\telse:\n\t\t\t\t\tif self.win.lstSearch.childCount() == 0: ## List is still empty\n\t\t\t\t\t\tqApp.lock()\n\t\t\t\t\t\tfor x in self.SItemList:\n\t\t\t\t\t\t\tTreeViewItem(self.win.lstSearch, x)\n\t\t\t\t\t\tqApp.unlock()\n\t\t\t\t\t\n\t\t\telse: ## Results found!\n\t\t\t\tif self.SItemCount < 101: ## List not full yet\n\t\t\t\t\t#children = args[\"children\"]\n\t\t\t\t\tItem = {}\n\t\t\t\t\t\n\t\t\t\t\t## Shop off the URL so it looks better in the results listview\n\t\t\t\t\tItem[\"url\"] = os.path.basename(tree_lookup(args, \"url\")[0])\n\t\t\t\t\tself.SearchItemURL[str(self.SItemCount)] = Item[\"url\"]\n\t\t\t\t\t## Let's filter some URL encoding\n\t\t\t\t\tItem[\"url\"] = unquote(Item[\"url\"])\n\t\t\t\t\t\n\t\t\t\t\t##Make sure it can display the used protocol\n\t\t\t\t\tif tree_lookup(args, \"url\")[0][:6] == \"OpenFT\":\n\t\t\t\t\t\tItem[\"protocol\"] = \" OpenFT \"\n\t\t\t\t\telse:\n\t\t\t\t\t\tItem[\"protocol\"] = \" Unknown \"\n\t\t\t\t\t\t\t\t\n\t\t\t\t\tItem[\"node\"] = tree_lookup(args, \"node\")[0]\n\t\t\t\t\t\t\t\n\t\t\t\t\t## Fill up our per-item 
list\n\t\t\t\t\tself.SearchItemURL[str(self.SItemCount)] = tree_lookup(args, \"url\")[0]\n\t\t\t\t\tself.SearchItemHash[str(self.SItemCount)] = tree_lookup(args, \"hash\")[0]\n\t\t\t\t\tself.SearchItemUser[str(self.SItemCount)] = tree_lookup(args, \"user\")[0]\n\t\t\t\t\tself.SearchItemSize[str(self.SItemCount)] = tree_lookup(args, \"size\")[0]\n\t\t\t\t\tself.SearchItemSaveName[str(self.SItemCount)] = Item[\"url\"]\n\t\t\t\t\t\n\t\t\t\t\tItem[\"number\"] = str(self.SItemCount) \n\t\t\t\t\t\n\t\t\t\t\tItem[\"type\"] = \"--\"\n\t\t\t\t\tif string.lower(Item[\"url\"][-3:]) == \"mp3\":\n\t\t\t\t\t\tItem[\"type\"] = \"mp3\"\n\t\t\t\t\telif string.lower(Item[\"url\"][-3:]) == \"ogg\":\n\t\t\t\t\t\tItem[\"type\"] = \"ogg\"\n\t\t\t\t\telif string.lower(Item[\"url\"][-3:]) == \"mpg\":\n\t\t\t\t\t\tItem[\"type\"] = \"mpg\"\n\t\t\t\t\telif string.lower(Item[\"url\"][-3:]) == \"jpg\":\n\t\t\t\t\t\tItem[\"type\"] = \"jpg\"\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\tItem[\"size\"] = \" \" + str(\"%.2f\" % (float(tree_lookup(args, \"size\")[0]) / 1024.0 / 1024.0)) + \" MB \"\n\t\t\t\t\tself.SItemCount = self.SItemCount + 1\n\t\t\t\t\tself.SItemList.append((Item[\"number\"], Item[\"url\"], \n\t\t\t\t\t\tItem[\"type\"], Item[\"size\"], Item[\"node\"], Item[\"protocol\"]))\n\t\t\t\t\t\n\t\t\t\t\tif int(self.SItemCount) == 100 / 10: ## send cancel after 10%\n\t\t\t\t\t\tsock.send(\"SEARCH(\" + str(declare.CurrentSearchID) + \") action(cancel) ;\")\n\t\t\t\t\t\t\n\t\t\t\telse: ## List is full\n\t\t\t\t\tif self.ListFull == 1:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tqApp.lock()\n\t\t\t\t\t\tfor x in self.SItemList:\n\t\t\t\t\t\t\tTreeViewItem(self.win.lstSearch, x)\n\t\t\t\t\t\tqApp.unlock()\n\t\t\t\t\t\tself.ListFull = 1\n\n\t\t## - ITEM function - ##\n\t\telif args[nodename] == \"DOWNLOADS\":\n\t\t\tfor id in tree_lookup(args, \"DOWNLOAD\"):\n\t\t\t\tif id not in declare.ID_List:\n\t\t\t\t\tdeclare.ID_List.append(id)\n\t\t\t#print str(len(tree_lookup(args, \"DOWNLOAD\")))\n\t\t\t#print tree_lookup(args, \"DOWNLOAD/SOURCE\")\n\t\t\t\n\t\t\t#if self.win.lstDownloads.childCount() < len(tree_lookup(args, \"DOWNLOAD\")) or self.win.lstDownloads.childCount() > len(tree_lookup(args, \"DOWNLOAD\")): ## List is still not totally populated\n\t\t\t\n\t\t\tself.win.lstDownloads.clear()\n\t\t\tfor x in range(len(tree_lookup(args, \"DOWNLOAD\"))):\n\t\t\t\t#print \"SOURCES: \", str(len(tree_lookup(args, \"DOWNLOAD/SOURCE\")))\n\t\t\t\tid = tree_lookup(args, \"DOWNLOAD\")[x]\n\t\t\t\turl = os.path.basename(unquote(tree_lookup(args, \"DOWNLOAD/SOURCE/url\")[x]))\n\t\t\t\t## Get % complete\n\t\t\t\tcompleted = int(tree_lookup(args, \"DOWNLOAD/size\")[x]) / 100\n\t\t\t\t## Now we have the value of 1%, calculate full % now\n\t\t\t\tcompleted = str(int(tree_lookup(args, \"DOWNLOAD/transmit\")[x]) / completed)\n\n\t\t\t\tself.DItemList[id] = {\"url\": tree_lookup(args, \"DOWNLOAD/SOURCE/url\")[x],\n\t\t\t\t\t\"user\": tree_lookup(args, \"DOWNLOAD/SOURCE/user\")[x],\n\t\t\t\t\t\"hash\": tree_lookup(args, \"DOWNLOAD/hash\")[x]}\n\n\t\t\t\tqApp.lock()\n\t\t\t\tTreeViewItem(self.win.lstDownloads, (id, url, \" \", \" \", (completed + \" %\"), \" \", \" \"))\n\t\t\t\tqApp.unlock()\n\t\t\t#else: ## List is just being updated\n\t\t\t\t#pass","repo_name":"hexwab/gift","sub_path":"lokipoki/ParseLib.py","file_name":"ParseLib.py","file_ext":"py","file_size_in_byte":18640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70150220675","text":"import os\nfrom math import 
factorial\nimport albumentations as A\n\n\nclass Augmentor:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def round_clip_0_1(x, **kwargs):\n        return x.round().clip(0, 1)\n\n    def _get_base_probability(self, n: int) -> int:\n        \"\"\"\n        Counts the augmentation subsets: the sum of the binomial coefficients C(n, i) for i in [0, n).\n        Note the result is greater than 1 and is passed below as the probability p; albumentations\n        compares p against random.random(), so any value above 1 effectively means \"always apply\".\n        :param n: number of augmentations\n        :return: subset count\n        \"\"\"\n\n        counts = 0\n        for i in range(n):\n            counts += factorial(n) // factorial(i) // factorial(n - i)\n\n        return counts\n\n    def get_training_augmentation(self):\n        augs_count = 11\n        base_proba = self._get_base_probability(n=augs_count)\n\n        train_transform = [\n            # simple\n            A.HorizontalFlip(p=base_proba),\n            A.VerticalFlip(p=base_proba),\n            A.Rotate(limit=(-180, 180), p=base_proba),\n            A.Transpose(p=base_proba),\n            # cutting and scaling\n            A.OneOf(\n                [\n                    A.RandomResizedCrop(\n                        height=256,\n                        width=256,\n                        scale=(0.4, 1.0),\n                        ratio=(0.8, 2.0),\n                        p=base_proba,\n                    ),\n                    A.IAAPerspective(\n                        scale=(0.1, 0.2),\n                        keep_size=True,\n                        always_apply=False,\n                        p=base_proba,\n                    ),\n                ],\n                p=base_proba,\n            ),\n            # colour\n            A.RandomBrightnessContrast(\n                brightness_limit=(-0.3, 0.6),\n                contrast_limit=(-0.3, 0.6),\n                brightness_by_max=True,\n                always_apply=False,\n                p=base_proba,\n            ),\n            A.HueSaturationValue(\n                hue_shift_limit=(-360, 360),\n                sat_shift_limit=(-80, 80),\n                val_shift_limit=(-80, 80),\n                p=base_proba,\n            ),\n            A.ToGray(p=base_proba),\n            A.RGBShift(\n                r_shift_limit=(100, 200),\n                g_shift_limit=(100, 200),\n                b_shift_limit=(100, 200),\n                always_apply=False,\n                p=base_proba,\n            ),\n            # noise\n            A.OneOf(\n                [\n                    A.GridDistortion(\n                        num_steps=16,\n                        distort_limit=(-0.5, 0.5),\n                        p=base_proba,\n                    ),\n                    A.GaussianBlur(\n                        blur_limit=(5, 11),\n                        sigma_limit=0,\n                        p=base_proba,\n                    ),\n                    A.GaussNoise(\n                        var_limit=(50, 75),\n                        mean=0,\n                        p=base_proba,\n                    ),\n                    A.Blur(\n                        blur_limit=(7, 13),\n                        p=base_proba,\n                    ),\n                ],\n                p=base_proba,\n            ),\n            A.MotionBlur(\n                blur_limit=10,\n                p=base_proba,\n            ),\n            A.Lambda(mask=self.round_clip_0_1),\n            A.Resize(\n                height=256,\n                width=256,\n                p=1,\n            ),\n        ]\n\n        return A.Compose(\n            train_transform,\n            p=base_proba,\n        )\n\n    def get_validation_augmentation(self):\n        test_transform = []\n        return A.Compose(test_transform)\n\n    def get_preprocessing(self, preprocessing_fn):\n        _transform = [A.Lambda(image=preprocessing_fn)]\n        return A.Compose(_transform)\n\n\nif __name__ == \"__main__\":\n    from dataset import Dataset, visualize\n\n    data_path = \"data\"\n    dataset = Dataset(\n        images_dir=os.path.join(data_path, \"test\"),\n        masks_dir=os.path.join(data_path, \"train_annotations\"),\n        classes=[\"bulk\"],\n        augmentations=Augmentor().get_training_augmentation(),  # instantiate first; calling the method on the class itself raised a TypeError\n        preprocessing=None,\n    )\n\n    image, mask = dataset[5]\n\n    visualize(image=image, bulk_mask=mask[..., 0].squeeze())\n","repo_name":"fortis3000/blob_detection","sub_path":"augs.py","file_name":"augs.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29832542749","text":"from unittest import TestCase\n\nfrom parameterized import parameterized\n\nfrom ex_11_check_primality_functions import is_prime\n\n\nclass Tests(TestCase):\n\n    @parameterized.expand([\n        (1,),\n        (2,),\n        (3,),\n        (5,),\n        (7,),\n        (19,),\n        (31,)\n    ])\n    def test_if_prime_number_returns_true(self, number):\n        self.assertTrue(is_prime(number))\n\n    @parameterized.expand([\n        (-3,),\n        (0,),\n        (4,),\n        (10,),\n        (27,),\n        (45,),\n        (63,)\n    ])\n\n    def test_if_non_prime_number_returns_false(self, number):\n        
self.assertFalse(is_prime(number))\n","repo_name":"PatrykGorol/Python_beginner_excercises","sub_path":"ex_11_tests.py","file_name":"ex_11_tests.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29131620276","text":"# weird mypy bug with imports\nfrom typing import Any, Dict, Generator # pylint: disable=unused-import\n\nimport attr\n\nfrom ...models import TestResultSet\nfrom ...utils import get_requests_auth\nfrom .. import events\nfrom .core import BaseRunner, get_session, network_test, run_test, wsgi_test\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass SingleThreadRunner(BaseRunner):\n \"\"\"Fast runner that runs tests sequentially in the main thread.\"\"\"\n\n def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:\n auth = get_requests_auth(self.auth, self.auth_type)\n with get_session(auth, self.headers) as session:\n for endpoint, test in self.schema.get_all_tests(network_test, self.hypothesis_settings, self.seed):\n for event in run_test(\n self.schema,\n endpoint,\n test,\n self.checks,\n results,\n session=session,\n request_timeout=self.request_timeout,\n ):\n yield event\n if isinstance(event, events.Interrupted):\n return\n\n\n@attr.s(slots=True) # pragma: no mutate\nclass SingleThreadWSGIRunner(SingleThreadRunner):\n def _execute(self, results: TestResultSet) -> Generator[events.ExecutionEvent, None, None]:\n for endpoint, test in self.schema.get_all_tests(wsgi_test, self.hypothesis_settings, self.seed):\n for event in run_test(\n self.schema,\n endpoint,\n test,\n self.checks,\n results,\n auth=self.auth,\n auth_type=self.auth_type,\n headers=self.headers,\n ):\n yield event\n if isinstance(event, events.Interrupted):\n return\n","repo_name":"borisrny/efforte1","sub_path":"venv/lib/python3.7/site-packages/schemathesis/runner/impl/solo.py","file_name":"solo.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30890170111","text":"#!/usr/bin/python\n\nclass bird(object):\n \tfeather=True\n\t\nclass chicken(bird):\n\tfly=False\n\tdef __init__(self, age):\n\t\tself.age=age\n\nsummer=chicken(2)\n\n\nprint(bird.__dict__)\nprint(chicken.__dict__)\nprint(summer.__dict__)\n\nsummer.__dict__['age']=3\nprint(summer.__dict__['age'])\n\nsummer.age=5\nprint(summer.age)\n","repo_name":"zhaofeng555/python2-test","sub_path":"pytest/hjg/objattr.py","file_name":"objattr.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13695181702","text":"class Solution:\n def removeOccurrences(self, s: str, part: str) -> str:\n stack = []\n N = len(part)\n\n for c in s:\n stack.append(c)\n if stack and len(stack) >= N and ''.join(stack[-N:]) == part:\n for i in range(N):\n stack.pop()\n\n return ''.join(stack)","repo_name":"pdkz/leetcode","sub_path":"1910_Remove_All_Occurrences_of_a_Substring/1910_Remove_All_Occurrences_of_a_Substring.py","file_name":"1910_Remove_All_Occurrences_of_a_Substring.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8581448603","text":"from flask import Flask, request, jsonify\nfrom flask_cors import cross_origin\nimport json\nimport tensorflow as tf\nimport tensorflow_hub as tf_hub\nimport tensorflow_text as tf_text\nimport 
openai\nimport os  # needed to read the API key from the environment\n\napp = Flask(__name__)\nspmodel = tf.keras.models.load_model('./spamModel.h5', custom_objects={'KerasLayer': tf_hub.KerasLayer})\nnmodel = tf.keras.models.load_model('./newsModel.h5', custom_objects={'KerasLayer': tf_hub.KerasLayer})\n\nopenai.api_key = os.environ.get(\"OPENAI_API_KEY\")  # the original hard-coded a live-looking secret key; credentials must stay out of source\n\ndef get_news(text):\n    predictions = nmodel.predict([text])\n    return predictions[0]\n\n\ndef get_spam(text):\n    predictions = spmodel.predict([text])  # was nmodel: the spam endpoint must use the spam model\n    return predictions[0]\n\n\ndef clsfy(text):\n    predictions = nmodel.predict([text])\n    return predictions[0]\n\n\ndef display_score(raw_score):\n    return round(raw_score, 3)\n\n\n@app.route('/news', methods=['POST'])\n@cross_origin()\ndef news():\n    if request.method == 'POST':\n        f = request.json\n        score = get_news(str(f['text']))  # get_news takes only the text; the stray second argument raised a TypeError\n        print(float(score[0]))\n        out = jsonify(msg=display_score(float(score[0])))\n        return out\n    else:\n        out = jsonify(msg='dont work')\n        return out\n\n\n@app.route('/spam', methods=['POST'])\n@cross_origin()\ndef spam():\n    if request.method == 'POST':\n        f = request.json\n        score = get_spam(str(f['text']))\n        print(float(score[0]))\n        out = jsonify(msg=display_score(float(score[0])))\n        return out\n    else:\n        out = jsonify(msg='dont work')\n        return out\n\n\n@app.route('/summary', methods=['POST'])\n@cross_origin()\ndef summary():\n    if request.method == 'POST':\n        f = request.json\n\n        response = openai.Completion.create(\n            model=\"text-davinci-002\",\n            prompt=f['text'],\n            temperature=0.7,\n            max_tokens=1000,\n            top_p=1,\n            frequency_penalty=0,\n            presence_penalty=0\n        )\n\n        summ = response.choices[0].text.strip()\n\n        out = jsonify(msg=summ)\n        return out\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","repo_name":"skim-1/notiserver","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4945289454","text":"#!/usr/bin/python\n\n# Import required libraries\nimport sys\nimport os\nimport RPi.GPIO as GPIO # GPIO library we need to use the GPIO pins\nimport time # time library for sleep function\n\n# GPIO pins for pump\nStepPinForward=32\n\n# Function for turning water pump on to suck from tub & push to plants\ndef pumpforward():\n    GPIO.setmode(GPIO.BOARD)\n    GPIO.setup(StepPinForward, GPIO.OUT)\n    GPIO.output(StepPinForward, GPIO.HIGH) # drive the pin high -- the original never actually switched the pump on\n    print(\"Pump running for 3s\")\n    time.sleep(3)\n    GPIO.cleanup() # releases the pin, switching the pump off\n    time.sleep(3)\n    print(\"slept for 3s\")\n\ntry:\n    while True:\n        pumpforward()\nexcept KeyboardInterrupt:\n    GPIO.cleanup()\n","repo_name":"xunillinux/ESHHPlantWatering","sub_path":"sensorScripts/notUsed/pump_joel.py","file_name":"pump_joel.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71217027074","text":"import chromedriver_autoinstaller\nchromedriver_autoinstaller.install()\nfrom dotenv import load_dotenv\nimport time\nimport os\nload_dotenv()\n\nfrom bs4 import BeautifulSoup\nimport re as re\nimport time\nimport pandas as pd\nimport os\nimport numpy as np\nimport requests\n\ndef plant_names (url): \n    \"\"\"Esta función sirve para obtener el nombre de todas las plantas usando la librería beautiful soup. 
La función toma como argumento un url\n donde aparecen todos los nombres de las plantas y devuelve una lista de nombre.\"\"\"\n html = requests.get(url)\n soup = BeautifulSoup(html.content, \"html.parser\")\n\n plants_list = soup.find_all('div',{'class':'plant col-sm-12'})\n names_plants = [i.getText().split('(')[0].strip().replace(' ','%2B').replace('/','~') for i in plants_list] #Cambio formato replace para adecuarlo al formato que tiene el url\n\n for i in range(len(names_plants)):\n if names_plants[i] == 'Artichokes':\n names_plants[i] = 'Artichokes%20(Globe)'\n elif names_plants[i] == 'Strawberries':\n names_plants[i] = 'Strawberries%2B%28from%2Bseeds%29'\n\n return names_plants\n\nnames_plants = plant_names('http://www.gardenate.com/plants/')\n\n\ndef zones (url):\n \"\"\"Esta función sirve para obtener todas las zonas climáticas registradas en la página web y guardarlos en un diccionario donde la key es el value (numero\n de zona climática) y el value es el nombre de la zona climática. La función toma como argumento un link y devuelve un diccionario. \"\"\"\n\n html = requests.get(url)\n soup = BeautifulSoup(html.content, \"html.parser\")\n\n filter = soup.find(\"select\", class_=\"form-control zone-selector\")\n \n # Diccionario vació para guardar las zonas climáticas\n climate_zones = {}\n\n # Extraer la opción elementos y completar el diccionario\n for option in filter.find_all(\"option\"):\n zone_number = option[\"value\"]\n zone_name = option.text.strip()\n climate_zones[zone_number] = zone_name\n\n return climate_zones\n\n\n\ndef filter_dict_climate (dict_, substring): \n\n \"\"\"Esta función sirve para crear un nuevo diccionario a partir de otro en base a aquellos values que contengan una substring, lo uso para obtener solamente\n los links que sean de las zonas climáticas de USA.\"\"\"\n new_dict = {}\n\n for key, value in dict_.items(): \n if substring in value:\n new_dict[key] = value\n return new_dict\n\n\ndef get_urls_plants (list_):\n \"\"\"Esta función sirve para crear los link de todas las plantas a partir de la lista de nombre de plantas. Toma como argumento una lista y devuelve\n otra lista de links\"\"\"\n list_url = []\n for i in list_:\n list_url.append(f'https://www.gardenate.com/plant/{i}')\n return list_url\n\n\ndef get_urls_complete (list_, dict_): \n \"\"\"Esta función sirve para crear los links de todas las plantas según zona climática. 
Toma como argumento la lista de nombre de plantas y el diccionario\n de zonas climáticas y devuelve una lista de links\"\"\"\n list_url = []\n for i in list_:\n for j in dict_.keys():\n list_url.append(f'https://www.gardenate.com/plant/{i}?zone={j}')\n return list_url\n\n\ndef characterisitics(list_):\n\n \"\"\"Esta función se utiliza para extraer de una lista de links y crear un dataframe de características de las plantas, por lo que la función \n toma como valor una lista de urls y devuelve un data frame\"\"\"\n\n plants = []\n\n for i in list_: #url_plants\n html = requests.get(i)\n soup = BeautifulSoup(html.content, \"html.parser\")\n \n \n # Find the position of the last / \n last_slash_index = i.rfind(\"/\")\n\n # Extract the desired part of the url\n name = i[last_slash_index + 1 :]\n \n \n # Extract sowing if available\n sowing_element = soup.find(\"li\", {'class': 'sowing'})\n sowing = sowing_element.getText() if sowing_element else np.nan\n \n # Extract spacing if available\n spacing_element = soup.find(\"li\", {'class': 'spacing'})\n spacing = spacing_element.getText() if spacing_element else np.nan\n \n # Extract harvest if available\n harvest_element = soup.find(\"li\", {'class': 'harvest'})\n harvest = harvest_element.getText() if harvest_element else np.nan\n\n companion_element = soup.find(\"li\", {'class': 'companion'})\n companion = companion_element.getText() if companion_element else np.nan\n\n avoid_element = soup.find(\"li\", {'class': 'avoid'})\n avoid = avoid_element.getText() if avoid_element else np.nan\n\n\n\n image_link_element = soup.find(\"div\", {\"class\":\"image\"})\n image_link = ('www.gardenate.com' + image_link_element.find(\"a\").get('href') if image_link_element is not None else np.nan)\n\n dict_ = {'name':name ,'sowing':sowing, 'spacing':spacing, 'harvest':harvest, 'compatibility':companion,'avoid':avoid, 'image_link': image_link}\n\n plants.append(dict_)\n\n df = pd.DataFrame(plants)\n \n return df \n\n\ndef drop_url_list (list_, string):\n \"\"\"Función para quitar un link de la lista de links\"\"\"\n list_.remove(string)\n return list_\n\n\n\ndef check_links(list_):\n\n \"\"\"Función para chequar si todos los urls de la lista funcionan. Toma como valor la lista y printea solamente los links que no\n funcionen.\"\"\"\n for link in list_:\n try:\n response = requests.get(link)\n if response.status_code != 200:\n print(f\"Link {link} is not working. 
Status code: {response.status_code}\")\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred while checking link {link}: {str(e)}\")\n\n\n\ndef months_to_plant (url): \n \n html = requests.get(url)\n soup = BeautifulSoup(html.content, \"html.parser\")\n\n table = soup.find('table')\n rows = table.find_all('tr')\n\n name = re.search(r'/([^/?]+)\\?', url).group(1)\n zone = re.search(r'zone=(\\d+)', url).group(1)\n\n data = []\n headers = [header.text.strip() for header in rows[0].find_all('th')]\n\n for row in rows[1:]:\n if row is None:\n cells = np.nan\n else:\n cells = [cell.text.strip() for cell in row.find_all('td')]\n data.append(cells)\n\n\n df_plant = pd.DataFrame(data, columns = headers)\n df_plant ['name'] = name \n df_plant ['zone'] = zone\n\n return df_plant \n\n\ndef concat_multiple_tables(list_):\n \n dfs = []\n\n for i in list_:\n df = months_to_plant(i)\n if df is not None: \n dfs.append(df)\n combined_df = pd.concat(dfs, ignore_index = True)\n\n return combined_df\n\n\n\n\n\n","repo_name":"georginamanyanic/Final-project","sub_path":"src/scrapping.py","file_name":"scrapping.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23506343381","text":"#! /usr/bin/env python3\n\nimport re\nimport random as rnd\n\nrnd.seed(1)\n\ndef enumerate_coins(N):\n for i in range(2**(N - 2)):\n s = '1{:0{}b}1'.format(i, N-2)\n yield [int(x) for x in s]\n\ndef random_enumerate_coins(N):\n m = 2 ** (N-2)\n done = set()\n while True:\n i = rnd.randint(0, m)\n s = '1{:0{}b}1'.format(i, N-2)\n if s in done: continue\n done.add(s)\n yield [int(x) for x in s]\n\nclass Solver(object):\n\n def __init__(self, N):\n self.N = N\n self.bases = dict()\n for b in range(2, 11):\n self.bases[b] = [b**i for i in range(N-1, -1, -1)]\n\n def test_coin(self, coin):\n self.divs = list()\n for b in range(2, 11):\n d = self._test_coin(coin, b)\n if d is None: return None\n self.divs.append(d)\n return list(self.divs)\n\n def _test_coin(self, coin, b):\n rep = sum(i*bb for (i, bb) in zip(coin, self.bases[b]))\n for i in range(3, min(10000, rep)):\n if i in self.divs: continue\n if rep % i == 0: return i\n return None\n\nimport sys\nwith open(sys.argv[1]) as f:\n content = f.read()\n\nns = re.findall(r'\\d+', content)\nN = int(ns[1])\nJ = int(ns[2])\ns = Solver(N)\ni = 0\nprint('Case #1:')\n#for coin in enumerate_coins(N):\nfor coin in random_enumerate_coins(N):\n strcoin = ''.join(map(str, coin))\n divs = s.test_coin(coin)\n if divs is None: continue\n i += 1\n if i > J: break\n strdivs = ' '.join(map(str, divs))\n print(strcoin, strdivs)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_179/2101.py","file_name":"2101.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23740136537","text":"import os\nimport csv\n\n# finds the budget_data csv using the os module\nmain_bank_csv = os.path.join('PyBank', 'Resources','budget_data.csv')\n\n# lists to store data as I split the columns into date and price. 
I created an extra, avgchange to calculate the average change and started it with zero since the first month has no change\ndate = []\nprice = []\navgchange = [0]\n#avgchange2 = [] \navgchange3 = [0]\n\n# opens the csv file to start manipulating it\nwith open(main_bank_csv) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n #removes the header\n header = next(csvreader,None)\n #starts a for loop to split the columns into lists\n for row in csvreader:\n date.append(row[0])\n price.append(float(row[1]))\n avgchange.append(float(row[1]))\n\n # deletes the last figure in the avgchange so the columns match up \n del avgchange[-1]\n # counts the elements in the date list to determine the number of months\n totalmonths = (len(date))\n # sums the values in the Profit/Loss list\n net_total = sum(price)\n # zips the lists price and avgchange together and minuses the correspending element and creates a new list\n avgchange2 = [a - b for a, b in zip(price, avgchange)]\n del avgchange2[0]\n\n #avgchange3.append(avgchange2)\n # finds the max value in the avgchange2 list to determing the highest average change\n max_change = max(avgchange2)\n # finds the min value in the avgchange2 list to determing the lowest average change\n min_change = min(avgchange2)\n # finds the average value of the avgchange2 list\n avg_change = sum(avgchange2) / len(avgchange2)\n \n # finds the index value of the max and min change values to be used to find the corresponding date element. adds 1 as avgchange2 has 1 less element\n index_max = avgchange2.index(max_change) + 1\n index_min = avgchange2.index(min_change) + 1\n #max_change_date = \n # finds and stores the corresponding date value to the above to be used when printing the max and min values\n index_max_date = date[index_max]\n index_min_date = date[index_min]\n\n #print(round(net_total))\n #print(totalmonths)\n #print(avgchange)\n #print(avgchange2)\n #print(round(max_change))\n #print(round(min_change))\n #print(round(avg_change,2))\n #print(index_max)\n #print(index_min)\n #print(index_max_date)\n\n#print(\"Financial Analysis\")\n#print(\"_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\")\n#print(\"Total Months: \", str(totalmonths))\n#print(\"Total: $\", str(round(net_total)))\n#print(\"Average Change: $\", str(round(avg_change,2)))\n#print(\"Greatest Increase in Profits: $\", str(index_max_date), str(round(max_change)))\n#print(\"Greatest Decrease in Profits: $\", str(index_min_date), str(round(min_change)))\n \n#print(avgchange3) \n# stores the prints as seperate lines to make it easier when writing the text document. 
rounds the figures to make them look more user friendly \nline1 = (\"Financial Analysis\")\nline2 = (\"_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\")\nline3 = (f\"Total Months: {totalmonths}\")\nline4 = (f\"Total: ${(round(net_total))}\")\nline5 = (f\"Average Change: $ {(round(avg_change,2))}\")\nline6 = (f\"Greatest Increase in Profits: {index_max_date} (${(round(max_change))})\")\nline7 = (f\"Greatest Decrease in Profits: {index_min_date} (${(round(min_change))})\")\n\n# prints the above into the terminal\nprint(line1)\nprint(line2)\nprint(line3)\nprint(line4)\nprint(line5)\nprint(line6)\nprint(line7)\n\n# sets the directory to create the text file\noutput_file = os.path.join('PyBank', 'Analysis','financial_analysis.txt')\n\n# creates the output text file and writes the lines; '\\n' is used to put them onto separate lines\nwith open(output_file, \"w\") as text_file:\n    text_file.writelines([line1 + '\\n' + line2 + '\\n' + line3 + '\\n' + line4 + '\\n' + line5 + '\\n' + line6 + '\\n' + line7])\n    \n","repo_name":"matthewbelevski/python-challenge","sub_path":"PyBank/main_bank.py","file_name":"main_bank.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12926855836","text":"import getBlocklist\nimport getTaskid\nimport getSession\nimport getRyjd\nimport json\n\n\n# Performance appraisal\ndef kp(period, userid, hj):\n    # Log in with Lao Zhao's account to get the appraiser's userID; appraisal tabID=43\n    userid1 = getRyjd.getRyjd(period, 43, hj, userid)\n    session = getSession.getSession(userid1, hj)\n    # Get the taskID; appraisal tab=21\n    taskid = getTaskid.getTaskid(userid, 21, period, session, hj)\n    res = getBlocklist.getblocklist(taskid, userid, period, session, hj)\n    deptId = res[\"data\"][\"taskInfo\"][\"deptId\"]\n    blocklist = res[\"data\"][\"targetBaseInfo\"][\"blockList\"]\n    blockList1 = []\n    n = 0\n    for i in blocklist:\n        blockList1.append({\"id\": i[\"id\"], \"blockItemList\": []})\n\n        for j in i[\"blockItemList\"]:\n            if i[\"blockItemList\"] != []:\n                if i[\"type\"] == 1:\n                    blockList1[n][\"blockItemList\"].append({\n                        \"id\": j[\"id\"],\n                        \"assessMain\": {\n                            \"id\": 101,\n                            \"assessContent\": \"考评\",\n                            \"type\": 1,\n                            \"weight\": j[\"weight\"]\n                        }\n                    })\n                else:\n                    blockList1[n][\"blockItemList\"].append({\n                        \"id\": j[\"id\"],\n                        \"assessMain\": {\n                            \"id\": 4,\n                            \"assessContent\": \"考评\",\n                            \"type\": 2,\n                            \"weight\": j[\"weight\"]\n                        }\n                    })\n        n = n + 1\n    # print(blockList1)\n    data = json.dumps({\n        \"taskId\": taskid,\n        \"period\": period,\n        \"targetId\": userid,\n        \"deptId\": deptId,\n        \"blockList\": blockList1,\n        \"strength\": \"考评\",\n        \"weakness\": \"考评\",\n        \"developOption\": \"考评\",\n        \"yearEndAbility\": 101,\n        \"yearEndLevel\": \"B+\",\n        \"evaluateLevel\": \"A\",\n        \"yearSuggestScore\": \"A\",\n        \"yearEndScore\": 4.25,\n        \"adviceAbility\": 101,\n        \"finalScore\": 4,\n        \"suggestLevel\": \"A\"\n    })\n    # print(data)\n\n    headers = {'Content-Type': 'application/json', 'kbn-version': '6.4.3'}\n    url = hj + \"/cloud/review-server/target/assess/main/submit\"\n    res1 = session.post(url, data=data, headers=headers)\n    return userid1 + \" 绩效考评: \" + res1.text\n\n# print(kp(\"2020Q1\", \"H6836\", \"http://mbo.test.netease.com\"))","repo_name":"maxueting/MBO","sub_path":"kp1.py","file_name":"kp1.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70156574274","text":"while True:\n    n=int(input())\n    if n>=2 and n<=10:\n        break\n    else:\n        #print('Enter Valid Number 2 -10 !')\n        
continue\narry=[]\nfor i in range(n):\n x=int(input())\n arry.append(x)\n\narry=list(arry)\nx = max(arry)\ny = -9999999\nfor i in range(0, n):\n if arry[i] < x and arry[i] > y:\n y = arry[i]\n\nprint(y)\n\n\n","repo_name":"engnraminul/hackerrank_problem_solve","sub_path":"Problem/Runner up Score.py","file_name":"Runner up Score.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72134755715","text":"import os\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, models\nfrom torchvision.transforms import ToTensor, Lambda, Compose\nfrom torchvision import transforms as T\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset\nimport pandas as pd\nfrom PIL import Image\n\nclass BU3DFEDataset(Dataset):\n def __init__(self, annotations_file, img_dir, transform=None, target_transform=None, train=True):\n self.img_labels = pd.read_csv(annotations_file, header=None)\n self.img_dir = img_dir\n self.transform = transform\n self.target_transform = target_transform\n self.train = train\n\n def __len__(self):\n return len(self.img_labels)\n\n def __getitem__(self, idx):\n img_path = self.img_labels.iloc[idx, 0]\n if(datatype=='2D'):\n image = Image.open(self.img_dir + img_path)\n image = image.resize((224,224))\n elif(datatype=='Depth'):\n img_path = img_path.replace('2D.bmp','3D.bmp')\n image = Image.open(self.img_dir + img_path).convert('RGB')\n \n label1 = self.img_labels.iloc[idx, 1]\n label2 = self.img_labels.iloc[idx, 2]\n label3 = self.img_labels.iloc[idx, 3]\n label4 = self.img_labels.iloc[idx, 4]\n \n if self.transform:\n transform = []\n if self.train:\n transform.append(T.RandomHorizontalFlip())\n transform.append(T.ToTensor())\n transform.append(T.Normalize([0.5]*3, [0.5]*3)) \n transform = T.Compose(transform)\n image = transform(image)\n \n if self.target_transform:\n label1 = self.target_transform(label1)\n label2 = self.target_transform(label2)\n label3 = self.target_transform(label3)\n label4 = self.target_transform(label4)\n return image, label1, label2, label3, label4, img_path\n\n\ndef denorm(x):\n \"\"\"Convert the range from [-1, 1] to [0, 1].\"\"\"\n out = (x + 1) / 2\n return out.clamp_(0, 1)\n \n \n#MAIN\nimport sys\nprint('Start')\ndatatype = sys.argv[1] #2D, Depth\nconstraint = int(sys.argv[2]) #0,1,2,3,4,5,6\n\n\ntorch.manual_seed(0)\ntorch.use_deterministic_algorithms(True)\nos.environ[\"CUBLAS_WORKSPACE_CONFIG\"]=\":4096:8\"\n\n\nprint( datatype + ', ' + 'Constraint: ' + str(constraint) )\nif constraint == 0:\n folder_path = 'pretrain/'\nelif constraint == 1:\n folder_path = 'identity/'\nelif constraint == 2:\n folder_path = 'expression/'\nelif constraint == 3:\n folder_path = 'gender/'\nelif constraint == 4:\n folder_path = 'ethnicity/'\nelif constraint == 5:\n folder_path = 'cons/'\nelif constraint == 6:\n folder_path = 'de-id/'\n if datatype =='2D':\n para = 3\n else:\n para = 5\n\ndir_to_base = 'AE/' #*may require editing\ntrain_label_filename = dir_to_base + '../train_label.txt' \ntest_label_filename = dir_to_base + '../test_label.txt'\n\ndata_path = dir_to_base + '../Data/' + datatype + '/' #*may require editing\nout_path = dir_to_base + 'constraint-'+datatype+'/'+ folder_path \nif not os.path.exists(out_path):\n os.makedirs(out_path)\n\ntraining_data = BU3DFEDataset(\n annotations_file = train_label_filename,\n img_dir = data_path,\n transform=True,\n train=True,\n)\n\ntest_data = 
BU3DFEDataset(\n annotations_file = test_label_filename,\n img_dir = data_path,\n transform=True,\n train=False,\n)\n\n\nbatch_size = 16\n\n# Create data loaders.\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)\n\nprint(len(train_dataloader.dataset))\nprint(len(test_dataloader.dataset))\n\nfor X, y1, y2, y3, y4, X_path in test_dataloader:\n print(\"Shape of X [N, C, H, W]: \", X.shape)\n print(\"Shape of y1: \", y1.shape, y1.dtype)\n print(\"Shape of y2: \", y2.shape, y2.dtype)\n print(\"Shape of y3: \", y3.shape, y3.dtype)\n print(\"Shape of y4: \", y4.shape, y4.dtype)\n break\n\n\n# Display image and label.\ntrain_features, train_label1, train_label2, train_label3, train_label4, train_img_path = next(iter(train_dataloader))\nprint(f\"Feature batch shape: {train_features.size()}\")\nprint(f\"Label1 batch shape: {train_label1.size()}\")\nprint(f\"Label2 batch shape: {train_label2.size()}\")\nprint(f\"Label3 batch shape: {train_label3.size()}\")\nprint(f\"Label4 batch shape: {train_label4.size()}\")\nimg = train_features[0].squeeze().permute(1,2,0)\nlabel1 = train_label1[0]\nlabel2 = train_label2[0]\nlabel3 = train_label3[0]\nlabel4 = train_label4[0]\n\nimg = denorm(img)\n#plt.imshow(img)\n#plt.show()\nprint(f\"Label1: {label1}\")\nprint(f\"Label2: {label2}\")\nprint(f\"Label3: {label3}\")\nprint(f\"Label4: {label4}\")\n\n\n# Get cpu or gpu device for training.\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nprint(f\"Using {device} device\")\ntorch.cuda.empty_cache()\nif torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\nelse:\n map_location='cpu'\n\n \n## AE Model Integration\nfrom convae import AutoEncoder\ncae = AutoEncoder()\nif datatype=='Depth': #Enforce 1 channel output\n cae.autoencoder[12] = nn.Conv2d(128, 1, kernel_size=(1, 1), stride=(1, 1))\nif not constraint==0:\n cae.load_state_dict(torch.load(dir_to_base + 'constraint-'+datatype+'/'+ 'pretrain/'+'model.pth'))\ncae.to(device)\n\n\n## Classification Models\npretrain = True\nif constraint == 1 or constraint == 2 or constraint == 3 or constraint == 4:\n if constraint == 1:\n if datatype=='2D':\n model = models.resnet50(pretrained=pretrain) #2D, task 1, model 2\n model.fc = nn.Linear(2048, 100)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(2) + '/' +'model.pth'))\n else:\n model = models.densenet121(pretrained=pretrain) #Depth, task1, model 4\n model.classifier = nn.Linear(1024, 100)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(4) + '/' +'model.pth'))\n elif constraint == 2:\n if datatype=='2D':\n model = models.resnet50(pretrained=pretrain) #2D, task2, model 2\n model.fc = nn.Linear(2048, 6)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(2) + '/' +'model.pth'))\n else:\n model = models.densenet121(pretrained=pretrain) #Depth, task 2, model 4\n model.classifier = nn.Linear(1024, 6)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(4) + '/' +'model.pth'))\n elif constraint == 3:\n model = models.resnet50(pretrained=pretrain) #2D/Depth, task3, model 2\n model.fc = nn.Linear(2048, 2)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(2) + '/' +'model.pth'))\n elif constraint == 4:\n model = 
models.resnet50(pretrained=pretrain) #2D/Depth, task4, model 2\n model.fc = nn.Linear(2048, 6)\n model.load_state_dict(torch.load(dir_to_base + '../task/'+ folder_path + datatype + '/model' + str(2) + '/' +'model.pth'))\n \n for param in model.parameters():\n param.requires_grad = False\n model.eval()\n model.to(device) \nelif constraint == 5 or constraint == 6:\n if datatype=='2D':\n model2 = models.resnet50(pretrained=pretrain) #2D, task2, model 2\n model2.fc = nn.Linear(2048, 6)\n model2.load_state_dict(torch.load(dir_to_base + '../task/'+ 'expression/' + datatype + '/model' + str(2) + '/' +'model.pth'))\n else:\n model2 = models.densenet121(pretrained=pretrain) #Depth, task 2, model 4\n model2.classifier = nn.Linear(1024, 6)\n model2.load_state_dict(torch.load(dir_to_base + '../task/'+ 'expression/' + datatype + '/model' + str(4) + '/' +'model.pth'))\n for param in model2.parameters():\n param.requires_grad = False\n model2.eval()\n model2.to(device)\n\n model3 = models.resnet50(pretrained=pretrain) #2D, task3, model 2\n model3.fc = nn.Linear(2048, 2)\n model3.load_state_dict(torch.load(dir_to_base + '../task/'+ 'gender/' + datatype + '/model' + str(2) + '/' +'model.pth'))\n for param in model3.parameters():\n param.requires_grad = False\n model3.eval()\n model3.to(device)\n\n model4 = models.resnet50(pretrained=pretrain) #2D, task4, model 2\n model4.fc = nn.Linear(2048, 6)\n model4.load_state_dict(torch.load(dir_to_base + '../task/'+ 'ethnicity/' + datatype + '/model' + str(2) + '/' +'model.pth'))\n for param in model4.parameters():\n param.requires_grad = False\n model4.eval()\n model4.to(device)\nif constraint == 6:\n if datatype=='2D':\n model1 = models.resnet50(pretrained=pretrain) #2D, task 1, model 2\n model1.fc = nn.Linear(2048, 100)\n model1.load_state_dict(torch.load(dir_to_base + '../task/'+ 'identity/' + datatype + '/model' + str(2) + '/' +'model.pth'))\n else:\n model1 = models.densenet121(pretrained=pretrain) #Depth, task1, model 4\n model1.classifier = nn.Linear(1024, 100)\n model1.load_state_dict(torch.load(dir_to_base + '../task/'+ 'identity/' + datatype + '/model' + str(4) + '/' +'model.pth'))\n for param in model1.parameters():\n param.requires_grad = False\n model1.eval()\n model1.to(device)\n\noptimizer = torch.optim.Adam(cae.parameters(), lr=0.0002) #0.0002\nloss_fn = nn.CrossEntropyLoss()\n\n\nif constraint == 0:\n epoch_start = 0\n num_epochs = 5\nelse:\n epoch_start = 5\n num_epochs = 20\n\n\nfor epoch in range(epoch_start, num_epochs):\n cae.train()\n for i, (X, y1, y2, y3, y4, X_path) in enumerate(train_dataloader):\n X, y1, y2, y3, y4 = X.to(device), y1.to(device), y2.to(device), y3.to(device), y4.to(device)\n X_rec = cae(X)\n if datatype=='Depth': #Enforce 1 channel output \n X_rec = X_rec.repeat(1,3,1,1)\n loss_rec = torch.mean(torch.abs(X - X_rec))\n \n \n\n if constraint == 0:\n loss_constraint = 0\n elif constraint == 1:\n loss_constraint = loss_fn(model(X_rec), y1)\n elif constraint == 2:\n loss_constraint = loss_fn(model(X_rec), y2)\n elif constraint == 3:\n loss_constraint = loss_fn(model(X_rec), y3)\n elif constraint == 4:\n loss_constraint = loss_fn(model(X_rec), y4)\n elif constraint == 5:\n loss_constraint = loss_fn(model2(X_rec),y2)+loss_fn(model3(X_rec),y3)+loss_fn(model4(X_rec),y4)\n elif constraint == 6:\n loss_constraint_att = loss_fn(model2(X_rec), y2)+loss_fn(model3(X_rec),y3)+loss_fn(model4(X_rec),y4)\n loss_constraint_id = loss_fn(model1(X_rec), y1)\n loss_constraint = -para/10*loss_constraint_id+loss_constraint_att\n \n loss = loss_rec 
+ loss_constraint\n \n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (i+1) % 20 == 0:\n print ('Epoch [{}/{}], Iter [{}/{}] Loss: {:.3f} {:.3f} {:.3f}'.format(\n epoch+1, num_epochs, i+1, len(train_dataloader), \n loss_rec, loss_constraint, loss))\n\ntorch.save(cae.state_dict(), os.path.join(out_path, 'model.pth'))\nprint(\"Saved PyTorch Model State\") \n\n\n## Image Generation\ncae.eval()\n\nfrom torchvision.utils import save_image\nwith torch.no_grad():\n for i, (X, y1, y2, y3, y4, X_path) in enumerate(test_dataloader):\n X, y1, y2, y3, y4 = X.to(device), y1.to(device), y2.to(device), y3.to(device), y4.to(device)\n print ('Iter [{}/{}] '.format(i+1, len(test_dataloader)))\n\n rec = cae(X)\n rec = denorm(rec)\n\n for f,r in zip(X_path, rec):\n #print(r.shape)\n f_a = f[0:6]\n if not os.path.exists(out_path+'processed/'+f_a):\n os.makedirs(out_path+'processed/'+f_a)\n save_image(r, out_path+'processed/'+f)\n\n \n## Visual Inspection\nX, y1, y2, y3, y4, X_path = next(iter(test_dataloader))\n\nimg = X[0].squeeze().permute(1,2,0)\nimg = denorm(img)\n#plt.imshow(img)\n#plt.show()\n\nrec = cae(X.to(device)).cpu().detach()\nif datatype=='Depth':\n img = rec[0].squeeze()\nelse:\n img = rec[0].squeeze().permute(1,2,0)\nimg = denorm(img)\n#plt.imshow(img, cmap='gray')\n#plt.show()\n\nprint(torch.min(X), torch.max(X))\nprint(torch.min(rec), torch.max(rec))\n","repo_name":"kevinhmcheng/3d-face-de-id","sub_path":"AE/de-identification.py","file_name":"de-identification.py","file_ext":"py","file_size_in_byte":12634,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"72985403075","text":"import os\nfrom fpdf import FPDF\n\ndef images2pdf(outpath, images, w = 1920, h = 1080):\n pdf = FPDF()\n pdf.compress = False\n titleH = 60\n size=(h + titleH, w)\n for image in images:\n pdf.add_page(orientation = 'L', format=size, same=False)\n pdf.set_font('helvetica', size = titleH)\n pdf.cell(400, titleH, os.path.basename(image), 1, 1, 'C')\n pdf.image(image, 0, titleH + 10, w, h, 'JPG')\n\n pdf.output(outpath, \"F\")\n \nif __name__ == '__main__': \n images=['data/frame00:00:01-0.jpg', 'data/frame00:00:06-0.56.jpg']\n images2pdf('data/test.pdf', images)","repo_name":"ZedeX/extract-video-ppt","sub_path":"video2ppt/images2pdf.py","file_name":"images2pdf.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2664067301","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nDefines the names and properties of the MC datasets in signal, background and datasets dicts of dicts:\n\n* dict key: dataset name\n* ``MC``: bool, True for MC\n* ``Signal``: bool, True for signal\n* ``FileName``: root filename of dataset\n* ``Label``: plotlabel\n* ``Color``: plotcolor\n* ``XS``: cross section in pb\n* ``XSUncertainty``: dict with ``Up`` and ``Down`` cross section uncertainty\n* year name: dict containing year specific information:\n * ``KFactor``: correction factor\n * ``EventWeights``: event weights to apply for this dataset\n\"\"\"\n\n#cross section sources:\n#[0] generator cross section: https://cms-gen-dev.cern.ch/xsdb/\n#[1] ttbar NNLO: https://twiki.cern.ch/twiki/bin/view/LHCPhysics/TtbarNNLO\n#[2] WJets NNLO: https://indico.cern.ch/event/673253/contributions/2756806/attachments/1541203/2416962/20171016_VJetsXsecsUpdate_PH-GEN.pdf\n#[3] DY + diboson: https://twiki.cern.ch/twiki/bin/viewauth/CMS/StandardModelCrossSectionsat13TeV\n#[4] 
single top NLO: https://twiki.cern.ch/twiki/bin/view/LHCPhysics/SingleTopRefXsec\n#[5] W boson BR: https://pdg.lbl.gov/2021/listings/rpp2021-list-w-boson.pdf\n\n\n\nimport os\nimport pandas\n\n\ngen_json = pandas.read_json(os.path.abspath(os.path.dirname(__file__)) + '/xsecs.json')\n\ngen_weights = ['genweight/genEventSumw']\nme_weight = ['fragEventWeight_fragCP5BLVsPt'] #['MEweight_murNominal_mufNominal']\npdf_weight = [] #['PDFweight_0']\n\nweights_electron = ['tightElectrons_weight_reco_nominal', 'tightElectrons_weight_id_nominal']\nweights_muon = ['tightMuons_weight_reco_nominal', 'tightMuons_weight_id_nominal', 'tightMuons_weight_iso_nominal']\nweights_prefiring = ['L1PreFiringWeight_ECAL_Nom', 'L1PreFiringWeight_Muon_Nom']\nweights_bjets = ['btagEventWeight_deepjet_shape_nominal']\nweights_pileup = ['puWeight']\n\nweights = weights_electron + weights_muon + weights_prefiring + weights_bjets + weights_pileup\nweights_2016 = []\nweights_2017 = ['IsoMuTrigger_weight_trigger_2017_nominal', 'IsoElectronTrigger_weight_trigger_2017_nominal']\nweights_2018 = []\n\n\nsignal = {}\nfor index in gen_json.keys():\n signal['WbWbX_{}'.format(index)] = {\n 'MC': True,\n 'Signal': True,\n 'Label': r'Wb x Wb',\n 'FileName': 'WbjToLNu_4f_TuneCP5_13TeV-madgraph-pythia8',\n 'Color': 'red',\n 'XS': gen_json[index]['xsec'],\n 'XSUncertainty': {\n 'Up': 5, #TODO update\n 'Down': 5, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017\n + ['LHEWeight_width_{i}/LHESumw_width_{i}'.format(i=index)],\n },\n }\n# TODO cut option\n\n\nbackground = {\n 'ttbar_dilep': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'TTTo2L2Nu_TuneCP5_13TeV-powheg-pythia8',\n 'Label': r't$\\bar{t}$ (dilep)',\n 'Color': 'darkorange',\n 'XS': 831.76 * (1 - 0.6741)**2, # [1]*[5]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'ttbar_semilep': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'TTToSemiLeptonic_TuneCP5_13TeV-powheg-pythia8',\n 'Label': r't$\\bar{t}$ (semilep)',\n 'Color': 'gold',\n 'XS': 831.76 * (0.6741 * (1 - 0.6741) * 2), # [1]*[5]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'ttbar_had': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'TTToHadronic_TuneCP5_13TeV-powheg-pythia8',\n 'Label': r't$\\bar{t}$ (had)',\n 'Color': 'yellow',\n 'XS': 831.76 * 0.6741**2, # [1]*[5]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'WJets_0j': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'WJetsToLNu_0J_TuneCP5_13TeV-amcatnloFXFX-pythia8',\n 'Label': 'W+Jets (0j)',\n 'Color': 'limegreen',\n 'XS': 50131.98259, #[2]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'WJets_1j': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'WJetsToLNu_1J_TuneCP5_13TeV-amcatnloFXFX-pythia8',\n 'Label': 'W+Jets (1j)',\n 'Color': 'forestgreen',\n 'XS': 8875.0, #[2]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 
'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'WJets_2j': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'WJetsToLNu_2J_TuneCP5_13TeV-amcatnloFXFX-pythia8',\n 'Label': 'W+Jets (2j)',\n 'Color': 'green',\n 'XS': 3172.958208, #[2]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + me_weight + pdf_weight + weights + weights_2017,\n },\n },\n\n 'DY': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8',\n 'Label': 'Drell Yan',\n 'Color': 'aqua',\n 'XS': 6529.0, #[0]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + weights + weights_2017,\n },\n },\n\n 'WW': {\n 'MC': True,\n 'Signal': False,\n 'FileName': 'WW_TuneCP5_13TeV-pythia8',\n 'Label': 'diboson (WW)',\n 'Color': 'cornflowerblue',\n 'XS': 75.8, #[0]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + weights + weights_2017,\n },\n },\n\n 'WZ': {\n 'MC': True,\n 'Signal': False,\n 'Label': 'diboson (WZ)',\n 'FileName': 'WZ_TuneCP5_13TeV-pythia8',\n 'Color': 'royalblue',\n 'XS': 27.6, #[0]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + weights + weights_2017,\n },\n },\n\n 'ZZ': {\n 'MC': True,\n 'Signal': False,\n 'Label': 'diboson (ZZ)',\n 'FileName': 'ZZ_TuneCP5_13TeV-pythia8',\n 'Color': 'deepskyblue',\n 'XS': 12.14, #[0]\n 'XSUncertainty': {\n 'Up': 50, #TODO update\n 'Down': 50, #TODO update\n },\n '2017': {\n 'KFactor': 1.,\n 'EventWeights': gen_weights + weights + weights_2017,\n },\n },\n\n #TODO check included in signal\n # 'ST_t_top': {\n # 'MC': True,\n # 'Signal': False,\n # 'FileName': 'ST_t-channel_top_4f_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8',\n # 'Label': 't t-ch',\n # 'Color': 'indigo',\n # 'XS': 136.02, #[4]\n # 'XSUncertainty': {\n # 'Up': 50, #TODO update\n # 'Down': 50, #TODO update\n # },\n # '2017': {\n # 'KFactor': 1.,\n # 'EventWeights': gen_weights + me_weight + weights + weights_2017,\n # },\n # },\n #\n # 'ST_t_anti': {\n # 'MC': True,\n # 'Signal': False,\n # 'FileName': 'ST_t-channel_antitop_4f_InclusiveDecays_TuneCP5_13TeV-powheg-madspin-pythia8',\n # 'Label': '#bar{t} t-ch',\n # 'Color': 'rebeccapurple',\n # 'XS': 80.95, #[4]\n # 'XSUncertainty': {\n # 'Up': 50, #TODO update\n # 'Down': 50, #TODO update\n # },\n # '2017': {\n # 'KFactor': 1.,\n # 'EventWeights': gen_weights + me_weight + weights + weights_2017,\n # },\n # },\n\n #'ST_tW_top': {\n #'MC': True,\n #'Signal': False,\n #'FileName': 'ST_tW_top_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8',\n #'Label': 'WW',\n #'Color': 'darkorchid',\n #'XS': 71.7 * 0.5, #[4]\n #'XSUncertainty': {\n #'Up': 50, #TODO update\n #'Down': 50, #TODO update\n #},\n #'2017': {\n #'KFactor': 1.,\n #'EventWeights': gen_weights + me_weight + weights + weights_2017,\n #},\n #},\n\n #'ST_tW_antitop': {\n #'MC': True,\n #'Signal': False,\n #'FileName': 'ST_tW_antitop_5f_NoFullyHadronicDecays_TuneCP5_13TeV-powheg-pythia8',\n #'Label': 'WW',\n #'Color': 'darkviolet',\n #'XS': 71.7 * 0.5, #[4]\n #'XSUncertainty': {\n #'Up': 50, #TODO update\n #'Down': 50, #TODO update\n #},\n #'2017': {\n #'KFactor': 1.,\n #'EventWeights': gen_weights + me_weight + weights + weights_2017,\n #},\n #},\n\n #'ST_s': {\n #'MC': 
True,\n    #'Signal': False,\n    #'FileName': 'ST_s-channel_4f_leptonDecays_TuneCP5_13TeV-amcatnlo-pythia8',\n    #'Label': 'single t (s channel)',\n    #'Color': 'purple',\n    #'XS': 10.32 , #[4]\n    #'XSUncertainty': {\n    #'Up': 50, #TODO update\n    #'Down': 50, #TODO update\n    #},\n    #'2017': {\n    #'KFactor': 1.,\n    #'EventWeights': gen_weights + me_weight + weights + weights_2017,\n    #},\n    #},\n\n}\n\n\ndatasets = signal.copy()\ndatasets.update(background)\n\nfrom config.data import data\nall_samples = datasets.copy()\nall_samples.update(data)\n\n\n","repo_name":"WbWbX/extraction","sub_path":"config/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71419068353","text":"from math import log\nimport operator\n\ndef calcShannonEnt(dataSet):\n    \"\"\"\n    Compute the Shannon entropy\n    :param dataSet: the dataset\n    :return: float\n    \"\"\"\n    numEntries = len(dataSet)\n    labelsCounts = {}\n    for featVec in dataSet:\n        currentLabel = featVec[-1]\n        if currentLabel not in labelsCounts.keys():\n            labelsCounts[currentLabel] = 0\n        labelsCounts[currentLabel] += 1\n    shannonEnt = 0.0\n    for key in labelsCounts:\n        prob = float(labelsCounts[key]) / numEntries\n        shannonEnt -= prob * log(prob, 2)\n    return shannonEnt\n\ndef createDataSet():\n    dataSet = [[1, 1, 'yes'],\n               [1, 1, 'yes'],\n               [1, 0, 'no'],\n               [0, 1, 'no'],\n               [0, 1, 'no'],]\n    labels = ['no surfacing', 'flippers']\n    return dataSet, labels\n\ndef splitDataSet(dataSet, axis, value):\n    \"\"\"\n    Split the dataset\n    :param dataSet: the dataset to split\n    :param axis: the feature to split on\n    :param value: the feature value to keep\n    :return:\n    \"\"\"\n    retDataSet = []\n    for featVec in dataSet:\n        if featVec[axis] == value:\n            reducedFeatVec = featVec[: axis]\n            reducedFeatVec.extend(featVec[axis+1:])\n            retDataSet.append(reducedFeatVec)\n    return retDataSet\n\ndef chooseBestFeatureToSplit(dataSet):\n    \"\"\"\n    Find the best feature to split on\n    :param dataSet: the dataset\n    :return:\n    \"\"\"\n    numFeatures = len(dataSet[0]) - 1\n    baseEntropy = calcShannonEnt(dataSet)\n    bestInfoGain = 0.0; bestFeature = -1\n    for i in range(numFeatures):\n        featList = [example[i] for example in dataSet]\n        uniqueVals = set(featList)\n        newEntropy = 0.0\n        for value in uniqueVals:\n            subDataSet = splitDataSet(dataSet, i, value)\n            prob = len(subDataSet) / float(len(dataSet))\n            newEntropy += prob * calcShannonEnt(subDataSet)  # was '=+', which overwrote the running sum instead of accumulating it\n        infoGain = baseEntropy - newEntropy\n        if infoGain > bestInfoGain:\n            bestInfoGain = infoGain\n            bestFeature = i\n    return bestFeature\n\ndef majorityCnt(classList):\n    classCount = {}\n    for vote in classList:\n        if vote not in classCount.keys(): classCount[vote] = 0\n        classCount[vote] += 1\n    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n    return sortedClassCount[0][0]\n\ndef createTree(dataSet, labels):\n    \"\"\"\n    Build the decision tree\n    :param dataSet: the dataset\n    :param labels: list of feature labels\n    :return:\n    \"\"\"\n    classList = [example[-1] for example in dataSet]\n    # All class labels are identical: return that label directly\n    if classList.count(classList[0]) == len(classList):\n        return classList[0]\n    # All features are used up but the classes are still mixed: fall back to a majority vote\n    if len(dataSet[0]) == 1:  # was 'len( [0])', which is always 1 and short-circuited tree building\n        return majorityCnt(classList)\n    bestFeat = chooseBestFeatureToSplit(dataSet) # select the best feature\n    bestFeatLabel = labels[bestFeat]\n    myTree = {bestFeatLabel:{}}\n    del(labels[bestFeat])\n    featValues = [example[bestFeat] for example in dataSet]\n    uniqueVals = set(featValues)\n    for values in uniqueVals:\n        subLabels = labels[:]\n        myTree[bestFeatLabel][values] = createTree(splitDataSet(dataSet, bestFeat, values), subLabels)\n    return myTree\n\ndef classify(inputTree, 
featLabels, testVec):\n firstStr = list(inputTree.keys())[0]\n secondDict = inputTree[firstStr]\n featIndex = featLabels.index(firstStr)\n for key in secondDict.keys():\n if testVec[featIndex] == key :\n if type(secondDict[key]).__name__ == 'dict':\n classLabel = classify(secondDict[key], featLabels, testVec)\n else:\n classLabel = secondDict[key]\n return classLabel","repo_name":"UaHaLiubolun/MachingLeraningAction","sub_path":"ch03/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72727167555","text":"\"\"\"\nRun a holdout set of data through our trained RNN. Requires we first\nrun train_rnn.py and save the weights.\n\"\"\"\nfrom rnn_utils import get_network, get_network_deep, get_network_wide, get_data\nimport tflearn\n\ndef main(filename, frames, batch_size, num_classes, input_length):\n \"\"\"From the blog post linked above.\"\"\"\n # Get our data.\n X_train, _, y_train, _ = get_data(filename, frames, num_classes, input_length)\n\n # Get sizes.\n num_classes = len(y_train[0])\n\n # Get our network.\n net = get_network_wide(frames, input_length, num_classes)\n\n # Get our model.\n model = tflearn.DNN(net, tensorboard_verbose=0)\n model.load('checkpoints/rnn.tflearn')\n\n # Evaluate.\n print(model.evaluate(X_train, y_train))\n\nif __name__ == '__main__':\n filename = 'data/predicted-frames-2.pkl'\n input_length = 2\n # filename = 'data/cnn-features-frames-2.pkl'\n # input_length = 2048\n frames = 40\n batch_size = 32\n num_classes = 2\n\n main(filename, frames, batch_size, num_classes, input_length)\n","repo_name":"harvitronix/continuous-online-video-classification-blog","sub_path":"rnn_eval.py","file_name":"rnn_eval.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"61"} +{"seq_id":"23617773941","text":"#! 
/usr/bin/python\n# GCJ 2011 QR - Magicka\n\nfrom sys import stdin\n\n\ndef parse_test_case(line) :\n tokens = line.strip().split(' ')\n\n n_combos = int(tokens.pop(0))\n combos = {}\n for i in range(0, n_combos) :\n [base1, base2, result] = tokens.pop(0)\n combos[(base1, base2)] = result\n combos[(base2, base1)] = result\n\n n_opposites = int(tokens.pop(0))\n opposites = dict([(l, []) for l in 'QWERASDF'])\n for i in range(0, n_opposites) :\n [base1, base2] = tokens.pop(0)\n opposites[base1].append(base2)\n opposites[base2].append(base1)\n\n invocation = tokens[1]\n\n return (combos, opposites, invocation)\n\n\ndef cast_spell(combos, opposites, invocation) :\n current_spell = []\n\n for s in invocation :\n current_spell.append(s)\n\n if len(current_spell) >= 2 :\n c = (current_spell[-1], current_spell[-2])\n if c in combos :\n current_spell[-2:] = combos[c]\n continue\n\n for o in current_spell[:-1] :\n if o in opposites[s] :\n current_spell = []\n break\n\n return current_spell\n\n\ndef print_output(test_id, solution) :\n solution_str = str(solution).replace('\\'', '')\n print('Case #{0}: {1}'.format(test_id, solution_str))\n\n\ndef main() :\n input_data = stdin.readlines()\n\n n_tests = int(input_data[0])\n for i in range(1, n_tests + 1) :\n test_data = parse_test_case(input_data[i])\n solution = cast_spell(*test_data)\n print_output(i, solution)\n\nif __name__ == '__main__' :\n main()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/429.py","file_name":"429.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1822051568","text":"import numpy as np\nimport os\nimport torch\nimport torch.utils.data as data\nimport pandas as pd\nimport random\n\nMEAN = 58.81274059207973\nSTDDEV = 48.56406668573295\n\n\n\nclass MRDataset(data.Dataset):\n def __init__(self, root_dir, plane, indexes, train=True, transform=None, weights=None):\n super().__init__()\n self.plane = plane\n self.root_dir = root_dir\n self.train = train\n self.indexes = indexes\n self.records = pd.read_csv('metadata.csv')\n if self.train:\n self.folder_path = self.root_dir + 'train/{0}/'.format(plane)\n self.records = self.records.loc[self.records['mrnet_split'] == 0] \n \n else:\n transform = None\n self.folder_path = self.root_dir + 'valid/{0}/'.format(plane)\n self.records = self.records.loc[self.records['mrnet_split'] == 1] \n\n self.records['id'] = self.records['id'].map(\n lambda i: '0' * (4 - len(str(i))) + str(i))\n self.paths = [self.folder_path + filename +\n '.npy' for filename in self.records['id'].tolist()]\n self.transform = transform\n \n \n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, index):\n \n indexes = self.indexes\n array = np.load(self.paths[index])\n if self.transform:\n array = self.transform(array)\n else:\n array = np.stack((array,)*3, axis=1)\n array = torch.FloatTensor(array)\n\n \n ind = np.random.randint(len(indexes) + 1) -1\n while (ind == index):\n ind = np.random.randint(len(indexes) + 1) -1\n array2 = np.load(self.paths[indexes[ind]])\n if self.transform:\n array2 = self.transform(array2)\n else:\n array2 = np.stack((array2,)*3, axis=1)\n array2 = torch.FloatTensor(array2)\n \n label = torch.FloatTensor([0])\n array = (array - MEAN) / STDDEV\n array2 = (array2 - MEAN) / STDDEV\n\n return array, array2, 
label\n\n","repo_name":"niamhbelton/Siamese_Network_Bad_Data","sub_path":"models/Siamese_Network/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32431053164","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 8 10:44:41 2020\n\n@author: yhily\n\"\"\"\n\nimport tensorflow as tf\n#构建模型\nW1 = tf.Variable(tf.random.uniform([2,20],-1,1))\nB1 = tf.Variable(tf.random.uniform([ 20],-1,1))\nW2 = tf.Variable(tf.random.uniform([20,1],-1,1))\nB2 = tf.Variable(tf.random.uniform([ 1],-1,1))\n\n@tf.function\ndef predict(X):\n X = tf.convert_to_tensor(X, tf.float32)\n H1 = tf.nn.leaky_relu(tf.matmul(X,W1) + B1)\n pre = tf.sigmoid(tf.matmul(H1,W2) + B2)\n return pre\n\ndef fit(X, y):\n Optim = tf.keras.optimizers.SGD(1e-1)\n num_iter = 10000\n y_true = tf.convert_to_tensor(y, tf.float32)\n\n for step in range(num_iter):\n if step%(num_iter/10)==0:\n y_pre = predict(X)\n loss = tf.reduce_mean(tf.square(y_true - y_pre))\n print(step, \" Loss:\", loss.numpy())\n\n with tf.GradientTape() as tape:\n y_pre = predict(X)\n Loss = tf.reduce_mean(tf.square(y_true - y_pre))\n #自动求导\n Grads = tape.gradient(Loss,[W1,B1,W2,B2])\n # 反向传播并更新权值\n Optim.apply_gradients(zip(Grads,[W1,B1,W2,B2]))\n\nif __name__ == '__main__':\n # 构建数据\n X = [[0, 0], [0, 1], [1, 0], [1, 1]]\n y = [[0], [1], [1], [0]]\n fit(X, y)\n pre = predict(X)\n print(\"预测值: \", pre)\n\n\n","repo_name":"yhily/deep-learning-resource","sub_path":"srcs/chap05/5-6/bp-xor.py","file_name":"bp-xor.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"40513599381","text":"n = int(input())\r\n\r\ntotal = 0\r\ntmin = 0\r\ntmax = 0\r\ngmin = 0\r\ngmax = 0\r\n\r\nfor i in range(n):\r\n m = int(input())\r\n total += m\r\n tmax = m if tmax < 0 else tmax + m\r\n tmin = m if tmin > 0 else tmin + m\r\n gmax = max(gmax, tmax)\r\n gmin = min(gmin, tmin)\r\n\r\nprint(max(gmax, total - gmin))\r\n","repo_name":"GuuJiang/51nod","sub_path":"1050.py","file_name":"1050.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11896835875","text":"import argparse\nimport os\nimport pandas as pd\n\n\ndef split_file(file_path, max_lines):\n # get the name and the extension of the input file\n file_name, file_extension = os.path.splitext(file_path)\n if file_extension == \".xlsx\":\n # read the Excel file and get the first sheet\n df = pd.read_excel(file_path, sheet_name=0)\n elif file_extension == \".csv\":\n # read the CSV file\n df = pd.read_csv(file_path)\n else:\n print(\"Invalid file format. 
Only XLSX and CSV files are supported.\")\n return\n # extract the header row\n header = df[:1]\n # extract the data rows\n data_rows = df[1:]\n chunk_number = 0\n start_row_index = 0\n while start_row_index < len(data_rows):\n # get the end row index of the chunk\n end_row_index = min(start_row_index + max_lines, len(data_rows))\n # create a new chunk DataFrame\n chunk_df = pd.concat(\n [header, data_rows[start_row_index:end_row_index]])\n # save the chunk DataFrame to a new file\n chunk_df.to_csv(\n f\"{file_name}_{chunk_number}.csv\", index=False)\n # increment the chunk number and start row index\n chunk_number += 1\n start_row_index = end_row_index\n\n\n# parse command line arguments\nparser = argparse.ArgumentParser(\n description='Split a CSV or XLSX file into multiple smaller files.')\nparser.add_argument('file_path', help='Path to the input file')\nparser.add_argument('max_lines', type=int,\n help='Maximum number of rows per output file', default=10000)\n\nargs = parser.parse_args()\n\n# call the split_file function with the command line argument values\nsplit_file(args.file_path, args.max_lines)\n","repo_name":"eabasir/data-split","sub_path":"src/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25376864345","text":"#!/usr/bin/env python\n\"\"\" Standard model visualization routines\n\"\"\"\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport re\nfrom glob import glob\nimport argparse\nimport sys\n\nfrom aetherpy.io import read_routines\nfrom aetherpy.utils import inputs, time_conversion\nfrom aetherpy.plot import data_prep, movie_routines\n\ndef get_args():\n\n parser = argparse.ArgumentParser(\n description = 'Plot Aether / GITM model results')\n \n parser.add_argument('-list', \\\n action='store_true', default = False, \\\n help = 'list variables in file')\n\n parser.add_argument('-timeplot', \\\n action='store_true', default = False, \\\n help = 'Plot integrated (or mean) value vs. 
time')\n\n parser.add_argument('-mean', \\\n action='store_true', default = False, \\\n help = 'Plot mean value instead of integrated value')\n \n parser.add_argument('-var', \\\n default = 3, type = int, \\\n help = 'variable to plot (number)')\n parser.add_argument('-cut', metavar = 'cut', default ='alt', \\\n choices = ['alt', 'lat', 'lon'], \n help = 'alt,lat,lon : which cut you would like')\n parser.add_argument('-ext', default ='png', \\\n choices = ['png', 'jpg', 'pdf'], \n help = 'plot type file extention')\n parser.add_argument('-winds', default = False,\\\n help='overplot winds', \\\n action=\"store_true\")\n parser.add_argument('-alt', metavar = 'alt', default =400.0, type = int, \\\n help = 'altitude : alt in km (closest)')\n parser.add_argument('-lat', metavar = 'lat', default =-100.0, \\\n help = 'latitude : latitude in degrees (closest)')\n parser.add_argument('-lon', metavar = 'lon', default =-100.0,\\\n help = 'longitude in degrees (closest)')\n parser.add_argument('-alog', default = False,\n action=\"store_true\",\n help = 'plot the log of the variable')\n parser.add_argument('-IsLog', default =False,\n help='plot the log of the variable', \n action=\"store_true\") \n parser.add_argument('-diff', default = False, \n action = 'store_true',\n help = 'plot difference of files (2 files needed)')\n parser.add_argument('-mkv',\n action = 'store_true',\n default = False,\n help = 'movie format = mkv')\n parser.add_argument('-mp4',\n action = 'store_true',\n default = True,\n help = 'movie format = mp4')\n parser.add_argument('-gif',\n action='store_true',\n default = False, \n help = 'movie format = gif')\n parser.add_argument('-movie', default = False,\\\n action='store_true',\n help = 'Make a movie out of results')\n parser.add_argument('-tec', default = False, \\\n action='store_true',\n help = 'plot total electron content (TEC)')\n parser.add_argument('-rate', default =30,\\\n help = 'framerate for movie')\n parser.add_argument('filelist', nargs='+', \\\n help = 'list files to use for generating plots')\n \n args = parser.parse_args()\n\n return args\n\n\n## ----------------------------------------------------------------------------\n## Define the support routines\n#\n#def get_help(file_vars=None):\n# \"\"\" Provide string explaining how to run the command line interface\n#\n# Parameters\n# ----------\n# file_vars : list or NoneType\n# List of file variables or None to exclude this output (default=None)\n#\n# Returns\n# -------\n# help_str : str\n# String with formatted help statement\n#\n# \"\"\"\n#\n# mname = os.path.join(\n# os.path.commonpath([inputs.__file__, data_prep.__file__]),\n# 'run_plot_model_results.py') if __name__ == '__main__' else __name__\n#\n# help_str = 'Usage:\\n{:s} -[flags] [filenames]\\n'.format(mname)\n# help_str += 'Flags:\\n'\n# help_str += ' -help : print this message, include filename for '\n# help_str += 'variable names and indices\\n'\n# help_str += ' -var=number : index of variable to plot\\n'\n# help_str += ' -cut=alt, lat, or lon : which cut you would like\\n'\n# help_str += ' -alt=number : alt in km or grid number (closest)\\n'\n# help_str += ' -lat=number : latitude in degrees (closest)\\n'\n# help_str += ' -lon=number: longitude in degrees (closest)\\n'\n# help_str += ' -log : plot the log of the variable\\n'\n# help_str += ' -winds : overplot winds\\n'\n# help_str += ' -tec : plot the TEC variable\\n'\n# help_str += ' -movie=number : provide a positive frame rate to '\n# help_str += 'create a movie\\n'\n# help_str += ' -ext=str : figure 
or movie extension\\n'\n# help_str += 'At end, list the files you want to plot. This code should '\n# help_str += 'work with either GITM files (*.bin) or Aether netCDF files '\n# help_str += '(*.nc)'\n#\n# if file_vars is not None:\n# help_str += \"File Variables (index, name):\\n\"\n# for ivar, var in enumerate(file_vars):\n# help_str += \" ({:d}, {:s})\\n\".format(ivar, var)\n#\n# return help_str\n#\n#\n#def get_command_line_args(argv):\n# \"\"\" Parse the arguements and set to a dictionary\n#\n# Parameters\n# ----------\n# argv : list\n# List of arguments fed on the command line\n#\n# Returns\n# -------\n# args : dict\n# A dictionary containing information about arguements, including:\n# filelist (list of filenames), gitm (flag that is true for GITM input,\n# determined by examining filelist naming convention),\n# var (variable index to plot), cut (coordinate to hold constant),\n# diff (difference with other plots),\n# movie (framerate for movie, which is > 0 if a movie is desired),\n# ext (output extension), winds (flag to plot with winds),\n# alt (to plot), lat (to plot), lon (to plot),\n# log (flag to use log scale), and help (flag to display help)\n#\n# \"\"\"\n# # Initialize the arguments to their default values\n# args = {'filelist': [], 'log': False, 'var': 15, 'alt': 400, 'tec': False,\n# 'lon': np.nan, 'lat': np.nan, 'cut': 'alt', 'winds': False,\n# 'diff': False, 'IsGitm': False, 'HasHeader': False, 'movie': 0,\n# 'ext': 'png'}\n#\n# arg_type = {'filelist': list, 'log': bool, 'var': int, 'alt': int,\n# 'tec': bool,\n# 'lon': float, 'lat': float, 'cut': str, 'help': bool,\n# 'winds': bool, 'diff': bool, 'IsGitm': bool, 'HasHeader': bool,\n# 'tec': bool,\n# 'movie': int, 'ext': str}\n#\n# # If there is input, set default help to False\n# args['help'] = False if len(argv) > 0 else True\n#\n# # Cycle through all arguments except the first, saving input\n# for arg in argv:\n# # Treat the file list and formatting seperately\n# if arg.find('-') == 0:\n# # This is not a filename, remove the dash to get the key\n# split_arg = arg.split('=')\n# akey = split_arg[0][1:]\n#\n# # Get the argument value as the desired type\n# if akey not in arg_type.keys():\n# raise ValueError(''.join(['unknown command line input, ',\n# arg, ', try -help for details']))\n#\n# if len(split_arg) == 1:\n# if arg_type[akey] == bool:\n# arg_val = True\n# else:\n# raise ValueError('expected equality after flag {:}'.format(\n# akey))\n# else:\n# if arg_type[akey] == int:\n# arg_val = int(split_arg[1])\n# elif arg_type[akey] == float:\n# arg_val = float(split_arg[1])\n# elif arg_type[akey] == str:\n# arg_val = split_arg[1]\n# else:\n# # This is boolean input\n# arg_val = inputs.bool_string(split_arg[1])\n#\n# # Assign the output\n# if akey.find('tec') == 0:\n# args['var'] = 34\n# else:\n# args[akey] = arg_val\n# else:\n# # Save the filenames\n# args['filelist'].append(arg)\n#\n# m = re.match(r'(.*)bin',arg)\n# if m:\n# args['IsGitm'] = 1\n# args['HasHeader'] = 0\n# # check for a header file:\n# checkFile = glob(m.group(1)+\"header\")\n# if (len(checkFile) > 0):\n# if (len(checkFile[0]) > 1):\n# args['HasHeader'] = 1\n# else:\n# args['IsGitm'] = 0\n#\n# # Update default movie extention for POSIX systems\n# if args['movie'] > 0 and args['ext'] == 'png':\n# if (os.name == \"posix\"):\n# args['ext'] = \"mkv\"\n# else:\n# args['ext'] = \"mp4\"\n#\n# return args\n\n\ndef determine_file_type(file):\n\n IsGitm = False\n HasHeader = False\n m = re.match(r'(.*)bin', file)\n if m:\n IsGitm = True\n # check for a header file:\n 
checkFile = glob(m.group(1)+\"header\")\n if (len(checkFile) > 0):\n if (len(checkFile[0]) > 1):\n HasHeader = True\n\n return IsGitm, HasHeader\n\ndef fix_vars(vars):\n newvars = []\n for v in vars:\n nv = re.sub('!U', '', v)\n nv = re.sub('!N', '', nv)\n nv = re.sub('!D', '', nv)\n newvars.append(nv)\n\n return newvars\n\n# ----------------------------------------------------------------------------\n# Define the main plotting routine\n\ndef plot_model_results():\n\n # Get the input arguments\n args = get_args()\n\n # determine what kind of files we are dealing with\n IsGitm, HasHeader = determine_file_type(args.filelist[0])\n \n if ((IsGitm) and (not HasHeader)):\n header = read_routines.read_gitm_headers(args.filelist, finds = 0)\n else:\n if (HasHeader):\n header = read_routines.read_aether_ascii_header(args.filelist)\n IsGitm = 0\n else:\n header = read_routines.read_aether_header(args.filelist)\n\n header['vars'] = fix_vars(header['vars'])\n \n if (args.list):\n for k, v in header.items():\n if (k != 'vars'):\n print(k, '-> ', v)\n else:\n print('vars : ')\n for i, var in enumerate(v):\n print(i, var)\n exit()\n \n if (args.var >= len(header[\"vars\"])):\n raise ValueError(\"requested variable doesn't exist: {:d}>{:d}\".format(\n args.var, len(header[\"vars\"])))\n\n # Define the plotting inputs\n plot_vars = [0, 1, 2, args.var]\n\n # Update plotting variables to include the wind, if desired\n if args.winds:\n plot_vars.append(16 if args.cut in ['alt', 'lat'] else 17)\n plot_vars.append(18 if args.cut in ['lat', 'lon'] else 17)\n all_winds_x = []\n all_winds_y = []\n\n # Prepare to load the desired file data\n all_2dim_data = []\n all_times = []\n all_int_data = []\n\n for j, filename in enumerate(args.filelist):\n # Read in the data file\n if IsGitm:\n data = read_routines.read_gitm_file(filename, plot_vars)\n ivar = args.var\n else:\n if j == 0:\n var_list = []\n for pvar in plot_vars:\n var_list.append(header[\"vars\"][pvar])\n if (HasHeader):\n data = read_routines.read_aether_one_binary_file(header, j, plot_vars)\n ivar = args.var\n else:\n data = read_routines.read_aether_file(filename, var_list)\n ivar = 3\n\n # For the first file, initialize the necessary plotting data\n if j == 0:\n # Get 1D arrays for the coordinates\n alts = data[2][0][0] / 1000.0 # Convert from m to km\n lons = np.degrees(data[0][:, 0, 0]) # Convert from rad to deg\n lats = np.degrees(data[1][0, :, 0]) # Convert from rad to deg\n # Find the desired index to cut along to get a 2D slice\n isgrid = False\n if (args.cut == 'alt'):\n pos = args.alt\n if (len(alts) == 1):\n print(\"Only one alt found, setting alt pos = 0\");\n pos = 0\n isgrid = True\n lat2d = data[1][:, :, 0] # Convert from rad to deg\n dlon = data[0][1, 0, 0] - data[0][0, 0, 0]\n dlat = data[1][0, 1, 0] - data[1][0, 0, 0]\n area = np.cos(lat2d) * dlon * dlat *((6372.0 + 100.0)*1000.0)**2\n int_area = np.sum(area)\n if (args.cut == 'lon'):\n pos = args.lon\n if (args.cut == 'lat'):\n pos = args.lat\n \n icut, cut_data, x_pos, y_pos, z_val = data_prep.get_cut_index(\n lons, lats, alts, pos, isgrid, args.cut)\n\n if (args.cut == 'alt'):\n int_data = data[ivar][cut_data] * area\n if (args.mean):\n int_data = int_data / int_area\n all_int_data.append(np.sum(int_data))\n \n # Save the time data\n all_times.append(data[\"time\"])\n\n # Save the z-axis data\n if args.tec:\n all_2dim_data.append(data_prep.calc_tec(alts, data[ivar], 2, -4))\n else:\n all_2dim_data.append(data[ivar][cut_data])\n\n if (args.winds):\n 
all_winds_x.append(data[plot_vars[-2]][cut_data])\n                all_winds_y.append(data[plot_vars[-1]][cut_data])\n\n    # Convert data list to a numpy array\n    all_2dim_data = np.array(all_2dim_data)\n\n    if args.winds:\n        all_winds_x = np.array(all_winds_x)\n        all_winds_y = np.array(all_winds_y)\n\n    # If desired, take the log of the data\n    if args.alog:\n        all_2dim_data = np.log10(all_2dim_data)\n\n    # Define plotting limits\n    symmetric = False\n    cmap = mpl.cm.plasma\n\n    maxi = all_2dim_data.max() * 1.01\n    mini = all_2dim_data.min() * 0.99\n\n    factorString = ''\n    if ((mini < 0.0) and (not args.alog)):\n        symmetric = True\n        cmap = mpl.cm.bwr\n        maxi = abs(all_2dim_data).max() * 1.05\n        mini = -maxi\n    else:\n        if (not args.alog):\n            if ((np.log10(maxi) > 5.0) or (np.log10(maxi) < -5.0)):\n                factor = 10**float(int(np.log10(maxi)))\n                all_2dim_data = all_2dim_data / factor\n                maxi = maxi / factor\n                mini = mini / factor\n                factorString = '(x%7.1e)' % factor\n\n    if args.cut == 'alt':\n\n        mask_north = ((y_pos >= 40) & (y_pos <= 90.0))\n        mask_south = ((y_pos <= -40) & (y_pos >= -90.0))\n        plot_north = mask_north.max()\n        plot_south = mask_south.max()\n\n        if plot_north:\n            if symmetric:\n                maxi_north = abs(all_2dim_data[:, :, mask_north]).max() * 1.05\n                mini_north = -maxi_north\n            else:\n                maxi_north = all_2dim_data[:, :, mask_north].max() * 1.05\n                mini_north = all_2dim_data[:, :, mask_north].min() * 0.95\n\n        if plot_south:\n            if symmetric:\n                maxi_south = abs(all_2dim_data[:, :, mask_south]).max() * 1.05\n                mini_south = -maxi_south\n            else:\n                maxi_south = all_2dim_data[:, :, mask_south].max() * 1.05\n                mini_south = all_2dim_data[:, :, mask_south].min() * 0.95\n\n    # Define plot range\n    minx = (x_pos[1] + x_pos[2]) / 2.0\n    maxx = (x_pos[-2] + x_pos[-3]) / 2.0\n    miny = (y_pos[1] + y_pos[2]) / 2.0\n    maxy = (y_pos[-2] + y_pos[-3]) / 2.0\n\n    # Prepare the output filename\n    filename = \"var{:02d}_{:s}{:03d}\".format(args.var, args.cut, icut)\n\n    if args.movie > 0:\n        img_file_fmt = movie_routines.setup_movie_dir(filename)\n    else:\n        img_file_fmt = filename+'_{:}.'+args.ext\n\n    if (args.timeplot):\n        fig = plt.figure(figsize=(10, 8.5))\n        ax = fig.add_subplot(111)\n        ax.plot(all_times, all_int_data)\n\n        start = all_times[0].strftime('%b %d, %Y %H:%M')\n        end = all_times[-1].strftime('%b %d, %Y %H:%M')\n        ax.set_xlabel(start + ' to ' + end)\n        if (args.mean):\n            type = 'mean'\n        else:\n            type = 'integral'\n        ax.set_ylabel('Global '+type+' (' + header[\"vars\"][args.var] + ')')\n\n        title = 'Global '+type+' of ' + header[\"vars\"][args.var]\n        title = title + ' at {:.2f} km'.format(z_val)\n        ax.set_title(title)\n\n        stime = all_times[0].strftime('%y%m%d')\n        fig.savefig(filename+'_'+stime+'.'+args.ext)\n        exit()\n\n    # Create a plot for each time\n    for itime, utime in enumerate(all_times):\n        # Initialize the figure\n        fig = plt.figure(constrained_layout=False, figsize=(10, 8.5))\n\n        gs1 = mpl.gridspec.GridSpec(nrows=2, ncols=2, wspace=0.0, hspace=0)\n        gs = mpl.gridspec.GridSpec(nrows=2, ncols=2, wspace=-0.05,\n                                   left=0.02, right=0.95,\n                                   top = 0.99, bottom = 0.05)\n        #ax = fig.add_subplot(gs1[1, 0:2])\n        ax = fig.add_axes([0.07, 0.06, 0.98, 0.48])\n\n        # Plot the global data set (square plot at bottom if three plots):\n\n        dx = (x_pos[1] - x_pos[0])/2.0\n        xp = np.append(x_pos - dx, x_pos[-1:]+dx)\n        dy = (y_pos[1] - y_pos[0])/2.0\n        yp = np.append(y_pos - dy, y_pos[-1]+dy)\n        con = ax.pcolormesh(xp, yp, all_2dim_data[itime].transpose(),\n                            vmin=mini, vmax=maxi, cmap=cmap, shading='auto')\n\n        # Add the winds, if desired\n        if args.winds:\n            ax.quiver(x_pos, y_pos, 
all_winds_x[itime].transpose(),\n all_winds_y[itime].transpose())\n ax.set_ylim([miny, maxy])\n ax.set_xlim([minx, maxx])\n\n # Set the labels and aspect ratio\n ax.set_title(\"{:s}; {:s}: {:.2f} {:s}\".format(\n utime.strftime(\"%d %b %Y %H:%M:%S UT\"), args.cut, z_val,\n 'km' if args.cut == 'alt' else r'$^\\circ$'))\n ax.set_xlabel(r'Latitude ($^\\circ$)' if args.cut == 'lon'\n else r'Longitude ($^\\circ$)')\n ax.set_ylabel(r'Latitude ($^\\circ$)' if args.cut == 'alt'\n else r'Altitude (km)')\n if args.cut == 'alt':\n ax.set_aspect(1.0)\n\n # Set the colorbar\n cbar = fig.colorbar(con, ax=ax, shrink=0.75, pad=0.02)\n cbar.set_label(header[\"vars\"][args.var] + factorString, rotation=90)\n\n # If this is an altitude slice, add polar dials\n if args.cut == 'alt' and (plot_north or plot_south):\n # Set the common inputs\n shift = time_conversion.calc_time_shift(utime)\n\n #xlabels = ['12', '18', '00']\n #xlabelpos = [np.pi/2, np.pi, 3*np.pi/2]\n xlabels = []\n xlabelpos = []\n ylabels = [r'80$^\\circ$', r'70$^\\circ$', r'60$^\\circ$',\n r'50$^\\circ$']\n\n ylabelpos = [10.0, 20.0, 30.0, 40.0]\n xticks = np.arange(0, 2 * np.pi, np.pi / 2.0)\n yticks = np.arange(10, 50, 10)\n\n if plot_north:\n # Top Left Graph Northern Hemisphere\n #ax2 = fig.add_subplot(gs[0, 0], projection='polar')\n ax2 = fig.add_axes([0.06, 0.55, 0.43, 0.43], projection='polar')\n yp = 90.0 - y_pos[mask_north]\n dy = (int(100.0*(yp[1]-yp[0]))/100.0)/2.0\n yp = np.append(yp - dy, yp[-1] + dy)\n xp = np.radians(x_pos + shift - 90.0)\n dx = (xp[1] - xp[0])/2\n xp = np.append(xp - dx, xp[-1] + dx)\n z = all_2dim_data[itime][:, mask_north].transpose()\n ax2.grid(False)\n conn = ax2.pcolormesh(xp, yp,\n z,\n shading = 'auto',\n vmin=mini_north, vmax=maxi_north,\n cmap=cmap)\n ax2.set_xticks(xlabelpos)\n ax2.set_xticklabels(xlabels)\n ax2.text(-np.pi/2, 45.0, '00 LT',\n verticalalignment='top',\n horizontalalignment='center')\n ax2.text(np.pi/2, 45.0, '12 LT',\n verticalalignment='bottom',\n horizontalalignment='center')\n ax2.text(-np.pi, 47.0, '18 LT',\n verticalalignment='center',\n horizontalalignment='center',\n rotation = 90)\n ax2.text(3*np.pi/4, 45.0, 'North',\n verticalalignment='bottom',\n horizontalalignment='center',\n rotation = 45)\n ax2.set_yticks(ylabelpos)\n ax2.set_yticklabels(ylabels)\n ax2.grid(linestyle=':', color='black')\n ax2.set_xticks(xticks)\n ax2.set_yticks(yticks)\n ax2.set_ylim([0, 45])\n cbar2 = fig.colorbar(conn, ax=ax2, shrink=0.5, pad=0.01)\n cbar2.set_label(header[\"vars\"][args.var] + factorString, rotation=90) \n\n if plot_south:\n # Top Right Graph Southern Hemisphere\n rad, theta = np.meshgrid(90.0 + y_pos[mask_south],\n np.radians(x_pos + shift - 90.0))\n #ax3 = fig.add_subplot(gs[0, 1], projection='polar')\n ax3 = fig.add_axes([0.54, 0.55, 0.43, 0.43], projection='polar')\n\n yp = 90.0 + y_pos[mask_south]\n dy = (int(100.0*(yp[1]-yp[0]))/100.0)/2.0\n yp = np.append(yp - dy, yp[-1] + dy)\n xp = np.radians(x_pos + shift - 90.0)\n dx = (xp[1]-xp[0])/2.0\n xp = np.append(xp - dx, xp[-1] + dx)\n z = all_2dim_data[itime][:, mask_south].transpose()\n ax3.grid(False)\n cons = ax3.pcolormesh(xp, yp, z,\n shading = 'auto',\n vmin=mini_south, vmax=maxi_south, cmap=cmap)\n ax3.set_xticks(xlabelpos)\n ax3.set_xticklabels(xlabels)\n ax3.text(-np.pi/2, 45.0, '00 LT',\n verticalalignment='top',\n horizontalalignment='center')\n ax3.text(np.pi/2, 45.0, '12 LT',\n verticalalignment='bottom',\n horizontalalignment='center')\n ax3.text(-np.pi, 47.0, '18 LT',\n verticalalignment='center',\n 
horizontalalignment='center',\n rotation = 90)\n ax3.text(3*np.pi/4, 45.0, 'South',\n verticalalignment='bottom',\n horizontalalignment='center',\n rotation = 45)\n ax3.set_yticks(ylabelpos)\n ax3.set_yticklabels(ylabels)\n ax3.grid(linestyle=':', color='black')\n ax3.set_xticks(xticks)\n ax3.set_yticks(yticks)\n ax3.set_ylim([0, 45])\n cbar3 = fig.colorbar(cons, ax=ax3, shrink=0.5, pad=0.01)\n cbar3.set_label(header[\"vars\"][args.var] + factorString, rotation=90) \n\n # Format the output filename\n if args.movie > 0:\n fmt_input = itime\n else:\n fmt_input = utime.strftime('%y%m%d_%H%M%S')\n outfile = img_file_fmt.format(fmt_input)\n\n # Save the output file\n print(\"Writing file : \", outfile)\n fig.savefig(outfile)\n plt.close(fig)\n\n # Create a movie, if desired\n if args.movie > 0:\n movie_routines.save_movie(filename, ext=args.ext,\n rate=args.rate)\n \n return\n\n\n# Needed to run main script as the default executable from the command line\nif __name__ == '__main__':\n plot_model_results()\n","repo_name":"aaronjridley/GITM","sub_path":"srcPython/run_plot_model_results.py","file_name":"run_plot_model_results.py","file_ext":"py","file_size_in_byte":24723,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"6504080026","text":"import adv_method\nimport model_loader\nimport user as user_module\nimport config as config_module\nimport utils as utils_module\nimport dataloader as dataloader_module\nimport os\nimport argparse\nimport yaml\nimport torch\n\ndef main():\n utils_module.setup_seed(2022)\n\n ## Required parameters for target model and hyper-parameter\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', \n default=None,\n help=\"The config parameter yaml file contains the parameters of the dataset, target model and attack method\",\n type=str)\n parser.add_argument(\"--device_id\", type=int, default=0)\n\n args = parser.parse_args()\n\n\n ## Universal parameters\n config = config_module.Config()\n\n if args.config:\n assert os.path.exists(args.config), \"There's no '\" + args.config + \"' file.\"\n with open(args.config, \"r\") as load_f:\n config_parameter = yaml.load(load_f)\n config.load_parameter(config_parameter)\n\n ## Save the parameters into log\n Log = config.log_output()\n\n ## Configure the GPU\n device = torch.device('cuda', args.device_id)\n\n ## Prepare the dataset\n idx2word, word2idx = utils_module.load_embedding_dict_info(config.Common['embedding_path'])\n adv_dataset = getattr(dataloader_module, config.CONFIG['dataset_name'])(**getattr(config, config.CONFIG['dataset_name']), word2id = word2idx)\n\n ## Prepare the target model\n model = getattr(model_loader, 'load_' + config.CONFIG['model_name'])(**getattr(config, config.CONFIG['model_name']))\n model.to(device)\n model.eval()\n\n ## Prepare the attack method\n attack_parameter = getattr(config, config.CONFIG['attack_name'])\n attack_name = config.CONFIG['attack_name']\n attack_method = getattr(adv_method, attack_name)(model, device, **attack_parameter)\n\n ## Prepare the attacker\n attacker = user_module.Attacker(model, config, attack_method)\n \n\n ## Start the attack\n log = attacker.start_attack(adv_dataset)\n Log.update(log)\n\n \n ## Save and print the Log\n print(config.Checkpoint['log_dir'], config.Checkpoint['log_filename'])\n utils_module.ensure_dir(config.Checkpoint['log_dir'])\n filename = os.path.join(config.Checkpoint['log_dir'], config.Checkpoint['log_filename'])\n f = open(filename,'w')\n\n for key, value in 
Log.items():\n if 'print' not in key:\n print(' {:15s}: {}'.format(str(key), value))\n log = {}\n log[key] = value\n utils_module.log_write(f, log)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JHL-HUST/SparseMA","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14297771229","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 10 07:27:09 2020\n\n@author: mathvolcano\n\nDaily Temperatures\nhttps://leetcode.com/explore/learn/card/queue-stack/230/usage-stack/1363/\n\"\"\"\n\ndef dailyTemperatures(T):\n result = [0] * len(T)\n stack = [] #indexes from hottest to coldest\n for i in range(len(T) - 1, -1, -1):\n while stack and T[i] >= T[stack[-1]]:\n stack.pop() #remove lower and not soonest \n if stack:\n result[i] = stack[-1] - i\n stack.append(i)\n return result \n\nT = [73, 74, 75, 71, 69, 72, 76, 73]\n#Output [1, 1, 4, 2, 1, 1, 0, 0]\n","repo_name":"mathvolcano/leetcode","sub_path":"dailyTemperatures.py","file_name":"dailyTemperatures.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11998813775","text":"import json\n\n\nclass WithCache():\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.reset()\n\n def get_cache_key(self, key):\n return json.dumps(key)\n\n def cache_by(self, primary, secondary, method):\n cache = self._get_secondary(primary)\n secondary = self.get_cache_key(secondary)\n\n if secondary not in cache:\n cache[secondary] = method()\n\n return cache[secondary]\n\n def _get_secondary(self, primary):\n if not hasattr(self, '_cache'):\n self.reset()\n\n primary = self.get_cache_key(primary)\n\n if primary not in self._cache:\n self._cache[primary] = {}\n\n return self._cache[primary]\n\n async def cache_by_async(self, primary, secondary, method):\n cache = self._get_secondary(primary)\n secondary = self.get_cache_key(secondary)\n\n if secondary not in cache:\n cache[secondary] = await method()\n\n return cache[secondary]\n\n def reset(self):\n self._cache = {}\n","repo_name":"aleontiev/adbc","sub_path":"adbc/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"38547647729","text":"\"\"\"\n @Time : 1/12/21 18:41\n @Author : TaylorMei\n @Email : mhy666@mail.dlut.edu.cn\n \n @Project : iccv\n @File : infer.py\n @Function:\n \n\"\"\"\nimport numpy as np\nimport os\nimport time\nimport sys\nsys.path.append(\"..\")\n\nimport torch\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom torchvision import transforms\n\nfrom config import msd_testing_root\nfrom config import more_testing_root\nfrom misc import check_mkdir, crf_refine\n# from mirrornet_plus import MirrorNet_Plus\nfrom mirrornet_plus_gb import MirrorNet_Plus_GB\n# from mirrornet_plus_rb import MirrorNet_Plus_RB\n\ndevice_ids = [0]\ntorch.cuda.set_device(device_ids[0])\n\nckpt_path = './ckpt'\n# ckpt_path = './'\n# ckpt_path = '/media/iccd/disk1/tip_mirror_ckpt'\n# exp_name = 'MirrorNet_Plus_3'\n# exp_name = 'results'\nexp_name = 'MirrorNet_Plus_GB_1'\n# exp_name = 'MirrorNet_Plus_RB_2'\n# pth_name = 'epoch_190_ber_6.03693.pth'\n# pth_name = 'MirrorNet+.pth'\npth_name = 'epoch_200_ber_6.22.pth'\nargs = {\n 'snapshot': '200',\n 'scale': 384,\n 'crf': 
True\n}\n\nimg_transform = transforms.Compose([\n transforms.Resize((args['scale'], args['scale'])),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n])\n\nto_test = {'MSD': msd_testing_root}\n# to_test = {'more': more_testing_root}\n\nto_pil = transforms.ToPILImage()\n\n\ndef main():\n net = MirrorNet_Plus_GB().cuda(device_ids[0])\n\n if len(args['snapshot']) > 0:\n print('Load snapshot {} for testing'.format(args['snapshot']))\n net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, pth_name)))\n print('Load {} succeed!'.format(os.path.join(ckpt_path, exp_name, pth_name)))\n\n net.eval()\n with torch.no_grad():\n for name, root in to_test.items():\n img_list = [img_name for img_name in os.listdir(os.path.join(root, 'image'))]\n start = time.time()\n for idx, img_name in enumerate(img_list):\n print('predicting for {}: {:>4d} / {}'.format(name, idx + 1, len(img_list)))\n # check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (exp_name, pth_name[:-4]) + \"_crf\"))\n check_mkdir(os.path.join(ckpt_path, exp_name, pth_name[:-4]))\n img = Image.open(os.path.join(root, 'image', img_name))\n if img.mode != 'RGB':\n img = img.convert('RGB')\n print(\"{} is a gray image.\".format(name))\n w, h = img.size\n img_var = Variable(img_transform(img).unsqueeze(0)).cuda(device_ids[0])\n f_4, f_3, f_2, f_1 = net(img_var)\n f_4 = f_4.data.squeeze(0).cpu()\n f_3 = f_3.data.squeeze(0).cpu()\n f_2 = f_2.data.squeeze(0).cpu()\n f_1 = f_1.data.squeeze(0).cpu()\n f_4 = np.array(transforms.Resize((h, w))(to_pil(f_4)))\n f_3 = np.array(transforms.Resize((h, w))(to_pil(f_3)))\n f_2 = np.array(transforms.Resize((h, w))(to_pil(f_2)))\n f_1 = np.array(transforms.Resize((h, w))(to_pil(f_1)))\n if args['crf']:\n f_1 = crf_refine(np.array(img.convert('RGB')), f_1)\n\n Image.fromarray(f_1).save(\n # os.path.join(ckpt_path, exp_name, '%s_%s' % (exp_name, pth_name[:-4]) + \"_crf\",\n # img_name[:-4] + \".png\"))\n os.path.join(ckpt_path, exp_name, pth_name[:-4], img_name[:-4] + \".png\"))\n\n end = time.time()\n print(\"Average Time Is : {:.2f}\".format((end - start) / len(img_list)))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Mhaiyang/iccv","sub_path":"plus/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72618344195","text":"# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\nPROJECT_ROOT = BASE_DIR\nAXES_FAILURE_LIMIT=10\nCORS_ORIGIN_ALLOW_ALL = True\nDATABASES = {\n 'default': {\n # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'pwa_sc', # Or path to database file if using sqlite3./AP\n 'USER': 'postgres',\n 'PASSWORD': '',\n #'PASSWORD': 'Zz9~K*s:U6h5e+,',\n 'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n 'PORT': '', # Set to empty string for default.\n 'CONN_MAX_AGE': 600\n }\n}\n\nMP_SANDBOX = False\nMP_ACCESS_TOKEN = \"APP_USR-5854aaf5-0a77-476d-8059-63f24c12c60b\"\nMP_CLIENT_ID = \"6461526720216209\"\nMP_SECRET_KEY = \"otXARuI35i5ZcukURJZAAw9l5MCjjBh3\"\n\n\n\n# IOS\nIOS_PUSH_HEADERS = {\n \"Authorization\": \"key=AIzaSyB2pfw5MHTe3iajal6niPsKWaRruWuQooc\",\n \"content-Type\": \"application/json\"\n}\n\nANDROID_PUSH_HEADERS_TABLET = {\n \"Authorization\": 
\"key=AIzaSyCBhq_afMrGKAuPNrZ6t-FpgBlH1BPOeQM\", \n \"content-Type\": \"application/json\"\n}\n\n#===============================================================================\n# PDFKIT\n#===============================================================================\n\nWKHTMLTOPDF_PATH = '/usr/local/bin/wkhtmltopdf'\n\n#===============================================================================\n\nDESARROLLO = False# not DEBUG\n\nSITE_ID = 4 \n\nADMINS = [('Developer', 'developer@liricus.com.ar')]\nDEBUG = True\nALLOWED_HOSTS = ['*','api.jugaya.com', 'admin.sc.loteriamovil.com.ar','localhost']\n\n\"\"\"LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': 'A24 %(levelname)s [%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n 'console':{\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n 'logfile': {\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': 'agencia24/error.log'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['logfile', 'mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'agencia24_default': {\n 'handlers': ['logfile', 'console', 'mail_admins'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n }\n}\"\"\"\n\nPASSWORD_MIN_LENGTH = 4\n\nPASSWORD_COMPLEXITY = { # You can omit any or all of these for no limit for that particular set\n \"UPPER\": 0, # Uppercase\n \"LOWER\": 0, # Lowercase\n \"LETTERS\": 0, # Either uppercase or lowercase letters\n \"DIGITS\": 0, # Digits\n \"SPECIAL\": 0, # Not alphanumeric, space or punctuation character\n \"WORDS\": 0 # Words (alphanumeric sequences separated by a whitespace or punctuation character)\n}\n\nAPP_CODE=\"PWA_SC\"\nURL_DOMAIN=\"http://api.aikejugar.com.ar/\"\n#URL_DOMAIN=\"http://13.58.40.234:8090/\"\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_ROOT, \"templates\"),\n \"/home/ubuntu/projects/loteriamovil/src/agencia24/bet/templates\",\n ],\n 'OPTIONS': {\n 'context_processors': [\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'bet.context_processors.debug',\n ],\n 'loaders': [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ],\n #'debug': False,\n },\n },\n]\n\nEMAIL_HOST_USER = \"contacto_aikejugar\"\nEMAIL_HOST_PASSWORD = \"aikejugar100\"\nEMAIL_HOST = 'smtp.webfaction.com'\nEMAIL_PORT = 587\nEMAIL_USE_TLS = True\n\nDEFAULT_FROM_EMAIL = '(AikeJugar) '\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\n\nREQUIRE_UNIQUE_EMAIL = 
False\n\n\n\n","repo_name":"jmluque72/telebingo_api","sub_path":"agencia24/local_settings_pwa_sc.py","file_name":"local_settings_pwa_sc.py","file_ext":"py","file_size_in_byte":5001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39098578686","text":"\"\"\"\nнайти 2 числа в сумме дающие k\n\"\"\"\n\nk = 9\nnumbers = [-1, 2, 3, 7, 10]\n\n\ndef sum_of_two(num: list, find: int):\n for n in numbers:\n x = k - n\n if x in numbers:\n nn = numbers.index(x)\n if numbers[nn] == n and numbers.count(numbers[nn]) == 1:\n continue\n else:\n return n, numbers[nn]\n else:\n continue\n\n\nanswer = sum_of_two(numbers, k)\nprint(answer)\n","repo_name":"osinin/leetcode","sub_path":"LeetCode/n_m_k.py","file_name":"n_m_k.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32441866956","text":"from pynput.keyboard import Key, Controller\nimport pyautogui\nimport webbrowser\nimport subprocess\nimport time\nimport os\n\ndef main():\n chrome_path = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s'\n webbrowser.open('https://github.com/login')\n f = open(\"C:/Users/Admin/source/repos/ProjectAutomation/Debug/date.txt\",\"r\")\n screenWidth, screenHeight = pyautogui.size()\n currentMouseX, currentMouseY = pyautogui.position()\n keyboard = Controller()\n time.sleep(5);\n\n #Username Button\n pyautogui.moveTo(1000, 310)\n time.sleep(2)\n pyautogui.click()\n username = f.readline()\n keyboard.type(username)\n\n #Password Button\n time.sleep(3)\n pyautogui.moveTo(1000, 470)\n pyautogui.click()\n keyboard.type(f.readline())\n time.sleep(2)\n\n #Create Git\n webbrowser.open('https://github.com/new')\n time.sleep(5)\n pyautogui.moveTo(900, 360)\n pyautogui.click()\n time.sleep(4)\n folder = f.readline()\n print(folder)\n keyboard.type(folder)\n time.sleep(4)\n pyautogui.moveTo(900, 485)\n pyautogui.click()\n keyboard.type(f.readline())\n\n\n #Git Init\n time.sleep(6)\n file = \"\"\n real = \"\"\n for i in range(len(folder)):\n if(folder[i] != '\\n'):\n file = file + folder[i]\n for i in range(len(username)):\n if(username[i] != '\\n'):\n real = real + username[i]\n print(os.system(\"mkdir \" + file))\n fo = open(\"data.sh\", \"w+\")\n fo.write(\"mkdir \" + file + \" \\n\")\n fo.write(\"cd \" + file + \"/ \\n\")\n fo.write(\"touch README.md \\n\")\n fo.write(\"echo \\\"# \" + file + \"\\\" >> README.md \\n\")\n fo.write(\"git init \\n\")\n fo.write(\"git add README.md \\n\")\n fo.write(\"git commit -m \\\"first commit\\\" \\n\")\n fo.write(\"git remote add origin https://github.com/\" + real + \"/\" + file + \".git \\n\")\n fo.write(\"git push -u origin master\")\n subprocess.call('file.py', shell=True) \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DavidCurca/Project-Automation","sub_path":"ProjectAutomation/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25240481476","text":"import sqlite3\n\nregions = [(\"021\", \"上海\"), (\"022\", \"天津\"), (\"023\", \"重庆\"), (\"024\", \"沈阳\")]\n# 打开SQLite数据库 E:\\software\\SQLiteStudio\\database\\test.db\ncon = sqlite3.connect(r\"E:\\software\\SQLiteStudio\\database\\test.db\")\n# 使用不同的方法分别插入一行数据\ncon.execute(\"insert into region(id, name) values ('020','广东')\")\ncon.execute(\"insert into region(id, name) values (?,?)\", ('001', '北京'))\n# 
插入多行数据\ncon.executemany(\"insert into region(id, name) values (?,?)\", regions)\n# 修改一行数据\ncon.execute(\"update region set name = ? where id = ?\", ('广州', '020'))\n# 删除一行数据\nn = con.execute(\"delete from region where id = ?\", ('024', ))\nprint(\"删除了%d行数据\" % n.rowcount)\ncon.commit() # 提交\ncon.close() # 关闭数据库\n","repo_name":"Oxidaner/python-exercises-answers","sub_path":"myclass/17.0/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31906282577","text":"\"\"\"\n Draw Spirograph using Turtle Package.\n\"\"\"\nfrom turtle import Turtle, Screen\n\n# Initialize turtle object to perform actions.\nturtle = Turtle()\n# Initialize the screen object to control the screen.\nscreen = Screen()\n\n# Create Spirograph using Turtle\nturtle.speed(\"fastest\")\n\nfor _ in range(int(360/10)):\n turtle.circle(100)\n turtle.setheading(turtle.heading()+10)\n","repo_name":"Reshma-shaik/Python","sub_path":"draw_spirograph.py","file_name":"draw_spirograph.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23568296771","text":"import sys\r\n\r\ndef stall(string):\r\n n, k = [int(x) for x in string.split()]\r\n n = n - 1 #for 0 indexed length\r\n #for each empty stall s, compute ls and rs empty stalls left and right\r\n ls = int(n/2)\r\n rs = n - ls\r\n #print(ls, rs)\r\n splits = [ls,rs]\r\n for i in range(1, k):\r\n #get index of next stall\r\n ind = splits.index(max(splits))\r\n #get next split and place\r\n n = splits.pop(ind) - 1\r\n ls = int(n/2)\r\n rs = n - ls\r\n splits.insert(ind, ls)\r\n splits.insert(ind+1, rs)\r\n if ls < 0: ls = 0\r\n if rs < 0: rs = 0\r\n return str(max(ls,rs)) + \" \" + str(min(ls,rs))\r\n #find max(min(ls,rs)) -> fill\r\n #if multiple, find max(max(ls,rs)) -> fill\r\n #if multiple, choose leftmost\r\n #return last max(ls,rs) and min(ls,rs)\r\n\r\ndef main():\r\n with open(sys.argv[1], 'r') as infile:\r\n for i, line in enumerate(infile):\r\n if i == 0:\r\n continue\r\n print(\"Case #\" + str(i) + \": \" + str(stall(line.strip())) )\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1757.py","file_name":"1757.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8821595595","text":"\"\"\"\nFrom https://similarapi.appspot.com/allLibPair.html\n\nUnused Code\n\"\"\"\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass PPair:\n \"\"\"Project Pair\"\"\"\n proj1: str\n proj2: str\n proj1_subpath: str = None\n proj2_subpath: str = None\n\n\nsimilar_api_pairs = [\n PPair(\"spring-projects/spring-security-oauth\", \"thymeleaf/thymeleaf\"), # thymeleaf?\n PPair(\"easymock/easymock\", \"mockito/mockito\"),\n PPair(\"jmock-developers/jmock-library\", \"powermock/powermock\"),\n PPair(\"sgothel/jogl\", \"LWJGL/lwjgl\"),\n PPair(\"spring-projects/spring-ldap\", \"pingidentity/ldapsdk\"),\n PPair(\"hvtuananh/lingpipe\", \"apache/opennlp\"),\n PPair(\"gephi/gephi\", \"jrtom/jung\"),\n PPair(\"apache/mina\", \"jpcap/jpcap\"),\n PPair(\"halfhp/androidplot\", \"julienchastang/charts4j\"),\n # PPair(\"apache/lucene-solr\", \"\"), # ?? lucene, solr\n # PPair(\"\", \"\"), ?? 
awt, swing\n # java-3d, jogl\n # lingpipe, stanford-nlp\n # hamcrest, jmockit\n PPair(\"Netflix/astyanax\", \"datastax/java-driver\"),\n]","repo_name":"saratavakoli77/codesum_replication","sub_path":"paper source code/affinity_data/similar_api_project_list.py","file_name":"similar_api_project_list.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22005600453","text":"'''\n8 9 10\n'''\nfrom collections import deque\n\nans = []\ndef bfs(x, y):\n Q.append([x, y])\n while Q:\n x, y = Q.popleft()\n z = C - x - y\n if visit[x][y] != 0: continue\n visit[x][y] = 1\n if x == 0:\n ans.append(z)\n if y < B:\n water = min(x, B-y)\n newx = x - water\n newy = y + water\n Q.append([newx, newy])\n if z < C:\n water = min(x, C-z)\n newx = x - water\n Q.append([newx, y])\n if x < A:\n water = min(y, A-x)\n newy = y - water\n newx = x + water\n Q.append([newx, newy])\n if z < C:\n water = min(y, C-z)\n newy = y - water\n Q.append([x, newy])\n if x < A:\n water = min(z, A-x)\n newx = x + water\n Q.append([newx, y])\n if y < B:\n water = min(z, B-y)\n newy = y + water\n Q.append([x, newy])\n\nA, B, C = map(int, input().split())\nvisit = [[0] * (B + 1) for _ in range(A + 1)]\nQ = deque()\nbfs(0, 0)\nresult = sorted(ans)\n\nfor i in range(len(result)):\n print(result[i], end=\" \")\n","repo_name":"vreez/APS","sub_path":"boj/boj_2251.py","file_name":"boj_2251.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31249153301","text":"################################\n#\n# break / continue\n#\n################################\n\nimport modules.utils as u\nfrom datetime import date, timedelta\nfrom modules.money import money\n\n\nu.clear()\n\ntoday = date.today()\ntomorrow = timedelta( days=1 )\nproducts = [\n { 'sku': 1, 'expiration_date': today, 'price': 100.0 },\n { 'sku': 2, 'expiration_date': tomorrow, 'price': 50.0 },\n { 'sku': 3, 'expiration_date': today, 'price': 20 },\n]\n\n\nu.banner(\"\"\"\nIf the expiration date is not today,\nskip the item using 'continue', else\nprint the item\n\"\"\")\n###########################################\n\nu.header('Today\\'s specials...')\n\nfor product in products:\n if product['expiration_date'] != today:\n continue\n product['price'] *= 0.8 # 20% discount\n print(\n 'price for sku {}'.format(product['sku']),\n 'is now {}'.format(money(product['price']))\n )\n\n\nu.banner(\"\"\"Flip it to tomorrow\"\"\")\n###########################################\n\nu.header('Tomorrow\\'s specials...')\n\nfor product in products:\n if product['expiration_date'] != tomorrow:\n continue\n product['price'] *= 0.8 # 20% discount\n print(\n 'price for sku {}'.format(product['sku']),\n 'is now {}'.format(money(product['price']))\n )\n\n\nu.banner(\"\"\"Note that you can make a custom\nerror using else right after a for \nloop. This seems to be unique to the \nPython language. 
Below, an error \nwill be thrown if no driver is \nfound.\"\"\")\n###########################################\n\nclass DriverNotFound(Exception):\n    pass\n\npeople = [\n    ('Tom', 16),\n    ('Beth', 17),\n    ('Rob', 13),\n    ('Missy', 14),\n]\ndriver_age = 17\n\nfor person, age in people:\n    if age >= driver_age:\n        driver = (person, age)\n        print(driver)\n        break\nelse:\n    raise DriverNotFound('No driver old enough was found.')\n\n\nu.banner(\"\"\"Example of prime number printer\nusing break\"\"\")\n###########################################\n\nprimes = []\nupto = 100\n\nfor n in range(2, upto + 1):\n    is_prime = True\n    for divisor in range(2, n):\n        if n % divisor == 0:\n            is_prime = False\n            break\n    if is_prime:\n        primes.append(n)\n\nprint(primes)\n\n\nu.banner(\"\"\"Simplified prime number printer,\nremoving superfluous code\"\"\")\n###########################################\n\nprimes = []\nupto = 100\nfor n in range(2, upto+1):\n    for divisor in range(2, n):\n        if n % divisor == 0:\n            break\n    else:\n        primes.append(n)\nprint(primes)\n","repo_name":"rob-kistner/udemy-python-masterclass","sub_path":"examples/break_continue.py","file_name":"break_continue.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27689371264","text":"from skimage import io, measure, feature, transform\nfrom matplotlib import pylab as plt\nfrom scipy import ndimage as ndi\nfrom skimage.feature import corner_harris, corner_subpix, corner_peaks\nimport numpy as np\nfrom typing import List\nimport ramda as R\nimport math\nimport copy\n\nio.use_plugin('matplotlib')\n\n\ndef break_edges(edges: np.ndarray):\n    left = np.zeros_like(np.arange(edges.shape[0]))\n    right = np.zeros_like(np.arange(edges.shape[0]))\n    top = np.zeros_like(np.arange(edges.shape[1]))\n    bottom = np.zeros_like(np.arange(edges.shape[1]))\n    print(edges.shape, left.shape, right.shape)\n    for i in range(edges.shape[0]):\n        for j in range(edges.shape[1]):\n            if edges[i][j]:\n                left[i] = j\n                break\n            else:\n                left[i] = -1\n                right[i] = -1\n        if right[i] == 0:\n            for j in range(edges.shape[1] - 1, -1, -1):\n                if edges[i][j]:\n                    right[i] = j\n                    break\n\n    for j in range(edges.shape[1]):\n        for i in range(edges.shape[0]):\n            if edges[i][j]:\n                top[j] = i\n                break\n            else:\n                top[j] = -1\n                bottom[j] = -1\n        if bottom[j] == 0:\n            for i in range(edges.shape[0] - 1, -1, -1):\n                if edges[i][j]:\n                    bottom[j] = i\n                    break\n    top_left = []\n    top_right = []\n    bot_left = []\n    bot_right = []\n    for i in range(edges.shape[0]):\n        if left[i] >= 0:\n            l = left[i]\n            if top[l] == i:\n                top_left.append([i, l])\n            if bottom[l] == i:\n                bot_left.append([i, l])\n        if right[i] >= 0:\n            l = right[i]\n            if top[l] == i:\n                top_right.append([i, l])\n            if bottom[l] == i:\n                bot_right.append([i, l])\n    return [top_left, top_right, bot_right, bot_left]\n\n\ndef edges_to_functions(edges):\n    result = []\n    for edge in edges:\n        # assumed to be monotonic\n        min_e = R.reduce(R.min_by(lambda x: x[0]), [1e9, 0], edge)\n        max_e = R.reduce(R.max_by(lambda x: x[0]), [0, 0], edge)\n        a = (max_e[1] - min_e[1]) / (max_e[0] - min_e[0])\n        b = min_e[1] - a * min_e[0]\n        result.append([a, b])\n    return result\n\n\ndef blad_srednikwadratowy(edges, functions):\n    result = []\n    for edge, wspolczynniki in zip(edges, functions):\n        f = lambda x: wspolczynniki[0] * x + wspolczynniki[1]\n        errors = R.map(lambda x: (f(x[0]) - x[1]) ** 2)(edge[1:-1])\n        result.append(0 if len(errors)<1 else sum(errors) / len(errors))\n    return result\n\n\ndef angles_between_functions(functions):\n    for i, [a1, b1] in enumerate(functions):\n        print(i)\n\n        for j, [a2, b2] in enumerate(functions):\n            if i == j: continue\n            tan = (a1 - a2) / (1 + a1 * a2)\n            print(j, tan, math.degrees(math.atan(tan)))\n\n\ndef angles_with_neighbouring_functions(functions):\n    results = []\n    for i, [a1, b1] in enumerate(functions):\n        print(i)\n        r = []\n        for j in [(i - 1) % 4, (i + 1) % 4]:\n            a2 = functions[j][0]\n            tan = (a1 - a2) / (1 + a1 * a2)\n            r.append(abs(math.degrees(math.atan(tan))))\n        results.append(r)\n    return results\n\n\ndef list_to_display(shape, my_list):\n    new_boi = np.full(shape, False)\n    for i in my_list:\n        new_boi[i[0]][i[1]] = True\n    return new_boi\n\n\nif __name__ == \"__main__\":\n    checkpoint = []\n    contours = []\n    how_many_in_folder = [6, 20, 20, 20, 20, 200, 20, 100]\n    for set_nr in range(9):\n        for img_nr in range(how_many_in_folder[set_nr]):\n            # nazwa_pliku = \"set{}/{}.png\".format(set_nr, img_nr)\n            # Problems with: (7,15)\n            nazwa_pliku = \"set{}/{}.png\".format(set_nr, img_nr)\n            print(nazwa_pliku)\n            im = io.imread(nazwa_pliku)\n            # im2 = copy.deepcopy(im)\n            edges1 = feature.canny(im)\n            edges = break_edges(edges1)\n            functions = edges_to_functions(edges)\n            errors = blad_srednikwadratowy(edges, functions)\n            print(errors)\n            # angles_between_functions(functions)\n\n            neighbour_angles = angles_with_neighbouring_functions(functions)\n            print(neighbour_angles)\n\n            neighbour_angles_sum = R.map(sum, neighbour_angles)\n            print(neighbour_angles_sum)\n\n            max_index = lambda my_list: R.reduce(lambda acc, x: acc if acc[1] >= x[1] else x, [0, 0], R.zip(range(len(my_list)), my_list))\n            e = max_index(errors)\n            print(e)\n            a = max_index(neighbour_angles_sum)\n            print(a)\n\n            # by errors\n            index_podstawy = (e[0] + 2) % 4\n            print('by errors', index_podstawy, functions[index_podstawy])\n            [a1, _] = functions[index_podstawy]\n\n            # by angles\n            index_podstawy2 = a[0]\n            print('by angles', index_podstawy2, functions[index_podstawy2])\n            [a2, _] = functions[index_podstawy2]\n\n            im2 = transform.rotate(im, 90 - math.degrees(math.atan(a1)))\n            im3 = transform.rotate(im, 90 - math.degrees(math.atan(a2)))\n            memes = []\n            for i in edges:\n                # print(i)\n                memes.append(list_to_display(im.shape, i))\n\n            row_count = 2 + len(memes) + 2\n            fig, axes = plt.subplots(nrows=row_count, ncols=1, figsize=(3, 3 * row_count),\n                                     sharex=True, sharey=True)\n\n            axes[0].imshow(im, cmap=plt.cm.gray)\n            axes[0].axis('off')\n            # axes[0].set_title('noisy image', fontsize=20)\n\n            axes[1].imshow(edges1, cmap=plt.cm.gray)\n            axes[1].axis('off')\n\n            for i, m in enumerate(memes):\n                axes[2 + i].imshow(m, cmap=plt.cm.gray)\n                axes[2 + i].axis('off')\n\n            axes[6].imshow(im2, cmap=plt.cm.gray)\n            axes[6].axis('off')\n            axes[7].imshow(im3, cmap=plt.cm.gray)\n            axes[7].axis('off')\n            fig.tight_layout()\n\n            plt.show()\n            # break\n        # break\n        if set_nr>=0:\n            break","repo_name":"TheCommonCold/PIRO1","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32164522520","text":"n = int(input())\n\ndp = [0]*(n+2)\n\ndp[1] = 1\ndp[2] = 2\n\nfor i in range(3, n+2):\n    if dp[i] > 0:\n        continue\n    else:\n        dp[i] = dp[i-1] + dp[i-2]\n\nprint(dp[-1])\n\n\n# print(dp)\n","repo_name":"HaJunYoo/Algorithm_Study","sub_path":"DP/Inflearn/돌다리_건너기(bottom-up).py","file_name":"돌다리_건너기(bottom-up).py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3474787504","text":"from datetime import datetime\nimport numpy as np\nimport torch\nfrom tqdm.auto import tqdm\n\n\nclass EarlyStopping:\n def __init__(self, patience=1, min_delta=0):\n self.patience = patience\n self.min_delta = min_delta\n self.counter = 0\n self.min_validation_loss = np.inf\n\n def early_stop(self, validation_loss):\n if validation_loss < self.min_validation_loss:\n self.min_validation_loss = validation_loss\n self.counter = 0\n elif validation_loss > (self.min_validation_loss + self.min_delta):\n self.counter += 1\n if self.counter >= self.patience:\n return True\n return False\n\n\ndef batch_gd(model, criterion, optimizer, train_loader, test_loader, early_stopping:bool = False, \\\n early_stopper:EarlyStopping = None, epochs: int = 10):\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model.to(device)\n\n train_losses, test_losses = np.zeros(epochs), np.zeros(epochs)\n\n for it in range(epochs): # iterate over epochs\n\n # ----- Training -----\n\n t0 = datetime.now()\n model.train()\n train_loss = []\n\n for idx, data in tqdm(enumerate(train_loader), total=len(train_loader)): # iterate over batches\n inputs, targets = data\n inputs, targets = inputs.to(device), targets.to(device) # convert the targets to the new classes\n optimizer.zero_grad() # reset the optimizer gradient between steps\n \n # forward pass\n outputs = model(inputs) \n loss = criterion(outputs, targets)\n\n # backward pass\n loss.backward() # compute the gradient\n optimizer.step() # perform a step of gradient descent\n\n train_loss.append(loss.item())\n train_loss = np.mean(train_loss)\n \n\n # ----- Eval -----\n\n model.eval()\n test_loss = []\n\n for idx, data in tqdm(enumerate(test_loader), total=len(test_loader)): # iterate over batches\n inputs, targets = data # iterate over batches\n inputs, targets = inputs.to(\n device), targets.to(device) # convert the targets to the new classes\n \n # forward pass\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n test_loss.append(loss.item())\n test_loss = np.mean(test_loss)\n train_losses[it], test_losses[it] = train_loss, test_loss\n dt = datetime.now() - t0\n\n if early_stopping:\n if early_stopper.early_stop(test_loss):\n print(f'Stopped at epoch: {it+1} with Train Loss : {train_loss:.4f}, Test Loss : {test_loss:.4f}')\n return train_losses, test_losses\n \n print(f'Epoch {it+1} / {epochs}: Train Loss : {train_loss:.4f}, Test Loss : {test_loss:.4f}, duration: {dt}')\n\n return train_losses, test_losses","repo_name":"RPegoud/PyTorch_traffic_sign_detection","sub_path":"package/batch_gd.py","file_name":"batch_gd.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72948166273","text":"from point import Point\n\nHARD_COLORS = {\"WHITE\": [255, 255, 255, 255], \"RED\": [255, 0, 0, 255], \"GREEN\": [0, 255, 0, 255], \"BLUE\": [0, 0, 255, 255], \"YELLOW\": [255, 255, 0, 255], \"NONE\": [0, 0, 0, 0]}\n \n#COLOR_WHITE = [255, 255, 255, 255]\n#COLOR_RED = [255, 0, 0, 255]\n#COLOR_GREEN = [0, 255, 0, 255]\n#COLOR_BLUE = [0, 0, 255, 255]\n#COLOR_YELLOW = [255, 255, 0, 255]\n#COLOR_NONE = [0, 0, 0, 0]\n\n\n# Because the game is grid-like, there are only four directions\n# Not currently used, I guess it's more of a guideline for\n# how all direction formats should be named and ordered (like in dictionaries)\n# {\"TOP\": Exit(), \"BOTTOM\": Exit(), \"LEFT\": Exit(), \"RIGHT\": Exit()}\nDIRECTIONS = 
[\"TOP\", \"BOTTOM\", \"LEFT\", \"RIGHT\"]\nROTATION = [0, 90, 180, 270]\n\n# Window Size\nWINDOW_MAX_X = 900\nUI_Y_POS = 100 # The actual Scene is between 100 - 900\nWINDOW_MAX_Y = 600\n\n# The frame at which the animation frame is updated\nUPDATE_FRAME = 5\n\n## SIZE OF ACTORS/FONT IN PIXELS ##\nFONT_SIZE = 30\nCOUNTER_SIZE = FONT_SIZE\n\n# To account for extra blank pixels in the actor\n# Note: The Player is 24 pixels/ Enemy 32 pixels, \n# but the sprites are upscaled up to be printed at this size\nACTOR_WIDTH = 80\nACTOR_HEIGHT = ACTOR_WIDTH\n# int(ACTOR_WIDTH * 1.5)\nACTOR_SCALE = 1.25\nENEMY_SCALE = ACTOR_SCALE + .5\n\nPICKUP_SIZE = 50\n\n# Actor Names (VERY HARDCODED AND WEIRD)\nPLAYER_NAME = \"Player\"\nPLAYER_SPAWN = Point(300, 325)\n#Point(450, 300)\n\n## RELATIVE FILEPATHS FOR IMAGES ##\n\n# NOTE: An idea I had, but not implemented\n#SPRITE_SOURCE = \"Astronaut\\\\\"\n#RUNNING = \"Astronaut_Run\"\n#IDLE = \"Astronaut_Idle\"\n#IMAGE_FILETYPE = \".png\"\n\nBLANK_ICON = \"blank.png\"\nGEM_ICON = \"OtherSprites\\\\Diamond.png\"\nBULLET_ICON = \"OtherSprites\\\\EnergyPack.png\"\nHEALTH_ICON = \"OtherSprites\\\\Heart.png\"\nLIFE_ICON = \"OtherSprites\\\\LivesCounter.png\"\nKEY_ICON = \"OtherSprites\\\\key.png\"\nSPACESHIP_ICON = \"SmallDriller.png\"\n#\"8-bit-space-ship.png\"\nROCK_BLACK = \"Rock\\\\rock_black.png\"\nROCK_BLUE = \"Rock\\\\rock_blue.png\"\n\nBOSS_BG = \"possible_boss_fight_background.png\"\n\nGAME_TITLE = \"Astronaut Adventure\"\n\n# When the Collision Actor is flung by a collision, how long until it can change its velocity\nFREEZE_TIME = 15\n\nAGGRO = \"\"\n\n# Actor Constants\nSTEP_SIZE = 5\nCOLOR_TIMER_MAX = 2\nINVULNERABLE_TIMER = 15\n\n# Player Constants\nSTARTING_LIVES = 1 #3\nPLAYER_HP = 25\nSTARTING_SHOTS = 75\n\n# Boss Constants\nBOSS_NAME = \"Boss\"\nBOSS_HP = 40\nBOSS_ATTACK = 10\n\nENEMY_NAME = \"Enemy\"\n\nBOSS_KEY_NAME = \"Boss_key\"\nHEALTH_NAME = \"Health\"\nBULLET_NAME = \"Bullet\"\nGEM_NAME = \"Gem\"\nLIFE_NAME = \"Life\"\n\n# How far the bullet spawns from the Player\nBULLET_PADDING = 100\n# Speed of the bullet\nBULLET_SPEED = 5\n\n# Game Over display\nGAME_OVER_SIZE = FONT_SIZE * 2\nBUTTON_SIZE = int(FONT_SIZE * 1.5)\n# Replay buttons\nBUTTON_PADDING = 0\nBUTTON_COLOR = \"GREEN\"\nBUTTON_TEXT_COLOR = \"WHITE\"","repo_name":"Galaticash/CSE210-11","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11486750758","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/118667\n\ndef solution(queue1, queue2):\n sum1, sum2 = sum(queue1), sum(queue2)\n queue_all = queue1 + queue2\n \n idx1_start, idx1_end = 0, len(queue1)-1\n idx2_start, idx2_end = len(queue1), len(queue_all)-1\n \n answer = 0\n while answer < 2 * len(queue_all):\n if sum1 == sum2:\n return answer\n \n if sum1 > sum2:\n x = queue_all[idx1_start]\n sum1, sum2 = sum1-x, sum2+x\n idx2_end = idx2_end+1 if idx2_end < len(queue_all)-1 else 0\n idx1_start = idx1_start+1 if idx1_start < len(queue_all)-1 else 0\n \n elif sum1 < sum2:\n x = queue_all[idx2_start]\n sum1, sum2 = sum1+x, sum2-x\n idx1_end = idx1_end+1 if idx1_end < len(queue_all)-1 else 0\n idx2_start = idx2_start+1 if idx2_start < len(queue_all)-1 else 0\n \n answer += 1\n \n return 
-1\n","repo_name":"treejw/python-for-coding-test","sub_path":"2022_KAKAO_TECH_INTERNSHIP/sum_two_queues.py","file_name":"sum_two_queues.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14788612907","text":"from gensim.models import Word2Vec\nfrom sklearn.cross_validation import train_test_split\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers import TimeDistributed\nimport pickle\nfrom keras.layers import Bidirectional\nimport os\nfrom sklearn.metrics import f1_score, classification_report\n\n\nN_SENTENCES = 42255\nMAX_SENTENCE_LENGTH = 11\nEMBEDDING_VECTOR_DIM = 400\nVOCABULARY_SIZE = 7679 #?????\n\nfrom nltk.tokenize import TweetTokenizer\nimport pymorphy2\nimport nltk.data\n\ntwtk = TweetTokenizer()\ntokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\nmorph = pymorphy2.MorphAnalyzer(lang='uk')\n\n\n\nprint(\"Loading word vectors...\")\nw2v_model = Word2Vec.load(\"../vectors/300features_20minwords_10context\")\nvectors = w2v_model.wv\ntoponims = set()\n\n\nwith open(\"../dictionaries/toponims.txt\", \"r\") as file:\n toponims.update([w.strip() for w in file.readlines()])\n\n\n\nto_open = [\"/home/dzvinka/PycharmProjects/eleks_ds/data/VelikaIstoriyaYkrajni_1412180965.txt\"]\nsentences = []\n#processed_sentences = []\n\nfor file in os.listdir(\"/home/dzvinka/PycharmProjects/eleks_ds/data/lang-uk-data/data\"):\n if file.endswith(\".txt\"):\n to_open.append(os.path.join(\"../data/lang-uk-data/data/\", file))\n\n\nfor filename in to_open:\n with open(filename, \"r\") as file:\n data = file.read()\n sentences.extend(tokenizer.tokenize(data))\n\nX = np.zeros((N_SENTENCES, MAX_SENTENCE_LENGTH, EMBEDDING_VECTOR_DIM))\nY = []\n#Y = np.zeros((N_SENTENCES, MAX_SENTENCE_LENGTH, 1))\n# embedding_matrix = np.zeros((VOCABULARY_SIZE, EMBEDDING_VECTOR_DIM))\n#\n# i = 0\n# for word in vectors.vocab:\n# embedding_matrix[i] = vectors[word]\n# i += 1\n#\nfor i in range(len(sentences)):\n words = [morph.parse(word)[0].normal_form for word in twtk.tokenize(sentences[i]) if word.isalpha()]\n labels = [0] * MAX_SENTENCE_LENGTH\n j = 0\n for word in words:\n if j >= MAX_SENTENCE_LENGTH:\n break\n if word in vectors.vocab:\n X[i, j] = vectors[word]\n if word in toponims:\n labels[j] = 1\n j += 1\n\n Y.append(labels)\n\n#\nY = np.array(Y)\nY = Y.reshape(N_SENTENCES, MAX_SENTENCE_LENGTH, 1)\n#\n#\n# print(X.shape)\n# print(Y.shape)\n#\nprint(X[0, 0])\nprint(Y[5])\n\n\n\ndata_train, data_test, labels_train, labels_test = \\\n train_test_split(X, Y,\n test_size=0.2, random_state=42)\n\nprint(\"Building model...\")\nmodel = Sequential()\n\n\nmodel.add(Bidirectional(LSTM(100, return_sequences=True), input_shape=(MAX_SENTENCE_LENGTH, EMBEDDING_VECTOR_DIM),))\nmodel.add(TimeDistributed(Dense(1, activation='sigmoid')))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\nprint(\"Training model...\")\nmodel.fit(data_train, labels_train, nb_epoch=1, batch_size=64)\n# Final evaluation of the model\nscores = model.evaluate(data_test, labels_test, verbose=0)\nprint(\"Accuracy: %.2f%%\" % (scores[1]*100))\n\ny_predict = model.predict(data_test)\nlabels_test_r = labels_test.reshape(labels_test.shape[0] * labels_test.shape[1])\ny_predict = y_predict.reshape(y_predict.shape[0] * y_predict.shape[1])\npred = []\nprint(y_predict.shape)\nprint(y_predict)\nfor i in 
range(y_predict.shape[0]):\n    if y_predict[i] >= 0.5:\n        pred.append(1)\n    else:\n        pred.append(0)\n\nprint(f1_score(labels_test_r, pred))\nprint(classification_report(labels_test_r, pred))\n\n\n# f = open('../models/lstm_my_embeddings.pickle', 'wb')\n# pickle.dump(model, f)\n# f.close()\n\n\n\n","repo_name":"DzvinkaYarish/NER_Ukrainian","sub_path":"trainings/lstm_without_embedding.py","file_name":"lstm_without_embedding.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22947027763","text":"\nfrom vsg.rules import token_indent\n\nfrom vsg import token\n\nlTokens = []\nlTokens.append(token.enumeration_type_definition.enumeration_literal)\n\n\nclass rule_005(token_indent):\n    '''\n    This rule checks the indent of multiline enumerated types.\n\n    **Violation**\n\n    .. code-block:: vhdl\n\n      type state_machine is (\n       idle,\n       write,\n       read,\n       done);\n\n    **Fix**\n\n    .. code-block:: vhdl\n\n      type state_machine is (\n         idle,\n         write,\n         read,\n         done);\n    '''\n\n    def __init__(self):\n        token_indent.__init__(self, 'type', '005', lTokens)\n        self.solution = 'Ensure proper indentation.'\n","repo_name":"jeremiah-c-leary/vhdl-style-guide","sub_path":"vsg/rules/type_definition/rule_005.py","file_name":"rule_005.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"61"}
+{"seq_id":"20459754166","text":"import sims4.hash_util\nimport sims4.log\nlogger = sims4.log.Logger('Tuning')\nINSTANCE_TUNABLES = 'INSTANCE_TUNABLES'\nREMOVE_INSTANCE_TUNABLES = 'REMOVE_INSTANCE_TUNABLES'\nTUNING_FILE_MODULE_NAME = 'sims4.tuning.class.instances'\n\nclass TunedInstanceMetaclass(type):\n    __qualname__ = 'TunedInstanceMetaclass'\n\n    def __new__(cls, name, bases, *args, **kwargs):\n        manager = None\n        for base in bases:\n            # use the first base class that already carries a tuning manager\n            if isinstance(base, TunedInstanceMetaclass):\n                manager = base.tuning_manager\n                break\n        if 'manager' in kwargs:\n            manager = kwargs.pop('manager')\n        if 'custom_module_name' in kwargs:\n            cls.__module__ = kwargs.pop('custom_module_name')\n        tuned_instance = super().__new__(cls, name, bases, *args, **kwargs)\n        tuned_instance.tuning_manager = manager\n        if cls.__module__ != TUNING_FILE_MODULE_NAME:\n            manager.register_class_template(tuned_instance)\n            for (name, tunable) in tuned_instance.get_tunables(ignore_tuned_instance_metaclass_subclasses=True).items():\n                setattr(tuned_instance, name, tunable.default)\n            tuned_instance.reloadable = True\n        return tuned_instance\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args)\n\n    def get_parents(cls, ignore_tuned_instance_metaclass_subclasses=False):\n        parents = cls.mro()\n        if ignore_tuned_instance_metaclass_subclasses:\n            for (i, c) in enumerate(parents[1:], 1):\n                if isinstance(c, TunedInstanceMetaclass):\n                    parents = parents[:i]\n                    break\n        return parents\n\n    def get_tunables(cls, **kwargs):\n        tuning = {}\n        for base_cls in reversed(cls.get_parents(**kwargs)):\n            cls_vars = vars(base_cls)\n            if REMOVE_INSTANCE_TUNABLES in cls_vars:\n                remove_instance_tunables = cls_vars[REMOVE_INSTANCE_TUNABLES]\n                for key in remove_instance_tunables:\n                    if key in tuning:\n                        del tuning[key]\n            if INSTANCE_TUNABLES in cls_vars:\n                instance_tunables = cls_vars[INSTANCE_TUNABLES]\n                tuning.update(instance_tunables)\n        return tuning\n\n    def get_invalid_removals(cls):\n        tuning = None\n        parents = cls.mro()\n        valid_remove = set()\n        missing_remove = set()\n        for base_cls in reversed(parents):\n            cls_vars = vars(base_cls)\n            if REMOVE_INSTANCE_TUNABLES in cls_vars:\n                remove_instance_tunables = cls_vars[REMOVE_INSTANCE_TUNABLES]\n                for key in remove_instance_tunables:\n                    if tuning is not None and key in tuning:\n                        del tuning[key]\n                        valid_remove.add(key)\n                    elif tuning is not None:\n                        missing_remove.add(key)\n            if INSTANCE_TUNABLES in cls_vars:\n                instance_tunables = cls_vars[INSTANCE_TUNABLES]\n                if tuning is None:\n                    tuning = {}\n                tuning.update(instance_tunables)\n        return missing_remove - valid_remove\n\n    def get_removed_tunable_names(cls):\n        removed_tuning = []\n        for base_cls in cls.get_parents():\n            cls_vars = vars(base_cls)\n            if isinstance(base_cls, TunedInstanceMetaclass) and base_cls is not cls:\n                return removed_tuning\n            if REMOVE_INSTANCE_TUNABLES in cls_vars:\n                remove_instance_tunables = cls_vars[REMOVE_INSTANCE_TUNABLES]\n                for key in remove_instance_tunables:\n                    removed_tuning.append(key)\n                return removed_tuning\n        return removed_tuning\n\n    def add_tunable_to_instance(cls, tunable_name, tunable):\n        cls_vars = vars(cls)\n        if INSTANCE_TUNABLES in cls_vars:\n            cls_vars[INSTANCE_TUNABLES][tunable_name] = tunable\n        else:\n            setattr(cls, INSTANCE_TUNABLES, {tunable_name: tunable})\n        setattr(cls, tunable_name, tunable.default)\n\n    def generate_tuned_type(cls, name, *args, **kwargs):\n        tuning_class_instance = type(cls)(name, (cls,), {}, custom_module_name=TUNING_FILE_MODULE_NAME)\n        return tuning_class_instance\n\nclass HashedTunedInstanceMetaclass(TunedInstanceMetaclass):\n    __qualname__ = 'HashedTunedInstanceMetaclass'\n\n    def generate_tuned_type(cls, name, *args, **kwargs):\n        inst = super().generate_tuned_type(name, *args, **kwargs)\n        inst.guid = sims4.hash_util.hash32(name)\n        if not hasattr(inst, 'guid64'):\n            inst.guid64 = sims4.hash_util.hash64(name)\n        return inst\n\ndef lock_instance_tunables(cls, **kwargs):\n    for (key, value) in kwargs.items():\n        setattr(cls, key, value)\n    remove_tunables = set(cls.__dict__.get(REMOVE_INSTANCE_TUNABLES, ()))\n    remove_tunables.update(kwargs.keys())\n    setattr(cls, REMOVE_INSTANCE_TUNABLES, remove_tunables)\n\ndef prohibits_instantiation(cls):\n    return vars(cls).get('INSTANCE_SUBCLASSES_ONLY', False)\n\n","repo_name":"johndpope/sims4-ai-engine","sub_path":"core/sims4/tuning/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"}
+{"seq_id":"4855112592","text":"# Huffman tree node\nclass Node():\n    def __init__(self, num_str=None, frequency=None):\n        self.num_str = num_str\n        self.frequency = frequency\n        self.left = None\n        self.right = None\n    def set_left(self, left_node):\n        self.left = left_node\n    def set_right(self, right_node):\n        self.right = right_node\n# Huffman tree\nclass tree():\n    # create a Huffman tree, set root\n    def __init__(self, node_list):\n        while len(node_list) != 1:\n            father = Node(\n                num_str=node_list[0].num_str + node_list[1].num_str,\n                frequency=node_list[0].frequency + node_list[1].frequency\n            )\n            father.set_left(node_list[0])\n            father.set_right(node_list[1])\n            node_list = node_list[2:]\n            node_list.append(father)\n            node_list.sort(key=lambda node:node.frequency)\n        self.root = node_list[0]\n        #print('----> root:', self.root.num_str, self.root.frequency)\n    # encode all tree(recursive call)\n    def encode(self):\n        code = ''\n        self.code_recoder_dict = {}\n        self.encode_single_node(self.root, code)\n        return self.code_recoder_dict\n    def encode_single_node(self, node, code):\n        # if only one number input\n        if self.root.left == None and self.root.right == None:\n            self.code_recoder_dict[self.root.num_str] = '0'\n        # if basic node\n        if node.left == None and node.right == None:\n            self.code_recoder_dict[node.num_str] = code\n        else:\n            self.encode_single_node(node.left, code+'0')\n            self.encode_single_node(node.right, code+'1')\n\n\nif __name__ == '__main__':\n    input_str = input()\n    frequency_dict = {}\n    for ch in input_str:\n        frequency = frequency_dict.get(ch, -1)\n        if frequency == -1:\n            frequency_dict[ch] = 1\n        else:\n            frequency_dict[ch] += 1\n    sorted_frequency_list = sorted(frequency_dict.items(), key=lambda x:x[1])\n    node_list = [Node(piece[0], piece[1]) for piece in sorted_frequency_list]\n    new_tree = tree(node_list)\n    code_recoder_dict = new_tree.encode()\n\n    sorted_code_recoder_list = sorted(code_recoder_dict.items(), key=lambda x:int(x[1]))\n    for i in range(len(sorted_code_recoder_list)):\n        print('{}:{}'.format(sorted_code_recoder_list[i][0], sorted_code_recoder_list[i][1]))","repo_name":"110621013/algorithm","sub_path":"hw/HW10_0601/Huffman_Code.py","file_name":"Huffman_Code.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"26128485314","text":"from collections import Counter\nimport string\nimport json\nimport zmq\nimport click\n\n\ndef process_text(text):\n    punct_to_space_table = {ord(c): ' ' for c in string.punctuation}\n    cleaned_text = text.lower().translate(punct_to_space_table)\n    words_list = cleaned_text.split()\n    return words_list\n\n\n@click.command()\n@click.option('--broker-host', default='localhost', help='Host of broker')\n@click.option('--broker-port', default=12346, help='Port of broker backend socket')\ndef worker_run(broker_host, broker_port):\n    ''' Start worker process, which converts raw text into frequency map.\n    '''\n    context = zmq.Context()\n    socket = context.socket(zmq.REP)\n    broker_url = 'tcp://{0}:{1}'.format(broker_host, broker_port)\n    socket.connect(broker_url)\n\n    while True:\n        message = socket.recv_string()\n        print('.')\n\n        words_list = process_text(message)\n        counter = Counter(words_list)\n        json_str = json.dumps(counter)\n\n        socket.send_string(json_str)\n\n\nif __name__ == '__main__':\n    worker_run()\n","repo_name":"a-milogradov/wcpy","sub_path":"wcpy/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"34023584272","text":"#!/usr/bin/env python3.9\nfrom msdsl import *\nimport numpy as np\n\nw = 10\nk = w**2\n\ndt = 1e-5\nm = MixedSignalModel(\"vco_model\")\nv_in = m.add_analog_input(\"v_in\")\nclk = m.add_digital_input(\"clk\")\nrst = m.add_digital_input(\"rst\")\n\nA = 2048\n\nv_out = m.add_digital_output(\"v_out\", width=25, signed=True, min_val=-1.5, max_val=1.5)\nx = m.add_analog_state(\"x\", range_=1.5, init=0)\nv = m.add_analog_state(\"v\", range_=(w**2) * 1.5, init=1)\nv_prime = m.add_analog_state(\"v_prime\", range_=(w**2) * 1.5, init=0)\n\nm.set_next_cycle(v_prime, -(w**2) * x, clk=clk, rst=rst)\nm.set_next_cycle(v, v + v_prime * dt, clk=clk, rst=rst)\nm.set_next_cycle(x, x + v * dt, clk=clk, rst=rst)\nm.set_this_cycle(v_out, to_sint(x, width=25))\n\nm.compile_and_print(VerilogGenerator())\n","repo_name":"CyanoKobalamyne/msdsl-examples","sub_path":"generate_vco_model.py","file_name":"generate_vco_model.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4887365751","text":"from . import data_units as du\n\nfrom typing import Optional\n\n\nclass DiskView(object):\n def __init__(self, disk, begin: int, size: int,\n sector_size: Optional[int] = None, cluster_size: Optional[int] = None):\n assert not isinstance(disk, DiskView)\n\n self.disk = disk\n self.begin = begin\n self.end = begin + size\n\n assert self.begin <= self.end\n assert sector_size is None or sector_size > 0\n assert cluster_size is None or cluster_size > 0\n\n if sector_size is not None:\n self.sector_size = sector_size\n self.sectors = du.DataUnits(self, sector_size, self.size // sector_size)\n if cluster_size is not None:\n self.cluster_size = cluster_size\n self.clusters = du.DataUnits(self, cluster_size, self.size // cluster_size)\n\n @property\n def size(self):\n return self.end - self.begin\n\n def _seek(self, offset):\n location = self.begin + offset\n\n assert location >= self.begin and location < self.end\n self.disk.seek(location)\n\n def read(self, size, offset=None):\n if offset is not None:\n self._seek(offset)\n assert self.disk.tell() + size - 1 < self.end\n return self.disk.read(size)\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n return ''.format(self.begin, self.size)\n","repo_name":"xinhuang/PyFFF","sub_path":"fff/disk_view.py","file_name":"disk_view.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1325943004","text":"\nfrom django.conf.urls import url\n\nfrom cart import views\n\nurlpatterns = [\n # 加入购物车\n url(r'add_cart/', views.add_cart, name='add_cart'),\n # 购物车\n url(r'cart/', views.cart, name='cart'),\n # 修改购物车\n url(r'change/(\\d+)/', views.change_cart, name='change_cart'),\n\n]","repo_name":"z66980437/ttsx_front","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31122139512","text":"import os\n\nfrom PIL import Image\nimport numpy as np\n\nfrom exception import CantSolveError\nfrom nonogram import Mark, Nonogram\nfrom nonogram_solver import NonogramSolver\n\n\nclass NonogramGenerator:\n def __init__(self, image_path, width, height, threshold=200):\n self.image_path = image_path\n self.width = width\n self.height = height\n self.threshold = threshold\n self.image_array = []\n self.__load_image()\n\n def __load_image(self):\n image = Image.open(self.image_path)\n resized_image = image.resize((self.width, self.height), Image.Resampling.LANCZOS)\n bw_image = resized_image.convert(\"L\")\n\n bw_array = np.array(bw_image)\n self.image_array = np.where(bw_array >= self.threshold, Mark.WHITE, Mark.BLACK)\n\n image.close()\n resized_image.close()\n bw_image.close()\n\n def get_image_as_array(self):\n return self.image_array\n\n def get_rows_and_columns_nonogram_header(self):\n rows = [self.__find_lengths(row) for row in self.image_array]\n transposed_array = list(map(list, zip(*self.image_array)))\n columns = [self.__find_lengths(column) for column in transposed_array]\n return rows, columns\n\n def __find_lengths(self, arr):\n lengths = []\n count = 0\n for value in arr:\n if value == Mark.BLACK:\n count += 1\n elif count > 0:\n lengths.append(count)\n count = 0\n if count > 0:\n lengths.append(count)\n if len(lengths) == 0:\n lengths.append(0)\n return lengths\n\n def is_nonogram_solvable(self):\n nonogram = self.get_nonogram()\n solver = NonogramSolver(nonogram)\n 
try:\n solver.solve()\n except CantSolveError:\n return False\n return True\n\n def get_nonogram(self):\n rows, columns = self.get_rows_and_columns_nonogram_header()\n file_name_with_extension = os.path.basename(self.image_path)\n file_name, file_extension = os.path.splitext(file_name_with_extension)\n nonogram = Nonogram(rows, columns, file_name)\n return nonogram\n","repo_name":"noapr/Nonogram","sub_path":"nonogram_generator.py","file_name":"nonogram_generator.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"15365165918","text":"import torch\nimport torchvision\nimport cv2\nimport numpy as np\nfrom utils import process_image\n\ndef inference():\n ckpt_path = \"models/photosketch_model_jit.pth\"\n # ckpt_path = \"models/model_jit.pth\"\n # test_img_dir = \"dataset/stickers_png/batch_1_cleaned/\"\n test_img_dir = \"outputs/processed_results4/\"\n # test_img_dir = \"/home/adrian/Repositories/PhotoSketch/examples/\"\n # output_dir = \"outputs/photosketch_result1/\"\n output_dir = \"outputs/photosketch_result2/\"\n img_list = process_image.get_img_list(test_img_dir, 1)\n \n model = torch.jit.load(ckpt_path, map_location=torch.device(\"cpu\"))\n model.eval()\n for i in range(len(img_list)):\n img = img_list[i]\n # img = cv2.resize(img, (256,256), interpolation=cv2.INTER_CUBIC)\n img = 255-img\n img = torchvision.transforms.ToTensor()(img)\n # img = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(img)\n img = img.unsqueeze(0)\n print(img.shape)\n exit()\n output = model(img)\n result = (output.detach().numpy()).squeeze()\n result = (result + 1) / 2.0 * 255.0\n result = 255 - result\n result = result.astype(np.uint8)\n cv2.imwrite(output_dir + \"test_\" + str(i).zfill(3) + \".png\", result)\n print(i)\n return\n\ninference()","repo_name":"arawndinog/stickersearch","sub_path":"test_photosketch.py","file_name":"test_photosketch.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26412673119","text":"import unittest\nfrom telegram_bot import TelegramBot\nfrom responses import TextResponses\n\n\nclass Tests(unittest.TestCase):\n def test_get_direct_response_on_text(self):\n self.assertEqual({\"text\": \"test yourself\", \"reply\": True, \"parse_mode\": \"HTML\"},\n TextResponses()._get_direct_response(\"test\"))\n\n self.assertEqual({\"text\": \"Nooooooo\", \"reply\": True},\n TextResponses()._get_direct_response(\"yes\"))\n\n self.assertEqual({\"text\": \"not lol\", \"reply\": True},\n TextResponses()._get_direct_response(\"lol\"))\n\n def test_get_regular_response_on_text(self):\n self.assertEqual({\"regex\": r\"hello\", \"text\": \"hello you too\", \"reply\": True},\n TextResponses()._get_regular_response(\"hello\"))\n\n def test_telegram_bot_get_response_on_text(self):\n self.assertEqual({\"text\": \"test yourself\", \"reply\": True, \"parse_mode\": \"HTML\"},\n TelegramBot._get_response_on_text(\"test\"))\n\n self.assertEqual({\"regex\": r\"hello\", \"text\": \"hello you too\", \"reply\": True},\n TelegramBot._get_response_on_text(\"hello\"))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kl09/telegram_bot_responder","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34977409916","text":"#!/usr/bin/env 
python\n# coding: utf-8\n\n# **Histogram classifier based on a direct comparison with templates (i.e. reference histograms)**\n\n\n\n### imports\n\n# external modules\nimport sys\nimport numpy as np\nimport pickle\nimport importlib\n\n# local modules\nfrom HistogramClassifier import HistogramClassifier\nsys.path.append('../../utils')\n\n\n\n\ndef mseTopN_templates( histograms, templates, n=-1 ):\n    ### calculate the mse between each histogram in histograms and each histogram in templates\n    # input arguments:\n    # - histograms: 2D numpy array of shape (nhistograms, nbins)\n    # - templates: 2D numpy array of shape (ntemplates,nbins)\n    # - n: integer representing the number of (sorted) bin squared errors to take into account (default: all)\n    # output:\n    # 2D numpy array of shape (nhistograms,ntemplates) holding the mseTopN between each histogram and each template\n    \n    nhistograms,nbins = histograms.shape\n    ntemplates,_ = templates.shape\n    res = np.zeros( (nhistograms,ntemplates) )\n    for i in range(ntemplates):\n        temp = np.tile( templates[i,:], (nhistograms,1) )\n        sqdiff = np.power(histograms-temp,2)\n        sqdiff[:,::-1].sort()\n        if n>0: sqdiff = sqdiff[:,:n]\n        mean = np.mean(sqdiff,axis=-1)\n        res[:,i] = mean\n    return res\n\ndef mseTopN_min( histograms, templates, n=-1 ):\n    ### calculate the mse between a histogram and each template and return the minimum\n    # input arguments:\n    # - histograms: 2D numpy array of shape (nhistograms, nbins)\n    # - templates: 2D numpy array of shape (ntemplates,nbins)\n    # - n: integer representing the number of (sorted) bin squared errors to take into account (default: all)\n    # output:\n    # 1D numpy array of shape (nhistograms) holding the minimum mseTopN for each histogram\n    \n    allmses = mseTopN_templates( histograms, templates, n=n )\n    return np.amin( allmses, axis=-1 )\n\ndef mseTop10_min( histograms, templates ):\n    ### special case of above with n=10\n    return mseTopN_min( histograms,templates,n=10)\n\ndef mseTopN_avg( histograms, templates, n=-1 ):\n    ### calculate the mse between a histogram and each template and return the average\n    # input arguments:\n    # - histograms: 2D numpy array of shape (nhistograms, nbins)\n    # - templates: 2D numpy array of shape (ntemplates,nbins)\n    # - n: integer representing the number of (sorted) bin squared errors to take into account (default: all)\n    # output:\n    # 1D numpy array of shape (nhistograms) holding the average mseTopN for each histogram\n    \n    allmses = mseTopN_templates( histograms, templates, n=n )\n    return np.mean( allmses, axis=-1 )\n\ndef mseTop10_avg( histograms, templates ):\n    ### special case of above with n=10\n    return mseTopN_avg( histograms,templates,n=10)\n\n\n\n\nclass TemplateBasedClassifier(HistogramClassifier):\n    ### histogram classifier based on a direct comparison with templates (i.e. 
reference histograms)\n \n def __init__( self, comparemethod='minmse' ):\n ### initializer\n # input arguments:\n # - comparemethod: string representing the method by which to compare a histogram with a set of templates\n # currently supported methods are:\n # - minmse: minimum mean square error between histogram and all templates\n # - avgmse: average mean square error between histogram and all templates\n \n self.methods = ({'minmse':mseTopN_min,\n 'minmsetop10': mseTop10_min,\n 'avgmse':mseTopN_avg,\n 'avgmsetop10': mseTop10_avg })\n if not comparemethod in self.methods.keys():\n raise Exception('ERROR in TemplateBasedClassifier.__init__: comparemethod not recognized: {}'.format(comparemethod))\n self.comparemethod = comparemethod\n \n def train( self, templates ):\n ### 'train' the classifier, i.e. set the templates (reference histograms)\n # input arguments:\n # - templates: a 2D numpy array of shape (nhistograms,nbins)\n super(TemplateBasedClassifier,self).train( templates )\n self.templates = templates\n \n def evaluate( self, histograms ):\n ### classification of a collection of histograms based on their deviation from templates\n super(TemplateBasedClassifier,self).evaluate( histograms )\n return self.methods[self.comparemethod]( histograms, self.templates )\n\n def save( self, path ):\n ### save the classifier\n super( TemplateBasedClassifier,self ).save( path )\n with open( path, 'wb' ) as f:\n pickle.dump( self, f )\n\n @classmethod\n def load( self, path, **kwargs ):\n ### get a TemplateBasedClassifier instance from a pkl file\n super( TemplateBasedClassifier, self ).load( path )\n with open( path, 'rb' ) as f:\n obj = pickle.load( f )\n return obj","repo_name":"LukaLambrecht/ML4DQMDC-PixelAE","sub_path":"src/classifiers/TemplateBasedClassifier.py","file_name":"TemplateBasedClassifier.py","file_ext":"py","file_size_in_byte":4843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23407173841","text":"\"\"\"\nProblem: A\n\n\nThis script was written for Python 3.3.\nIt\n * reads from standard input\n * writes to standard output\n * logs to standard error\n\n@author: edong\n\"\"\"\n\n# Python built-in libraries\nimport itertools\nimport logging\nimport math\nimport sys\n\n# External libraries\n# NumPY \n#import numpy\n\n# Log to standard error\nlevel=logging.DEBUG\n#level=logging.INFO\nlogging.basicConfig(stream=sys.stderr, level=level, \\\n format='%(asctime)s %(levelname)-7s %(message)s')\n\nclass TestCase(object):\n \"\"\"\n Container for the inputs of a test case.\n \"\"\"\n def __init__(self):\n pass\n \n def __str__(self):\n \"\"\"\n Returns a representation.\n \"\"\"\n return str(self.__dict__)\n \ndef parse_test_case():\n \"\"\"\n Parses the inputs for a test case from standard input\n and returns the result.\n \"\"\"\n case = TestCase()\n case.s, nstr = nextstr().split(' ')\n case.n = int(nstr)\n return case\n\ndef solve(case):\n \"\"\"\n Solves a single test case, and returns the result.\n \"\"\"\n s = case.s\n n = case.n\n length = len(s)\n \n was_vowel = False\n counts = []\n count = 0\n start = 0\n \n for i, c in enumerate(s):\n is_vowel = c == 'a' or c == 'e' or c == 'i' or c == 'o' or c == 'u'\n if was_vowel != is_vowel:\n if count > 0:\n counts.append((count, was_vowel, start))\n \n count = 0\n was_vowel = is_vowel\n start = i\n \n count += 1\n counts.append((count, was_vowel, start))\n \n logging.info(\"Counts: %s\", counts)\n \n total = 0\n \n for i in range(len(counts)):\n # Start from the left\n # Find 
first position where we find n consecutive\n first_match = None\n next_match = None\n for k in range(i, len(counts)):\n c = counts[k]\n if not c[1] and c[0] >= n:\n if first_match is None:\n first_match = counts[k]\n continue\n elif next_match is None:\n next_match = counts[k]\n break\n \n logging.debug(\"i=%d, first_match=%s, next_match=%s\", i, first_match, next_match)\n \n if first_match is None:\n continue\n \n for start in range(counts[i][2], counts[i][2] + counts[i][0]):\n if first_match == counts[i]:\n if start <= counts[i][2] + counts[i][0] - n:\n logging.debug(\"Special case: adding %d\", length - (start + n - 1))\n # Special case\n total += length - (start + n - 1) \n elif next_match is not None:\n logging.debug(\"Special case: next adding %d\", length - (next_match[2] + n - 1))\n total += length - (next_match[2] + n - 1)\n else:\n logging.debug(\"Adding %d\", length - (first_match[2] + n - 1))\n total += length - (first_match[2] + n - 1)\n \n return total\n \n\n##############################################################\n# Utility functions\n \ndef nextstr():\n \"\"\"\n Returns the next line from standard input,\n without any trailing newlines.\n \"\"\"\n l = sys.stdin.readline()\n if l[-1] == '\\n':\n l = l[:-1]\n return l\n \ndef nextint():\n \"\"\"\n Returns the next line from standard input as an integer.\n \"\"\"\n return int(nextstr())\n\ndef nextints():\n \"\"\"\n Returns the next line from standard input as a list of integers,\n where the input is split by ' '.\n \"\"\"\n return [int(t) for t in nextstr().split(' ')]\n \ndef main():\n \"\"\"\n Main function.\n \"\"\"\n \n # Log module filename\n mainmod = sys.modules['__main__']\n if mainmod and hasattr(mainmod, '__file__'):\n logging.info(\"Running %s\", mainmod.__file__)\n\n import time\n start_time = time.time()\n \n num_cases = nextint()\n\n for i in range(1, num_cases+1):\n test_case_start_time = time.time()\n case = parse_test_case()\n logging.info(\"Case #%d has inputs: %s\", i, case)\n output = solve(case)\n print(\"Case #{}: {}\".format(i, output))\n test_case_end_time = time.time()\n logging.info(\"Case #%d has output: %s\", i, output)\n logging.debug(\"Case #%d running time: %0.1f s\", \\\n i, test_case_end_time-test_case_start_time)\n sys.stdin.close()\n \n end_time = time.time()\n logging.info(\"Total running time: %0.1f s for %d test cases\", \\\n end_time-start_time, num_cases)\n\nif __name__ == '__main__':\n main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_126/630.py","file_name":"630.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44787603131","text":"import numpy as np\nimport os\nfrom PIL import Image\nfrom pathlib import Path\nfrom torchvision.utils import draw_segmentation_masks\nimport torch\nimport torchvision.transforms.functional as F\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom catalyst.dl import SupervisedRunner\nfrom const import DIR_PATH, TEST_DIR, BATCH_SIZE\nfrom model import DeepLabv3\nfrom dataset import TestCeleb\n\n#getting predictions\ndef save_img(\n TEST_DIR: Path,\n DIR_PATH: Path):\n \"\"\"\n Returns saved predictions (image with mask overlapped)\n in 'DIR_PATH / predictions'.\n \"\"\"\n val_transform = A.Compose([\n A.Resize(512, 512),\n A.Normalize(\n mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)\n ),\n ToTensorV2()\n ])\n\n test_dataset = TestCeleb(\n TEST_DIR,\n 
transform=val_transform\n )\n\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=0\n )\n\n model = DeepLabv3()\n\n runner = SupervisedRunner(\n input_key='features',\n output_key='scores',\n target_key='targets',\n loss_key='loss'\n )\n\n predictions = np.vstack(list(map(\n lambda x: x[\"scores\"].cpu().numpy(),\n runner.predict_loader(\n loader=test_loader,\n model=model,\n resume=Path(DIR_PATH, 'model.best.pth')\n )\n )))\n transform_img = A.Compose([\n A.Resize(512, 512),\n ToTensorV2()\n ])\n\n test_images = os.listdir(TEST_DIR)\n\n for i, img in enumerate(test_images):\n test_img = np.array(\n Image.open(Path(TEST_DIR, f'{img}')).convert('RGB'))\n transformed = transform_img(image=test_img)\n test_img = transformed[\"image\"]\n preds = draw_segmentation_masks(\n image=test_img.type(torch.uint8),\n masks=torch.gt(torch.from_numpy(predictions[i]), 0.1),\n alpha=0.7,\n colors='#8000ff'\n )\n preds = preds.detach()\n preds = F.to_pil_image(preds)\n preds.save(Path(DIR_PATH, 'predictions', f'{i}.jpg'))\n\nsave_img(TEST_DIR, DIR_PATH)","repo_name":"ashimatyuk/face_segmentation","sub_path":"save_image.py","file_name":"save_image.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"2114737020","text":"from datetime import timedelta\n\nfrom django.utils import timezone\nfrom rest_framework import serializers\n\nfrom schedule_tasks.models import ScheduledTask, TaskExecutionHistory\n\n\ndef validate_schedule_time(schedule_time):\n if schedule_time <= timezone.now() or schedule_time > timezone.now() + timedelta(days=365):\n raise serializers.ValidationError(\"schedule_time must be within a year in the future\")\n return schedule_time\n\n\nclass TaskExecutionHistorySerializer(serializers.ModelSerializer):\n class Meta:\n model = TaskExecutionHistory\n fields = (\n \"status\", \"created\",\n )\n read_only_fields = fields\n ordering = [\"-created\"]\n\n\nclass ScheduledTaskSerializer(serializers.ModelSerializer):\n\n schedule_time = serializers.DateTimeField(validators=[validate_schedule_time])\n\n class Meta:\n model = ScheduledTask\n fields = (\n \"id\", \"title\", \"description\", \"schedule_time\", \"status\", \"created\", \"updated\",\n )\n read_only_fields = [\n \"status\", \"created\", \"updated\",\n ]\n ordering = [\"-created\"]\n\n\nclass ScheduledTaskListParamsSerializer(serializers.Serializer):\n status = serializers.CharField(required=False)\n schedule_time_start = serializers.DateTimeField(required=False)\n schedule_time_end = serializers.DateTimeField(required=False)\n sort_by = serializers.CharField(required=False)\n\n def validate_status(self, status):\n task_status_choices = [\n task_status_choice[0] for task_status_choice in ScheduledTask.TaskStatus.CHOICES\n ]\n if status not in task_status_choices:\n err_msg = f\"Invalid value '{status}' for status. Allowed values are {task_status_choices}\"\n raise serializers.ValidationError(err_msg)\n return status\n\n def validate_sort_by(self, sort_by):\n sort_by_choices = [\"schedule_time\", \"-schedule_time\"]\n if sort_by not in sort_by_choices:\n err_msg = f\"Invalid value '{sort_by}' for sort_by. 
Allowed values are {sort_by_choices}\"\n            raise serializers.ValidationError(err_msg)\n        return sort_by\n\n    def validate(self, data):\n        data = super().validate(data)\n        if (\n            \"schedule_time_start\" in data and\n            \"schedule_time_end\" in data and\n            data[\"schedule_time_start\"] >= data[\"schedule_time_end\"]\n        ):\n            err_msg = \"schedule_time_end must be greater than schedule_time_start\"\n            raise serializers.ValidationError(err_msg)\n        return data\n\n\nclass ReScheduleTaskDataSerializer(serializers.Serializer):\n    schedule_time = serializers.DateTimeField(validators=[validate_schedule_time])\n","repo_name":"saqlainsyed007/Tracebloc-TaskScheduler","sub_path":"TaskScheduler/schedule_tasks/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23186865468","text":"from datetime import datetime, timedelta\n\nfrom requests import get\nimport json\n\nfrom golf_app import utils\nfrom golf_app.models import Tournament, ScoreDict, Group, Field, Golfer\n\n\nclass ESPNData(object):\n    '''takes an optional dict and provides functions to retrieve espn golf data,\n    all_data is a list of dicts\n    event_data is the data for the event but most is in competition\n    competition_data various data about the tournament\n    field_data is the actual golfers in the tournament'''\n\n    #only use event_data for match play events, other data not reliable.\n    def __init__(self, t=None, data=None, force_refresh=False, setup=False, update_sd=True):\n        start = datetime.now()\n\n        if t:\n            self.t = t \n        else:\n            self.t = Tournament.objects.get(current=True)\n\n        #with open('byron_nelson_r2.json') as json_file:\n        #    data = json.load(json_file)\n        #self.all_data = data\n\n        if data:\n            self.all_data = data \n        elif self.t.complete and not force_refresh:\n            sd = ScoreDict.objects.get(tournament=self.t)\n            self.all_data = sd.espn_api_data\n        else:\n            pre_data = datetime.now()\n            headers = {'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Mobile Safari/537.36'}\n            url = \"https://site.web.api.espn.com/apis/site/v2/sports/golf/leaderboard?league=pga\"\n            #url = 'https://site.web.api.espn.com/apis/site/v2/sports/golf/leaderboard?event=401243007' #match play 2021 for testing\n            self.all_data = get(url, headers=headers).json()\n            print ('post refresh data dur: ', datetime.now() - pre_data)\n\n        if not setup:\n            sd = ScoreDict.objects.get(tournament=self.t)\n            self.saved_data = sd.espn_api_data\n        else:\n            sd = ScoreDict()\n\n        data_start = datetime.now()\n\n        self.event_data = {}\n        self.competition_data = {}\n        self.field_data = {}\n\n        try:\n            self.event_data = [v for v in self.all_data.get('events') if v.get('id') == self.t.espn_t_num][0]\n        except Exception as e:\n            print ('ERROR espn api didnt find tournament, trying by espn num: ', self.t.name, self.t.espn_t_num)\n            try: \n                url = 'https://site.web.api.espn.com/apis/site/v2/sports/golf/leaderboard?event=' + str(self.t.espn_t_num)\n                self.all_data = get(url, headers=headers).json()\n\n                print(url)\n                self.event_data = [v for v in self.all_data.get('events') if v.get('id') == self.t.espn_t_num][0]\n            except Exception as f:\n                print ('ERROR espn api didnt find t twice: ', self.t.name, self.t.espn_t_num)\n                raise Exception('ESPN API failed to initialize, tournament number not in events') \n        \n        self.competition_data = self.event_data.get('competitions')[0]\n        \n        if self.t.pga_tournament_num == '470':\n            
self.field_data = self.competition_data[0].get('competitors')\n else:\n self.field_data = self.competition_data.get('competitors')\n\n pre_sd = datetime.now()\n \n if len(self.field_data) >0 and update_sd and not data:\n print ('UPDATING SD DATA')\n sd, created = ScoreDict.objects.get_or_create(tournament=self.t)\n sd.espn_api_data = self.all_data\n sd.save()\n\n #print ('sd save dur: ', datetime.now() - pre_sd)\n #print ('data set up: ', datetime.now() - data_start)\n #print ('espn API Init complete, field len: ', len(self.field_data), ' dur: ', datetime.now() - start)\n\n\n def get_t_name(self): #need to test this to confirm it works\n return self.event_data.get('name')\n \n def get_round(self):\n \n return self.competition_data.get('status').get('period')\n\n def get_round_status(self):\n #return self.competition_data.get('status').get('type').get('state')\n return self.competition_data.get('status').get('type').get('description')\n\n\n def started(self):\n if self.event_data and self.event_data.get('status').get('type').get('state') != 'pre':\n return True\n \n return False\n\n def tournament_complete(self):\n #print (self.event_data.get('status'))\n if self.t.pga_tournament_num == '470':\n completed = 0\n\n for c in self.event_data.get('competitions'):\n if c[0].get('description') in [\"Third Place\", \"Championship\", \"Finals\"]:\n if c[0].get('competitors')[0].get('status').get('type').get('completed'): \n completed += 1\n\n if completed == 2:\n return True\n else:\n return False\n\n return self.event_data.get('status').get('type').get('completed')\n\n def playoff(self):\n playoff = [v for v in self.field_data if v.get('status').get('playoff')]\n if len(playoff) > 1:\n return True\n else:\n return False\n \n\n def player_started(self, espn_num):\n if self.t.complete: #required as api data may not exist between tournaments\n return True\n if Field.objects.filter(tournament=self.t, golfer__espn_number=espn_num, withdrawn=True).exists():\n return False\n if self.get_round() > 1:\n return True\n player = [x for x in self.field_data if x.get('id') == espn_num]\n\n\n if len(player) == 0:\n return False\n\n if player[0].get('status').get('period') > 1:\n return True\n elif player[0].get('status').get('period') == 1 and \\\n player[0].get('status').get('type').get('name') == \"STATUS_SCHEDULED\":\n return False\n elif player[0].get('status').get('period') == 1 and \\\n player[0].get('status').get('type').get('name') in [\"STATUS_IN_PROGRESS\", \"STATUS_PLAY_COMPLETE\", \"STATUS_CUT\", \"STATUS_FINISH\"]:\n return True\n print ('cant tell if started, return False: ', espn_num, player[0].get('status'))\n return False\n\n def started_golfers_list(self):\n return [v.get('id') for v in self.field_data if self.player_started(v.get('id'))]\n \n \n def all_golfers_started(self):\n if not self.t.started():\n return False\n \n if self.get_round() > 1:\n return True\n elif len([v.get('id') for v in self.field_data if v.get('status').get('period') == 1 and v.get('status').get('type').get('name') == \"STATUS_SCHEDULED\"]) == 0:\n return True\n else:\n return False\n \n\n def field(self):\n return self.field_data\n\n\n def golfer_data(self, espn_num=None):\n try:\n if espn_num:\n return [x for x in self.field_data if x.get('id') == espn_num][0]\n else:\n return None\n except Exception as e:\n if self.t.pga_tournament_num == '018':\n return [x for x in self.field_data if str(espn_num) in [str(x.get('roster')[0].get('playerId')), str(x.get('roster')[1].get('playerId'))]][0]\n \n print ('espnApi 
golfer_data issue, espn_num: ', espn_num, e)\n return None\n\n \n def get_all_data(self):\n return self.all_data\n\n def cut_num(self):\n '''gives cut num wihtout group penalty. returns an int and need to add group penalty seperately'''\n if not self.started():\n return self.t.saved_cut_num\n\n if self.t.cut_score and self.t.cut_score.isdigit():\n return int(self.t.cut_score) + 1\n\n #clean this up, added for round 1 based on espn not having a cut round or score. they have cutRound == 0 \n if self.t.has_cut and int(self.get_round()) <= int(self.t.saved_cut_round) and self.event_data.get('tournament').get('cutRound') == 0:\n #move this to be the cut_line funciton\n return min(int(x.get('status').get('position').get('id')) for x in self.field_data \\\n if int(x.get('status').get('position').get('id')) > int(self.t.saved_cut_num)) \n\n if self.event_data.get('tournament').get('cutCount') != 0:\n return self.event_data.get('tournament').get('cutCount') + 1\n elif self.t.has_cut and int(self.get_round()) <= int(self.t.saved_cut_round):\n try:\n return min(int(x.get('status').get('position').get('id')) for x in self.field_data \\\n if int(x.get('status').get('position').get('id')) > int(self.t.saved_cut_num)) \n except Exception as e:\n #print ('issue wiht cut num, returning saved model num', e)\n return self.t.saved_cut_num\n else:\n #changed to not cut - should only be here for no cut events\n cuts = [v for v in self.field_data if v.get('status').get('type').get('id') != '3']\n return len(cuts) + 1 \n \n\n def get_rank(self, espn_number):\n golfer_data = self.golfer_data(espn_number)\n \n if not golfer_data:\n return self.cut_num()\n if golfer_data.get('status').get('type').get('id') in ['3']:\n return self.cut_num()\n #return golfer_data.get('status').get('type').get('shortDetail')\n else:\n return golfer_data.get('status').get('position').get('id')\n\n\n def get_rank_display(self, espn_number):\n golfer_data = self.golfer_data(espn_number)\n if not golfer_data:\n return \"WD\"\n else:\n return golfer_data.get('status').get('position').get('displayName')\n\n\n def group_stats(self, groups=None):\n '''takes a espn api obj and queryset of groups, returns a dict with best in group and group cut counts'''\n d = {}\n\n if not groups:\n groups = Group.objects.filter(tournament=self.t) \n \n #for g in Group.objects.filter(tournament=self.t):\n for g in groups:\n try:\n #golfers = g.get_golfers()\n golfers = self.made_cut_golfers(g.get_golfers())\n \n if self.t.pga_tournament_num == '018':\n min_score = min([int(x.get('status').get('position').get('id')) for x in self.field_data if str(x.get('roster')[0].get('playerId')) in golfers or str(x.get('roster')[1].get('playerId')) in golfers])\n best = []\n best_0 = [x.get('roster')[0].get('playerId') for x in self.field_data if str(x.get('roster')[0].get('playerId')) in golfers and int(x.get('status').get('position').get('id')) == min_score]\n best_1 = [x.get('roster')[1].get('playerId') for x in self.field_data if str(x.get('roster')[1].get('playerId')) in golfers and int(x.get('status').get('position').get('id')) == min_score]\n for zero in best_0:\n best.append(zero)\n for one in best_1:\n best.append(one)\n cuts = self.cut_count(g)\n else:\n min_score = min([int(self.get_rank(x.get('id'))) - Field.objects.values('handi').get(tournament=self.t, golfer__espn_number=x.get('id')).get('handi') for x in self.field_data if x.get('id') in golfers])\n best = [x.get('athlete').get('id') for x in self.field_data if x.get('id') in golfers and 
int(self.get_rank(x.get('id'))) - Field.objects.values('handi').get(tournament=self.t, golfer__espn_number=x.get('id')).get('handi') == min_score]\n cuts = len(self.regular_cut_golfers(g.get_golfers()))\n #cuts = len([x.get('athlete').get('id') for x in self.field_data if x.get('id') in golfers and x.get('status').get('type').get('id') == '3'])\n ## change cuts to use the function after testing\n golfer_list = []\n golfer_espn_num_list = []\n \n for b in best:\n if self.t.pga_tournament_num == '018':\n b_name = [x.get('roster')[0].get('athlete').get('displayName') for x in self.field_data if str(x.get('roster')[0].get('playerId')) == str(b)]\n if len(b_name) == 0:\n g_name = [x.get('roster')[1].get('athlete').get('displayName') for x in self.field_data if str(x.get('roster')[1].get('playerId')) == str(b)][0]\n else:\n g_name = b_name[0]\n golfer_list.append(g_name)\n else:\n golfer_list.append(self.golfer_data(b).get('athlete').get('displayName'))\n golfer_espn_num_list.append(str(b))\n \n d[str(g.number)] = {'golfers': golfer_list,\n 'golfer_espn_nums': golfer_espn_num_list,\n 'cuts': cuts,\n 'total_golfers': g.playerCnt\n }\n\n g.cutCount = cuts\n g.save()\n except Exception as e:\n print ('espn api group stats issue: ', g, e)\n d[str(g.number)] = {'golfers': [],\n 'golfer_espn_nums': [],\n 'cuts': 0,\n 'total_golfers': g.playerCnt\n }\n\n return d\n\n\n def cut_penalty(self, p):\n '''takes a field obj and a score obj, returns an int'''\n if not p.group.cut_penalty():\n return 0\n\n #cuts = p.group.cut_count(espn_api_data=self.field_data)\n cuts = p.group.cut_count(espn_api_data=self)\n if cuts == 0:\n return 0\n else:\n return p.group.playerCnt - cuts\n\n \n def get_movement(self, golfer_data):\n return golfer_data.get('movement')\n\n\n #def get_player_hole():\n # pass\n\n\n def pre_cut_wd(self):\n return len([x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') == '3' and \\\n int(x.get('status').get('period')) <= self.t.saved_cut_round]) \n \n \n def post_cut_wd(self):\n l = self.t.not_playing_list()\n l.remove('CUT')\n return len([x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') == '3' \\\n and x.get('status').get('type').get('shortDetail') in l and int(x.get('status').get('period')) > self.t.saved_cut_round]) \n\n \n def golfers_post_cut_wd(self, espn_num):\n '''takes a list, returns a list'''\n l = self.t.not_playing_list()\n l.remove('CUT')\n #print ('espn_api golfers post cut wd', len(espn_num))\n return [x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') == '3' \\\n and x.get('status').get('type').get('shortDetail') in l and int(x.get('status').get('period')) > self.t.saved_cut_round \\\n and x.get('athlete').get('id') in espn_num]\n\n\n def regular_cut_golfers(self, espn_num):\n '''takes a list, returns a list'''\n \n return [x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') == '3' \\\n and x.get('athlete').get('id') in espn_num and x.get('athlete').get('id') not in self.golfers_post_cut_wd(espn_num)] \n\n\n def made_cut_golfers(self, golfers):\n '''takes a list of golfers, returns a list'''\n post_cut_wd = self.golfers_post_cut_wd(golfers)\n regular_made = [x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') != '3' \\\n and x.get('athlete').get('id') in golfers]\n\n return post_cut_wd + regular_made\n\n def post_cut_wd_score(self):\n return len([x for x in 
self.field_data if x.get('status').get('type').get('id') != '3']) + 1\n\n\n def first_tee_time(self):\n #if self.get_round() in [1, 0]:\n try: #for pre-start and before round 1 completes\n times = [datetime.strptime(x.get('linescores')[0].get('teeTime')[:-1], '%Y-%m-%dT%H:%M') for x in self.field() if x.get('status').get('period') == 1]\n print ('times len: ', len(times))\n return min(times)\n except Exception as e: # after round 1 completes\n print ('first tee time exception logic')\n times = [[datetime.strptime(t.get('teeTime')[:-1], '%Y-%m-%dT%H:%M') for t in x.get('linescores') if t.get('period') == 1][0] for x in self.field()]\n print ('times len: ', len(times))\n \n return min(times)\n\n\n def winner(self):\n '''takes an espn api and returns a list'''\n #return [x.get('id') for x in self.field_data if x.get('status').get('position').get('id') == '1']\n return [x.get('id') for x in self.field_data if self.get_rank(x.get('id')) == '1'] #change the rest if this works\n\n def second_place(self):\n '''takes an espn api and returns a list'''\n return [x.get('id') for x in self.field_data if x.get('status').get('position').get('id') == '2']\n\n def third_place(self):\n '''takes an espn api and returns a list'''\n return [x.get('id') for x in self.field_data if x.get('status').get('position').get('id') == '3']\n\n def get_leaderboard(self):\n d = {}\n #print (self.golfer_data('9780'))\n if self.t.pga_tournament_num == '018':\n for data in self.field_data:\n #golfer_data = self.golfer_data(data.get('roster')[0].get('athlete').get('id'))\n #thru = self.get_thru(data.get('roster')[0].get('athlete').get('id'))\n \n #print (golfer_data)\n #print ('data: ', data.get('status'))\n d[data.get('sortOrder')] = {\n 'rank': data.get('status').get('position').get('displayName'),\n 'r1': self.get_round_score(data.get('roster')[0].get('athlete').get('id'), 1),\n 'r2': self.get_round_score(data.get('roster')[0].get('athlete').get('id'), 2),\n 'r3': self.get_round_score(data.get('roster')[0].get('athlete').get('id'), 3),\n 'r4': self.get_round_score(data.get('roster')[0].get('athlete').get('id'), 4),\n 'total_score': self.to_par(data.get('roster')[0].get('athlete').get('id')),\n 'change': '-', #golfer_data.get('movement'),\n 'thru': '-', #thru,\n 'curr_round_score': '-', #self.current_round_to_par(data.get('id')),\n 'golfer_name': data.get('team').get('displayName'), #golfer_data.get('athlete').get('displayName'),\n 'espn_num': '' #data.get('id') \n \n } \n else:\n for data in self.field_data:\n #print ('LB DATA: ', data.get('id'), self.get_thru(data.get('id')))\n golfer_data = self.golfer_data(data.get('id'))\n thru = self.get_thru(data.get('id'))\n d[golfer_data.get('sortOrder')] = {\n #'rank': self.get_rank(data.get('id')),\n 'rank': self.get_rank_display(data.get('id')),\n 'r1': self.get_round_score(data.get('id'), 1),\n 'r2': self.get_round_score(data.get('id'), 2),\n 'r3': self.get_round_score(data.get('id'), 3),\n 'r4': self.get_round_score(data.get('id'), 4),\n #'total_score': golfer_data.get('score').get('displayValue'),\n 'total_score': self.to_par(data.get('id')),\n 'change': golfer_data.get('movement'),\n #'thru': golfer_data.get('status').get('type').get('shortDetail'),\n 'thru': thru,\n 'curr_round_score': self.current_round_to_par(data.get('id')),\n 'golfer_name': golfer_data.get('athlete').get('displayName'),\n 'espn_num': data.get('id') \n\n }\n #print ('leaderboard: ', d)\n return d\n\n\n def get_thru(self, espn_num):\n golfer_data = self.golfer_data(espn_num)\n if golfer_data:\n if 
str(golfer_data.get('status').get('type').get('id')) == '1':\n thru = golfer_data.get('status').get('displayThru')\n elif str(golfer_data.get('status').get('type').get('id')) == '0':\n thru = golfer_data.get('status').get('teeTime')\n else:\n thru = golfer_data.get('status').get('type').get('shortDetail')\n else:\n thru = \"WD\"\n return thru\n\n def to_par(self, espn_num):\n return self.golfer_data(espn_num).get('statistics')[0].get('displayValue')\n\n def get_round_score(self, espn_num, r):\n try:\n return [int(x.get('value')) for x in self.golfer_data(espn_num).get('linescores') if x.get('period') == r][0]\n except Exception as e:\n return '--'\n\n def leaders(self):\n try:\n return [v.get('athlete').get('displayName') for v in self.field_data if self.get_rank(v.get('id')) == '1']\n except Exception as e:\n print ('espn api leaders exception: ', e)\n if self.t.pga_tournament_num == '018':\n return [v.get('team').get('displayName') for v in self.field_data if self.get_rank(v.get('id')) == '1']\n return ['No leaders available']\n\n def leader_score(self):\n try:\n return [self.to_par(v.get('id')) for v in self.field_data if self.get_rank(v.get('id')) == '1'][0]\n #return [v.get('score').get('displayValue') for v in self.field_data if self.get_rank(v.get('id')) == '1'][0]\n except Exception as e:\n print ('espn api leader score exception: ', e)\n return ['']\n\n def post_cut(self):\n if not self.t.has_cut:\n return False\n \n if self.event_data.get('tournament').get('cutRound') == 0:\n return False\n\n if len([x.get('athlete').get('id') for x in self.field_data if x.get('status').get('type').get('id') == '3' \\\n and x.get('status').get('type').get('shortDetail') == \"CUT\"]) > 0:\n return True\n\n return False\n\n def cut_line(self):\n cut_info = {'line_type': '',\n 'cut_score': 'No Cut Line'}\n\n if self.event_data.get('tournament').get('cutRound') and int(self.event_data.get('tournament').get('cutRound')) < int(self.get_round()):\n cut_info.update({'line_type': 'Actual',\n 'cut_score': self.event_data.get('tournament').get('cutScore')})\n\n elif self.event_data.get('tournament').get('cutRound') and int(self.event_data.get('tournament').get('cutRound')) == int(self.get_round()) \\\n and self.competition_data.get('status').get('type').get('state') == \"post\":\n cut_info.update({'line_type': 'Actual',\n 'cut_score': self.event_data.get('tournament').get('cutScore')})\n\n elif self.t.has_cut and int(self.get_round()) <= int(self.t.saved_cut_round): #and self.event_data.get('tournament').get('cutRound') == 0:\n max_rank = max(int(x.get('status').get('position').get('id')) for x in self.field_data \\\n if int(x.get('status').get('position').get('id')) <= int(self.t.saved_cut_num)) \n #cut_score = [x.get('score').get('displayValue') for x in self.field_data if self.get_rank(x.get('id')) == str(max_rank)][0]\n cut_score = [self.golfer_data(x.get('id')).get('statistics')[0].get('displayValue') for x in self.field_data if str(self.get_rank(x.get('id'))) == str(max_rank)][0]\n cut_info.update({'line_type': 'Projected', 'cut_score': cut_score})\n #print ('cut stuff: ', max_rank, cut_score)\n #print ([(self.golfer_data(x.get('id')).get('status').get('position').get('id'), x.get('score').get('displayValue'), self.golfer_data(x.get('id')).get('statistics')[0].get('displayValue')) for x in self.field_data if str(self.get_rank(x.get('id'))) == str(max_rank)])\n return cut_info\n\n def needs_update(self):\n #sd = ScoreDict.objects.get(tournament=self.t)\n saved_data = ESPNData(t=self.t, 
data=self.saved_data)\n\n c_data = self.event_data.get('competitions')[0].get('competitors')\n saved_c = saved_data.event_data.get('competitions')[0].get('competitors')\n\n if saved_data.event_data == self.event_data:\n print ('NO UPDATE required')\n return False\n \n if c_data == saved_c:\n print (\"Competition data same but other diffs skipping calculating scores\")\n return False\n\n return True\n\n def current_round_to_par(self, espn_num):\n data = self.golfer_data(espn_num)\n curr_round = self.get_round()\n try:\n return [x.get('displayValue') for x in self.golfer_data(espn_num).get('linescores') if x.get('period') == curr_round][0]\n except Exception as e:\n return '-'\n\n ## match play functions ##\n\n def mp_golfers_per_round(self):\n d = {}\n for matches in self.event_data.get('competitions'):\n for match in matches:\n if not d.get(match.get('description')):\n d[match.get('description')] = []\n for golfer in match.get('competitors'):\n d.get(match.get('description')).append(golfer.get('athlete').get('id'))\n #if match.get('description') == 'Third Place':\n # print ('thrird: ', golfer.get('score').get('winner'))\n if match.get('description') == 'Third Place' and golfer.get('score').get('winner'):\n d['third'] = golfer.get('athlete').get('id')\n elif match.get('description') == 'Third Place' and not golfer.get('score').get('winner'):\n d['fourth'] = golfer.get('athlete').get('id')\n #elif match.get('description') == 'Championship' and golfer.get('score').get('winner'):\n elif match.get('description') == 'Finals' and golfer.get('score').get('winner'):\n d['first'] = golfer.get('athlete').get('id')\n #elif match.get('description') == 'Championship' and golfer.get('score').get('winner') == False:\n elif match.get('description') == 'Finals' and golfer.get('score').get('winner') == False:\n d['second'] = golfer.get('athlete').get('id')\n\n return d\n\n def get_mp_records(self):\n c = self.event_data.get('competitions')\n d = {'Wednesday Group Play': {'winners': [], 'losers': [], 'draws': []},\n 'Thursday Group Play': {'winners': [], 'losers': [], 'draws': []},\n 'Friday Group Play': {'winners': [], 'losers': [], 'draws': []},\n }\n\n for s in c:\n for m in s:\n if d.get(m.get('description')):\n if m.get('competitors')[0].get('status').get('type').get('id') == '2' and m.get('competitors')[0].get('score').get('draw'):\n d.get(m.get('description')).get('draws').append(m.get('competitors')[0].get('athlete').get('id'))\n d.get(m.get('description')).get('draws').append(m.get('competitors')[1].get('athlete').get('id'))\n elif m.get('competitors')[0].get('score').get('winner'):\n d.get(m.get('description')).get('winners').append(m.get('competitors')[0].get('athlete').get('id'))\n d.get(m.get('description')).get('losers').append(m.get('competitors')[1].get('athlete').get('id'))\n elif m.get('competitors')[1].get('score').get('winner'):\n d.get(m.get('description')).get('winners').append(m.get('competitors')[1].get('athlete').get('id'))\n d.get(m.get('description')).get('losers').append(m.get('competitors')[0].get('athlete').get('id'))\n return d\n\n def mp_golfer_results(self, golfer, records=None):\n '''takes a golfer object returns a list'''\n if not records:\n records = self.get_mp_records()\n wins = len([v.get('winners') for k,v in records.items() if golfer.espn_number in v.get('winners')])\n loss = len([v.get('losers') for k,v in records.items() if golfer.espn_number in v.get('losers')])\n draw = len([v.get('draws') for k,v in records.items() if golfer.espn_number in v.get('draws')])\n\n 
return [wins, loss, draw]\n\n\n def mp_group_rank(self, golfer, records=None):\n '''takes a field object'''\n if not records:\n records = self.get_mp_records()\n d = {}\n for f in Field.objects.filter(group=golfer.group):\n rec = self.mp_golfer_results(f.golfer, records)\n ranking = rec[0] + (rec[2] * .5) - rec[1]\n d[f.pk] = ranking\n\n r = {key: rank for rank, key in enumerate(sorted(set(d.values()), reverse=True), 1)}\n ranked_d = {k: r[v] for k,v in d.items()}\n\n return ranked_d\n\n def cut_count(self, group=None):\n '''gets cut golfers for a group'''\n golfers = group.get_golfers()\n\n if self.t.pga_tournament_num == '018':\n return len([x for x in self.field_data if (str(x.get('roster')[0].get('playerId')) in golfers or str(x.get('roster')[1].get('playerId')) in golfers) and x.get('status').get('type').get('id') == '3'])\n else:\n return len([x for x in self.field_data if x.get('id') in golfers and x.get('status').get('type').get('id') == '3'])\n\n def total_making_cut(self):\n return len([x for x in self.field_data if x.get('status').get('type').get('id') != '3']) - self.post_cut_wd()\n \n def hole_by_hole(self, espn_num):\n r = self.get_round()\n\n print (self.golfer_data(espn_num))\n return \n \n def zurich_golfer_rank(self, golfer):\n '''takes a golfer object returns a string'''\n pass","repo_name":"jflynn87/games","sub_path":"golf_app/espn_api.py","file_name":"espn_api.py","file_ext":"py","file_size_in_byte":30792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"435388792","text":"import os\n\nfrom flask import Flask, abort, g, jsonify\nfrom flask_cors import CORS\nfrom supertokens_python import (\n get_all_cors_headers,\n init,\n)\nfrom supertokens_python.framework.flask import Middleware\nfrom supertokens_python.recipe.session.framework.flask import verify_session\nimport config\n\ninit(\n supertokens_config=config.supertokens_config,\n app_info=config.app_info,\n framework=config.framework,\n recipe_list=config.recipe_list,\n)\n\napp = Flask(__name__)\n\nMiddleware(app)\nCORS(\n app=app,\n supports_credentials=True,\n origins=\"http://localhost:3000\",\n allow_headers=[\"Content-Type\"] + get_all_cors_headers(),\n)\n\n\n@app.route(\"/sessioninfo\", methods=[\"GET\"]) # type: ignore\n@verify_session()\ndef get_session_info():\n session_ = g.supertokens\n return jsonify(\n {\n \"sessionHandle\": session_.get_handle(),\n \"userId\": session_.get_user_id(),\n \"accessTokenPayload\": session_.get_access_token_payload(),\n }\n )\n\n\n# This is required since if this is not there, then OPTIONS requests for\n# the APIs exposed by the supertokens' Middleware will return a 404\n@app.route(\"/\", defaults={\"u_path\": \"\"}) # type: ignore\n@app.route(\"/\") # type: ignore\ndef catch_all(u_path: str): # pylint: disable=unused-argument\n abort(404)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=int(\"3001\"), debug=True)\n","repo_name":"supertokens/dashboard","sub_path":"server/python/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"} +{"seq_id":"74021831874","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport re\n\ntry:\n import urlparse\n from SocketServer import ThreadingMixIn\n from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nexcept ImportError:\n import urllib.parse as urlparse\n from socketserver import ThreadingMixIn\n from http.server import 
BaseHTTPRequestHandler, HTTPServer\n\n\nclass ServerHandler(BaseHTTPRequestHandler):\n protocol_version = \"HTTP/1.1\"\n get_routes = []\n\n url_clean_regex = ((re.compile(r\"\\\\\"), \"/\"), (re.compile(r\"/{2,}\"), \"/\"))\n url_placeholders_patterns = ((re.escape(\"{w}\"), \"([^/]+)\"), (re.escape(\"{p}\"), \"(.+)\"))\n\n @classmethod\n def add_get_route(cls, pattern, handle):\n cls.get_routes.append((cls.generate_pattern(pattern), handle))\n\n @classmethod\n def generate_pattern(cls, s):\n pattern = s\n for regex, repl in cls.url_clean_regex:\n pattern = regex.sub(repl, pattern)\n pattern = re.escape(pattern)\n for p in cls.url_placeholders_patterns:\n pattern = pattern.replace(*p)\n return re.compile(pattern + \"$\")\n\n # noinspection PyPep8Naming\n def do_GET(self):\n self._handle_request(self.get_routes)\n\n def _handle_request(self, routes):\n try:\n self.url = urlparse.urlparse(self.path)\n self.query = dict(urlparse.parse_qsl(self.url.query))\n\n self.url_path = self.url.path\n for r, s in self.url_clean_regex:\n self.url_path = r.sub(s, self.url_path)\n\n for pattern, handler in routes:\n match = pattern.match(self.url_path)\n if match:\n handler(self, *match.groups())\n break\n else:\n self.send_response_and_end(404)\n except Exception as e:\n logging.error(e, exc_info=True)\n self.send_response_and_end(500)\n\n def log_message(self, fmt, *args):\n logging.debug(fmt, *args)\n\n def send_response_with_data(self, data, content_type, code=200):\n self.send_response(code)\n self.send_header(\"Content-Type\", content_type)\n self.send_header(\"Content-Length\", str(len(data)))\n self.end_headers()\n self.wfile.write(data)\n\n def send_response_and_end(self, code, message=None):\n self.send_response(code, message=message)\n self.send_header(\"Content-Length\", \"0\")\n self.end_headers()\n\n def send_redirect(self, url, code=301):\n self.send_response(code)\n self.send_header(\"Location\", url)\n self.send_header(\"Content-Length\", \"0\")\n self.end_headers()\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"\n Handle requests in a separate thread.\n \"\"\"\n daemon_threads = True\n\n\ndef threaded_http_server(host, port):\n return ThreadedHTTPServer((host, port), ServerHandler)\n\n\ndef add_get_route(pattern):\n def wrapper(func):\n ServerHandler.add_get_route(pattern, func)\n return func\n\n return wrapper\n","repo_name":"i96751414/repository.github","sub_path":"lib/httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"38862044168","text":"from typing import NamedTuple\nfrom bs4 import BeautifulSoup, NavigableString, Tag\n\n\nclass MovieInformationRaw(NamedTuple):\n title: NavigableString | Tag\n synopsis: NavigableString | Tag\n info_items: list[Tag]\n where_to_watch: list[Tag]\n\n\ndef find_element_by_data_qa(\n soup: BeautifulSoup,\n tag: str,\n data_qa_value: str,\n) -> Tag | NavigableString:\n return soup.find(tag, attrs={\"data-qa\": data_qa_value}) or Tag(name=\"\")\n\n\ndef find_elements_by_data_qa(\n soup: BeautifulSoup,\n tag: str,\n data_qa_value: str,\n) -> list[Tag]:\n return soup.findAll(tag, attrs={\"data-qa\": data_qa_value})\n\n\ndef extract_where_to_watch(\n soup: BeautifulSoup,\n) -> list[Tag]:\n return soup.findAll(\"where-to-watch-bubble\")\n\n\ndef extract_movie_info(\n soup: BeautifulSoup,\n) -> MovieInformationRaw:\n return MovieInformationRaw(\n title=find_element_by_data_qa(soup, \"h1\", 
\"score-panel-title\"),\n synopsis=find_element_by_data_qa(soup, \"p\", \"movie-info-synopsis\"),\n info_items=find_elements_by_data_qa(soup, \"li\", \"movie-info-item\"),\n where_to_watch=extract_where_to_watch(soup),\n )\n\n\ndef extract_info_items_values(\n info_items: list[Tag],\n) -> dict[str, str]:\n extracted_info = {}\n\n for item in info_items: # To be a list chomp later.\n label_element = item.find(attrs={\"data-qa\": \"movie-info-item-label\"})\n value_element = item.find(attrs={\"data-qa\": \"movie-info-item-value\"})\n\n label_text = (\n label_element.get_text().rstrip() if label_element else None\n )\n value_text = (\n value_element.get_text().rstrip() if value_element else None\n )\n\n if label_text is not None:\n extracted_info[label_text] = value_text\n\n return extracted_info\n","repo_name":"finger-guns/whereisit","sub_path":"formatter/src/operations/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34932901109","text":"from flask import Blueprint, render_template, request\nfrom functions import search_posts\nimport logging\n\nlogging.basicConfig(filename=\"basic.log\", level=logging.INFO, encoding=\"utf8\")\n\n# блюпринт для главной страницы\nmain_blueprint = Blueprint('main_blueprint', __name__)\n\n\n@main_blueprint.route('/')\ndef main_page():\n return render_template('index.html')\n\n\n# страница с результатами поиска по слову\n@main_blueprint.route('/search')\ndef search_page():\n s = request.args['s']\n logging.info(f\"Выполняется поиск по слову {s}\")\n posts = search_posts(s)\n return render_template('post_list.html', posts=posts, s=s)\n","repo_name":"tanyayogini/Flask-Post-Project","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25508109573","text":"from turtle import Turtle, Screen, color\nimport random\n\nis_race_on = False \nscreen = Screen()\nscreen.setup(width=500, height=400)\ncolors = (\"red\", \"orange\", \"yellow\", \"blue\", \"green\")\nturtles = []\n\nfor index in range(0, 5):\n turtles.append(Turtle(shape=\"turtle\", visible=False))\n turtles[index].color(colors[index])\n turtles[index].pu()\n turtles[index].goto(x=-230, y=100 - (index * 50))\n turtles[index].showturtle()\n\n\nuser_bet = screen.textinput(\"Place your bet\", \"Who will win? 
Enter a color:\")\n\nif user_bet:\n is_race_on = True\n\nwhile is_race_on:\n random_distance = random.randint(0,10)\n turtle_index = random.randint(0, 4)\n turtles[turtle_index].forward(random_distance)\n if turtles[turtle_index].xcor() >= 230:\n winner_color = turtles[turtle_index].pencolor()\n if user_bet == winner_color:\n print(\"You've won!\")\n else:\n print(\"You've lost!\")\n is_race_on = False\n\nscreen.exitonclick()","repo_name":"sdearth/pythoncourse","sub_path":"day019/race.py","file_name":"race.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13177375537","text":"from pydub import AudioSegment\nfrom scipy.io.wavfile import write\nimport musical_scales\nimport random\nimport numpy as np\nimport string\n\noctave = [\"C\", \"C#\", \"D\", \"D#\", \"E\", \"F\", \"F#\", \"G\", \"G#\", \"A\", \"A#\", \"B\"]\nsamplerate = 44100\n\ndef get_piano_notes():\n base_freqs = {\"0\": 16.35, \"1\": 32.70, \"2\": 65.41, \"3\": 130.81, \"4\": 261.63, \"5\": 523.25, \"6\": 1046.50, \"7\": 2093.00, \"8\": 4186.01}\n base_freq = 261.63 #Frequency of Note C4\n note_freqs = {}\n for i in range(len(octave)):\n note_freqs[octave[i]] = base_freq * pow(2,(i/12))\n for freq in base_freqs.keys():\n for i in range(len(octave)):\n note_freqs[octave[i] + freq] = base_freqs[freq] * pow(2,(i/12))\n note_freqs[''] = 0.0\n note_freqs['blank'] = 0.0\n return note_freqs\n \ndef get_wave(freq, duration=0.5):\n amplitude = 4096\n t = np.linspace(0, duration, int(samplerate * duration))\n wave = amplitude * np.sin(2 * np.pi * freq * t)\n \n return wave\n \ndef get_song_data(music_notes):\n note_freqs = get_piano_notes()\n song = []\n for note in music_notes.split(\"-\"):\n if \":\" in note:\n notesplit = note.split(\":\")\n note = notesplit[0]\n duration = float(notesplit[1])\n else:\n duration = 0.5\n song.append(get_wave(note_freqs[note], duration=duration))\n song = np.concatenate(song)\n return song.astype(np.int16)\n \ndef get_chord_data(chords):\n chords = chords.split('-')\n note_freqs = get_piano_notes()\n chord_data = []\n for chord in chords:\n if \":\" in chord:\n chordsplit = chord.split(\":\")\n chord = chordsplit[0]\n duration = float(chordsplit[1])\n else:\n duration = 0.5\n data = sum([get_wave(note_freqs[note], duration=duration) for note in chord.split(\"+\")])\n chord_data.append(data)\n chord_data = np.concatenate(chord_data, axis=0) \n return chord_data.astype(np.int16)\n\ndef save_song_data(music_notes, filename):\n data = get_song_data(music_notes)\n data = data * (16300/np.max(data))\n write(filename, samplerate, data.astype(np.int16))\n\ndef save_chord_data(chords, filename):\n data = get_chord_data(chords)\n data = data * (16300/np.max(data))\n write(filename, samplerate, data.astype(np.int16))\n\ndef join_audio(sounds, output_path):\n sound1 = sounds[0]\n sound1 = AudioSegment.from_wav(sound1)\n sound2 = sounds[1]\n sound2 = AudioSegment.from_wav(sound2)\n combined = sound1.overlay(sound2)\n for i in range(2):\n del sounds[0]\n for sound in sounds:\n sound = AudioSegment.from_wav(sound)\n combined = combined.overlay(sound)\n combined.export(output_path, format='wav')\n\ndef melody_maker(tonality):\n scale_tonic = random.choice(octave)\n if tonality == \"major\":\n print(\"major\")\n scale = musical_scales.scale(scale_tonic)\n else:\n print(\"minor\")\n scale = musical_scales.scale(scale_tonic, \"harmonic minor\")\n length = random.choice([0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1.5, 1.5, 
2])\n first_note = random.choice(scale)\n length_melody = length\n melody = f\"{first_note}:{length}-\"\n while length_melody < 13:\n length = random.choice([0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1.5, 1.5, 2])\n note = random.choice(scale)\n melody += f\"{note}:{length}-\"\n length_melody += length\n length = random.choice([0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1.5, 1.5, 2])\n melody += f\"{first_note}:{length}\"\n return melody\n\ndef random_id():\n characters = string.ascii_letters + string.digits\n password = ''.join(random.choice(characters) for i in range(15))\n return password","repo_name":"VulcanWM/melody-maker-python","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15048681978","text":"import pytest\nimport numpy as np\nfrom nskit import NA, NucleicAcid\nfrom nskit.exceptions import InvalidStructure, InvalidAdjacency\n\n\nasymmetric_adjacency = np.array([\n [0, 0, 1], \n [0, 0, 0], \n [0, 0, 0], \n])\n\nvalues_bt1 = np.array([\n [0, 0, 2], \n [0, 0, 0], \n [2, 0, 0], \n])\n\nvalues_lt0 = np.array([\n [0, 0, -1], \n [0, 0, 0], \n [-1, 0, 0], \n])\n\nvalues_float = np.array([\n [0, 0, 1.1], \n [0, 0, 0], \n [1.1, 0, 0], \n])\n\nmultiple_bonds = np.array([\n [0, 0, 1, 0, 1], \n [0, 0, 0, 0, 0], \n [1, 0, 0, 0, 0], \n [0, 0, 0, 0, 0], \n [1, 0, 0, 0, 0], \n])\n\n\nclass TestAdjacency:\n\n @pytest.mark.parametrize(\n \"struct\",\n [\n '....', \n '..((....))..',\n '..(((...))..).',\n '..((.[[.)).].]',\n '([{)]}',\n '(((..)))'\n ]\n )\n def test_recreation(self, struct):\n na1 = NA(struct)\n na2 = NucleicAcid.from_adjacency(na1.get_adjacency())\n assert na1.struct==na2.struct\n\n\n def test_asymmetry(self):\n with pytest.raises(InvalidAdjacency):\n _ = NucleicAcid.from_adjacency(asymmetric_adjacency)\n\n @pytest.mark.parametrize(\n \"adj\",\n [\n values_bt1,\n values_lt0,\n values_float\n ]\n )\n def test_values(self, adj):\n with pytest.raises(InvalidAdjacency):\n _ = NucleicAcid.from_adjacency(adj)\n\n\n def test_multiple_bonds(self):\n with pytest.raises(InvalidAdjacency):\n _ = NucleicAcid.from_adjacency(multiple_bonds)\n\n \n \n \n \n \n ","repo_name":"Arty40m/nskit","sub_path":"test/test_adjacency.py","file_name":"test_adjacency.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22080126844","text":"import numpy as np\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport os\nimport csv\nfrom CESP.data_loading.data_io import create_directories, backup_history\nfrom CESP.utils.plotting import plot_validation\nfrom CESP.evaluation.detailed_validation import detailed_validation\n\n\ndef cross_validation(sample_list, model, k_fold=3, epochs=20,\n iterations=None, evaluation_path=\"evaluation\",\n draw_figures=False, run_detailed_evaluation=False,\n callbacks=[], save_models=True, return_output=False):\n \"\"\"\n Function for an automatic k-fold Cross-Validation of the Neural Network model by\n running the whole pipeline several times with different data set combinations.\n\n :param sample_list: A list of sample indicies which will be used for validation.\n :param model: Instance of a Neural Network model class instance.\n :param k_fold: The number of k-folds for the Cross-Validation. By default, a 3-fold Cross-Validation is performed.\n :param epochs: Number of epochs. 
A single epoch is defined as one iteration through the complete data set.\n :param iterations: Number of iterations (batches) in a single epoch.\n :param evaluation_path: Path to the evaluation data directory. This directory will be created and used for storing\n all kinds of evaluation results during the validation processes.\n :param draw_figures: Option if evaluation figures should be automatically plotted in the evaluation directory.\n :param run_detailed_evaluation: Option if a detailed evaluation (additional prediction) should be performed.\n :param callbacks: A list of Callback classes for custom evaluation.\n :param save_models: Option if fitted models should be stored or thrown away.\n :param return_output: Option, if computed evaluations will be output as the return of this function or\n if the evaluations will be saved on disk in the evaluation directory.\n :return:\n \"\"\"\n # Initialize result cache\n if return_output : validation_results = []\n # Randomly permute the sample list\n samples_permuted = np.random.permutation(sample_list)\n # Split sample list into folds\n folds = np.array_split(samples_permuted, k_fold)\n fold_indices = list(range(len(folds)))\n # Start cross-validation\n for i in fold_indices:\n # Reset Neural Network model weights\n model.reset_weights()\n # Subset training and validation data set\n training = np.concatenate([folds[x] for x in fold_indices if x!=i],\n axis=0)\n validation = folds[i]\n # Initialize evaluation subdirectory for current fold\n subdir = create_directories(evaluation_path, \"fold_\" + str(i))\n # Save model for each fold\n cb_model = ModelCheckpoint(os.path.join(subdir, \"model.hdf5\"),\n monitor=\"val_loss\", verbose=1,\n save_best_only=True, mode=\"min\")\n if save_models == True : cb_list = callbacks + [cb_model]\n else : cb_list = callbacks\n # Run training & validation\n history = model.evaluate(training, validation, epochs=epochs,\n iterations=iterations, callbacks=cb_list)\n # Backup current history dictionary\n if return_output : validation_results.append(history.history)\n else : backup_history(history.history, subdir)\n # Draw plots for the training & validation\n if draw_figures:\n plot_validation(history.history, model.metrics, subdir)\n # Make a detailed validation of the current cv-fold\n if run_detailed_evaluation:\n detailed_validation(validation, model, subdir)\n # Return the validation results\n if return_output : return validation_results\n\n\ndef split_folds(sample_list, k_fold=3, evaluation_path=\"evaluation\"):\n \"\"\"\n Function for splitting a data set into k-folds. The splitting will be saved\n in files, which can be used for running a single fold run.\n In contrast to the normal cross_validation() function, this allows running\n folds parallelized on multiple GPUs.\n :param sample_list: A list of sample indicies which will be used for validation.\n :param k_fold: The number of k-folds for the Cross-Validation. 
By default, a 3-fold Cross-Validation is performed.\n :param evaluation_path:\n :return:\n \"\"\"\n # Randomly permute the sample list\n samples_permuted = np.random.permutation(sample_list)\n # Split sample list into folds\n folds = np.array_split(samples_permuted, k_fold)\n fold_indices = list(range(len(folds)))\n # Iterate over each fold\n for i in fold_indices:\n # Subset training and validation data set\n training = np.concatenate([folds[x] for x in fold_indices if x!=i],\n axis=0)\n validation = folds[i]\n # Initialize evaluation subdirectory for current fold\n subdir = create_directories(evaluation_path, \"fold_\" + str(i))\n fold_cache = os.path.join(subdir, \"sample_list.csv\")\n # Write sampling to disk\n write_fold2csv(fold_cache, training, validation)\n\n\ndef run_fold(fold, model, epochs=20, iterations=None, evaluation_path=\"evaluation\", draw_figures=True, callbacks=[],\n save_models=True):\n \"\"\"\n Function for running a single fold of a cross-validation. In contrast to the normal cross_validation() function,\n this allows running folds parallelized on multiple GPUs.\n :param fold: The integer of the desired fold, which should be validated (starting with 0).\n :param model: Instance of a Neural Network model class instance.\n :param epochs: Number of epochs. A single epoch is defined as one iteration through the complete data set.\n :param iterations: Number of iterations (batches) in a single epoch.\n :param evaluation_path: Path to the evaluation data directory. This directory will be created and\n used for storing all kinds of evaluation results during the validation processes.\n :param draw_figures: Option if evaluation figures should be automatically plotted in the evaluation directory.\n :param callbacks: A list of Callback classes for custom evaluation.\n :param save_models: Option if fitted models should be stored or thrown away.\n :return:\n \"\"\"\n # Load sampling fold from disk\n fold_path = os.path.join(evaluation_path, \"fold_\" + str(fold),\n \"sample_list.csv\")\n training, validation = load_csv2fold(fold_path)\n # Reset Neural Network model weights\n model.reset_weights()\n # Initialize evaluation subdirectory for current fold\n subdir = os.path.join(evaluation_path, \"fold_\" + str(fold))\n # Save model for each fold\n cb_model = ModelCheckpoint(os.path.join(subdir, \"model.hdf5\"),\n monitor=\"val_loss\", verbose=1,\n save_best_only=True, mode=\"min\")\n if save_models == True : cb_list = callbacks + [cb_model]\n else : cb_list = callbacks\n # Run training & validation\n history = model.evaluate(training, validation, epochs=epochs,\n iterations=iterations, callbacks=cb_list)\n # Backup current history dictionary\n backup_history(history.history, subdir)\n # Draw plots for the training & validation\n if draw_figures:\n plot_validation(history.history, model.metrics, subdir)\n\n\ndef write_fold2csv(file_path, training, validation):\n \"\"\"\n Subfunction for writing a fold sampling to disk\n :param file_path:\n :param training:\n :param validation:\n :return:\n \"\"\"\n with open(file_path, \"w\") as csvfile:\n writer = csv.writer(csvfile, delimiter=\" \")\n writer.writerow([\"TRAINING:\"] + list(training))\n writer.writerow([\"VALIDATION:\"] + list(validation))\n\ndef load_csv2fold(file_path):\n \"\"\"\n Subfunction for loading a fold sampling from disk\n :param file_path:\n :return:\n \"\"\"\n training = None\n validation = None\n with open(file_path, \"r\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\" \")\n for row in reader:\n if not 
training : training = row[1:]\n else : validation = row[1:]\n return training, validation\n","repo_name":"UriShavit/CystEarlyStagePrediction","sub_path":"CESP/evaluation/cross_validation.py","file_name":"cross_validation.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11608333442","text":"# Human, dog and cat recognition from a dataset + data training for recognition -----\r\n#In order to train the data, follow the steps in the video from minute 1:5:00 (https://www.youtube.com/watch?v=OxTOzSr2NZ0)\r\n\r\n\r\n\r\n#import NumPy , OpenCV and matplotlib\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\n# Load dog, human and cat recognition files and store them in separate variables\r\nface_cascade=cv2.CascadeClassifier('mydogdetector.xml')\r\nface_cascade2=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\nface_cascade3=cv2.CascadeClassifier('mycatdetector2.xml')\r\n\r\n# Read the image, convert it into a gray image, and show it\r\nimg=cv2.imread('family.bmp')\r\ngray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\r\n# Determine the type of font that will be used to write on the image\r\nfont=cv2.FONT_HERSHEY_SIMPLEX\r\n\r\n# recognize the dogs, human, and cats in the images \r\nfaces=face_cascade.detectMultiScale(gray,1.345,5,75)\r\nfaces2=face_cascade2.detectMultiScale(gray,1.3,5)\r\nfaces3=face_cascade3.detectMultiScale(gray,1.3,2,75)\r\n\r\n# Draw a box around each dog discovered in the picture and write the word “dog” next to it\r\nfor(x,y,w,h) in faces:\r\n\timg=cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\r\n\tcv2.putText(img,'Dog',(x,y),font,0.9,(0,255,0),2)\r\n\r\n# Draw a square around each human being discovered in the picture and write the word \"Human\" next to it\r\nfor(z,v,b,n) in faces2:\r\n\timg=cv2.rectangle(img,(z,v),(z+b,v+n),(0,0,255),2)\r\n\tcv2.putText(img,'Human',(z,v),font,0.9,(0,0,255),2)\r\n\r\n# Draw a square around each cat discovered in the image and write the word \"Cat\" next to it\r\nfor(q,w,e,r) in faces3:\r\n\timg=cv2.rectangle(img,(q,w),(q+e,w+r),(255,0,0),2)\r\n\tcv2.putText(img,'Cat',(q,w),font,0.9,(255,0,0),2)\r\n\t\r\n#Change the order of the color channels in the image from BGR to RGB\r\np,l,m=cv2.split(img)\r\nimg=cv2.merge([m,l,p])\r\n\r\n# Show image \r\nplt.imshow(img)\r\nplt.show()\r\n\r\n# Waiting to press any key on the keyboard to close open windows\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n","repo_name":"Raneen111/Image-processing-with-opencv","sub_path":"AI10.py","file_name":"AI10.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8102978368","text":"import math\nimport time\n\nfrom scipy.stats import entropy\n\nfrom clocq.FaginsAlgorithm import FaginsThresholdAlgorithm\nfrom clocq.WikidataSearch import CandidateList, WikidataSearch\n\n\nclass TopkProcessor:\n \"\"\"\n Operator that computes the top-k KB items for\n one specific question word. There is one such operator\n for each question word, i.e. 
m parallel operators.\n \"\"\"\n\n def __init__(\n self,\n kb,\n wiki2vec,\n connectivity_graph,\n coherence_graph,\n question_word_index,\n question_words,\n h_match=0.4,\n h_rel=0.3,\n h_conn=0.2,\n h_coh=0.1,\n d=20,\n k=\"AUTO\",\n wikidata_search_cache=None,\n verbose=False,\n ):\n self.kb = kb\n self.verbose = verbose\n # initialize question words\n self.question_word_index = question_word_index\n self.question_words = question_words\n self.question_word = question_words[question_word_index]\n self.number_of_question_words = len(question_words)\n # initialize required structures\n self.connectivity_graph = connectivity_graph\n self.coherence_graph = coherence_graph\n self.wiki2vec = wiki2vec\n # used for computing k (if applicable)\n self._initialize_item_retrieval(d, wikidata_search_cache)\n # hyperparameters\n self.h_match = h_match\n self.h_rel = h_rel\n self.h_conn = h_conn\n self.h_coh = h_coh\n # other parameters\n self.d = d\n self.k = k\n # internal variable\n self.top_k = (\n None # top-k list as returned by FaginsAlgorithm.apply() method, structure: [score, id, score[1-4]]\n )\n # initialize candidate list\n self.candidate_list = CandidateList(\n self.question_word, kb, list_depth=d, wikidata_search_cache=wikidata_search_cache\n )\n # priority queues for individual scores\n self.queue_matching_score = list()\n self.queue_connectivity_score = list()\n self.queue_relevance_score = list()\n self.queue_coherence_score = list()\n # set k automatically for question word\n if k == \"AUTO\":\n self.k = self._set_k()\n else:\n self.k = int(k)\n\n def _initialize_item_retrieval(self, depth, wikidata_search_cache):\n \"\"\"\n Initialize a Wikidata search. The search can be initialized\n with existing search results for (better) reproducibility of results.\n \"\"\"\n if wikidata_search_cache:\n self.search = WikidataSearch(depth, wikidata_search_cache)\n else:\n self.search = WikidataSearch(depth)\n\n def add_candidates_to_graph(self):\n \"\"\"Add candidate KB items to graphs (connectivity and coherence).\"\"\"\n # check if candidates already initialized (in k=AUTO setting)\n if not self.candidate_list.get_items():\n self.candidate_list.initialize()\n # add items to graphs\n for node in self.candidate_list.get_items():\n self.connectivity_graph.add_node(node, self.question_word_index)\n self.coherence_graph.add_node(node, self.question_word_index)\n\n def get_candidates(self):\n \"\"\"Return all candidate KB items (left) in the list.\"\"\"\n return self.candidate_list.get_items()\n\n def _set_k(self):\n \"\"\"\n Determine the k parameter for the given question word.\n The current implementation is based on the ambiguity of the word,\n which relates to the uncertainty of the disambiguation.\n This uncertainty is computed by the entropy of the frequency\n distribution of candidate KB items in the KB.\n \"\"\"\n self.candidate_list.initialize()\n search_result = self.candidate_list.get_items()\n frequencies = list()\n # determine frequencies\n for item in search_result:\n freqs = self.kb.get_frequency(item)\n freq = sum(freqs)\n frequencies.append(freq)\n sum_frequency = sum(frequencies)\n if sum_frequency == 0:\n k = 0\n return k\n # transform to probabilities\n probabilities = [float(freq) / float(sum_frequency) for freq in frequencies]\n ent = entropy(probabilities, base=2)\n # compute k\n k = math.floor(ent) + 1\n return k\n\n def initialize_scores(self):\n \"\"\"\n Creates a list for each score, in which KB items are\n sorted in score-descending order.\n \"\"\"\n start = 
time.time()\n other_question_words = [\n word for i, word in enumerate(self.question_words) if not i == self.question_word_index\n ]\n other_question_words_vectors = self.wiki2vec.get_word_vectors(other_question_words)\n for i in range(self.d):\n item = self.candidate_list.scan()\n if item is None:\n break\n item, score = item\n # matching\n matching_score = score\n # matching_score = self.wiki2vec.matching(item, self.question_word) # alternative to 1/rank\n matching_score = round(matching_score, 4)\n self.queue_matching_score.append((item, matching_score))\n # relevance\n relevance_score = self.wiki2vec.get_question_relevance_score(item, other_question_words_vectors)\n relevance_score = round(relevance_score, 4)\n self.queue_relevance_score.append((item, relevance_score))\n # connectivity\n connectivity_score, max_weights = self.connectivity_graph.get_single_connectivity_score(\n item, self.number_of_question_words, self.question_word_index\n )\n connectivity_score = round(connectivity_score, 4)\n self.queue_connectivity_score.append((item, connectivity_score))\n # coherence\n coherence_score, max_weights = self.coherence_graph.get_single_coherence_score(\n item, self.number_of_question_words, self.question_word_index\n )\n coherence_score = round(coherence_score, 4)\n self.queue_coherence_score.append((item, coherence_score))\n # sort the individual queues\n self.queue_matching_score = sorted(self.queue_matching_score, key=lambda j: j[1], reverse=True)\n self.queue_relevance_score = sorted(self.queue_relevance_score, key=lambda j: j[1], reverse=True)\n self.queue_connectivity_score = sorted(self.queue_connectivity_score, key=lambda j: j[1], reverse=True)\n self.queue_coherence_score = sorted(self.queue_coherence_score, key=lambda j: j[1], reverse=True)\n self._print_verbose(f\"Time (initialize_scores): {time.time() - start}\")\n\n def compute_top_k(self, connectivity_graph, coherence_graph):\n \"\"\"\n Compute the top-k KB items for the question term, given the\n connectivity graph, coherence graph and initialized matching\n and coherence scores.\n First, the queues are established and sorted in score-descending\n order, then Fagin's Threshold Algorithm (TA) is applied.\n \"\"\"\n self.connectivity_graph = connectivity_graph\n self.coherence_graph = coherence_graph\n self.initialize_scores()\n start = time.time()\n fagins = FaginsThresholdAlgorithm()\n self.top_k = fagins.apply(\n self.queue_matching_score,\n self.queue_relevance_score,\n self.queue_connectivity_score,\n self.queue_coherence_score,\n (self.h_match, self.h_rel, self.h_conn, self.h_coh),\n k=self.k,\n )\n self._print_verbose(f\"Time (FaginsAlgorithm) {time.time() - start}\")\n\n def get_top_k(self):\n \"\"\"Returns the top-k KB items for the question term.\"\"\"\n return self.top_k\n\n def scan(self):\n \"\"\"Returns the next top-k KB item for the question term.\"\"\"\n return self.top_k.pop()\n\n def _print_verbose(self, string):\n \"\"\"Print only if verbose is set.\"\"\"\n if self.verbose:\n print(string)\n","repo_name":"GracePeterMutiibwa/CLOCQ","sub_path":"clocq/TopkProcessor.py","file_name":"TopkProcessor.py","file_ext":"py","file_size_in_byte":8071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"23636620081","text":"#!/bin/python\r\n\r\nimport fileinput\r\n\r\nsampleOut = \"qa zoo our language is impossible to understand there are twenty six factorial possibilities so it is okay if you want to just give up\"\r\nsampleIn = \"zy qee ejp mysljylc kd kxveddknmc re 
jsicpdrysi rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd de kr kd eoya kw aej tysr re ujdr lkgc jv\"\r\n\r\ntrans = str.maketrans(sampleIn, sampleOut)\r\n\r\nlines = fileinput.input(files=['tongues.in'])\r\nN = int(lines[0])\r\nfor index in range(1, N + 1):\r\n line = lines[index].rstrip()\r\n out = line.translate(trans)\r\n print('Case #%d: %s' % (index, out))\r\nlines.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/841.py","file_name":"841.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5700646836","text":"# SW Expert Academy - 1949. [모의 SW 역량테스트] 등산로 조성\n\n\ndef dfs(x, y, cut, length):\n global answer\n # 길이 최대인지 지속 체크 \n answer = max(answer, length)\n visited[x][y] = True # 방문\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if nx < 0 or nx >= N or ny < 0 or ny >= N:\n continue\n \n if not visited[nx][ny]:\n # 등산로 조성 시 현재보다 길이가 짧다면 진행\n if board[x][y] > board[nx][ny]:\n dfs(nx, ny, cut, length + 1)\n else:\n # 다음 위치 값이 현재 값보다 크고, 다음 위치 값 - 현재 위치 값 < 최대 깎을 수 있는 값\n # 위와 같이 성립하고 깎지 않은 경우에는 \"최대 길이의 등산로를 조성\"해야하므로 \n # 다음 위치 값에 현재 위치 값 - 1으로 깎은 후 진행한다\n if cut == 0 and board[nx][ny] - board[x][y] < K:\n original = board[nx][ny] # 등산로 조성한 이후에 값을 다시 바꿔줘야 하므로 변수에 저장\n board[nx][ny] = board[x][y] - 1\n dfs(nx, ny, 1, length + 1)\n board[nx][ny] = original\n visited[x][y] = False\n\n\nT = int(input())\n# 상하좌우\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\nfor tc in range(1, T + 1):\n N, K = map(int, input().split())\n board = [list(map(int, input().split())) for _ in range(N)]\n visited = [[False] * N for _ in range(N)]\n max_list = []\n max_val = 0\n answer = 0\n for i in range(N):\n max_val = max(max_val, max(board[i]))\n for i in range(N):\n for j in range(N):\n if board[i][j] == max_val:\n max_list.append((i, j))\n for x, y in max_list:\n dfs(x, y, False, 0)\n print('#{} {}'.format(tc, answer + 1))\n","repo_name":"wnstj-yang/Algorithm","sub_path":"SWEA/SWEA_1949.py","file_name":"SWEA_1949.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10165051535","text":"from django.urls import path, re_path\nfrom .views import HomePageView, LakesView, catchall, lakes_list_view, lakes_geojson_view, contours_geojson_view\n\napp_name = 'lakes'\n\nurlpatterns = [\n #path('',HomePageView.as_view(), name='home'),\n path('lakes/', LakesView.as_view(), name='lakes'),\n path('lakes/list', lakes_list_view, name='lakes_list'),\n path('lakes/geojson', lakes_geojson_view, name='lakes_geojson'),\n path('contours//geojson', contours_geojson_view, name='contours'),\n re_path(r'', catchall),\n]\n","repo_name":"donohoea/xander_lakes","sub_path":"backend/lakes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36150352009","text":"import os\n\nopts = lux.getImportOptions()\nprint(opts)\n\n\nopts['accurate_tessellation'] = False\nopts['adjust_camera_look_at'] = False\nopts['adjust_environment'] = True\nopts['applyLibraryMaterials'] = False\nopts['camera_import'] = True\nopts['center_geometry'] = True\nopts['compute_normals'] = True\nopts['frame'] = 0\nopts['geometry_scale'] = 10\nopts['geometry_units'] = 1000.0\nopts['group_by'] = 2\nopts['group_by_shader'] = False\nopts['include_hidden_surfaces'] = False\nopts['include_nurbs'] = 
False\nopts['include_single_surfaces'] = True\nopts['material_name_from_color'] = False\nopts['mayaForceVersion'] = ''\nopts['merge_groups'] = False\nopts['merge_objects'] = False\nopts['new_import'] = False\nopts['retain_materials'] = True\nopts['same_coordinates'] = True\nopts['separate_materials'] = True\nopts['separate_parts'] = True\nopts['snap_to_ground'] = True\nopts['tessellation_quality'] = 0.20000000298023224\nopts['up_vector'] = 1\nopts['update_mode'] = False\n\n\ncwd = os.getcwd()\nprint(cwd) # print current working directory\n\nfor root, dirs, files in os.walk(\".\", topdown=False):\n for name in files:\n file_path = os.path.join(root, name)\n if file_path.endswith('.obj'):\n \tprint( 'Loading file ' + file_path + '\\n' )\n \tlux.importFile(file_path, opts = opts)\n\n\n\n\n","repo_name":"xuhaocuhk/PointCloudRenderingScript","sub_path":"batch_import_keyshot.py","file_name":"batch_import_keyshot.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74191339073","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\n# from matplotlib import pyplot as plt\r\n# from matplotlib import gridspec\r\nimport numpy as np\r\nimport args\r\nfrom dataset.load_data import load_data\r\n# from torchsummary import summary\r\n\r\n# torch.cuda.set_device(0)\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\nprint(device)\r\nbatch_size = args.batch_size\r\nepochs = args.epochs\r\n\r\ntrain_loader, test_loader = load_data('oppo')\r\n\r\nimage_channels = 1\r\nshow_im = False\r\nalpha = 1\r\nenc_shape = []\r\n\r\n\r\nclass DeepConvLSTM(torch.nn.Module):\r\n def __init__(self, image_channels, n_classes):\r\n super(DeepConvLSTM, self).__init__()\r\n self.cnn = nn.Sequential(\r\n nn.Conv2d(image_channels, 64, kernel_size=(5, 1), stride=(1, 1)),\r\n nn.ReLU(),\r\n # nn.BatchNorm2d(64),\r\n nn.Conv2d(64, 64, kernel_size=(5, 1), stride=(1, 1)),\r\n nn.ReLU(),\r\n # nn.BatchNorm2d(64),\r\n nn.Conv2d(64, 64, kernel_size=(5, 1), stride=(1, 1)),\r\n nn.ReLU(),\r\n # nn.BatchNorm2d(64),\r\n nn.Conv2d(64, 64, kernel_size=(5, 1), stride=(1, 1)),\r\n nn.ReLU(),\r\n # nn.BatchNorm2d(64),\r\n )\r\n self.lstm = nn.LSTM(\r\n input_size=113 * 64,\r\n hidden_size=128,\r\n num_layers=2,\r\n batch_first=True\r\n )\r\n self.fc = nn.Linear(128, n_classes)\r\n\r\n def forward(self, x):\r\n cnn_x = self.cnn(x)\r\n # print(cnn_x.shape)\r\n cnn_x = cnn_x.transpose(dim0=1, dim1=2)\r\n # print(cnn_x.shape)\r\n cnn_x = cnn_x.reshape([-1, 8, 64 * 113])\r\n # print(cnn_x.shape)\r\n lstm_x, (h_n, c_n) = self.lstm(cnn_x)\r\n # print(lstm_x.shape)\r\n z = self.fc(lstm_x[:, -1, :])\r\n # print(z.shape)\r\n return F.log_softmax(z, dim=1)\r\n\r\n\r\ncnn = DeepConvLSTM(image_channels=image_channels, n_classes=args.n_classes).cuda()\r\n# model.load_state_dict(torch.load('vae.torch', map_location='cpu'))\r\noptimizer = torch.optim.Adam([{\"params\": cnn.parameters()}],\r\n lr=1e-3,\r\n weight_decay=0.01)\r\n# summary(cnn, (1, 128, 113), args.batch_size)\r\n# summary(classifier, 256)\r\nif __name__ == '__main__':\r\n print('The encoder model: \\n', cnn)\r\n loss_ = nn.NLLLoss()\r\n for epoch in range(epochs):\r\n for idx, (train_x, train_y) in enumerate(train_loader):\r\n cnn.train()\r\n\r\n train_x = train_x.cuda()\r\n train_y = train_y.cuda()\r\n train_y = train_y.long()\r\n prb = cnn(train_x)\r\n # print(images.shape, recon_images.shape, 
labels.shape, pred.shape)\r\n loss = loss_(prb, train_y)\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # if (epoch + 1) % 1 == 0:\r\n # to_print = \"Epoch[{}/{}] Loss: total-{:.3f} \". \\\r\n # format(epoch + 1,\r\n # epochs,\r\n # loss.item() / len(train_x))\r\n # print(to_print)\r\n\r\n if (epoch + 1) % 1 == 0:\r\n cnn.eval()\r\n correct = 0 # 初始化预测正确的数据个数为0\r\n for test_x, test_y in test_loader:\r\n test_x, test_y = test_x.cuda(), test_y.cuda()\r\n prb = cnn(test_x)\r\n pred = prb.data.max(1, keepdim=True)[1] # get the index of the max log-probability\r\n correct += pred.eq(test_y.data.view_as(pred)).cpu().sum() # 对预测正确的数据个数进行累加\r\n print('\\nIn epoch[{}] Test set: Accuracy: {}/{} ({:.2f}%)\\n'.format(\r\n epoch + 1, correct, len(test_loader.dataset),\r\n 100.0 * float(correct) / float(len(test_loader.dataset))))\r\n","repo_name":"KennCoder7/Self-Attention-for-HAR","sub_path":"Self_Attn/deep_conv_lstm.py","file_name":"deep_conv_lstm.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"29388644396","text":"import rake\nimport operator\n\n#have the text in the document \"test.txt\"\nwith open('output.txt', 'r') as myfile:\n text = myfile.read().replace('\\n', '')\n\n#using constraint where each keyword appears in text at least twice\nrake_object = rake.Rake(\"SmartStoplist.txt\", 3, 3, 2)\nkeywords = rake_object.run(text)\nprint(keywords)\n\n#using constraint where each keyword appears in text at least three times\nrake_object = rake.Rake(\"SmartStoplist.txt\", 3, 3, 3)\nkeywords = rake_object.run(text)\nprint(keywords)\n","repo_name":"brianzhan/LinksRelevantInfo","sub_path":"RAKE/rakeTest.py","file_name":"rakeTest.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3996634057","text":"import os\nfrom typing import *\nimport sys\n\nfrom PySide2.QtWidgets import QApplication, QWidget\nfrom PySide2.QtWebEngine import QtWebEngine\nimport citrine_client\n\nfrom citrine_ui import ui, util, interface_pkg, js_bridge, config\n\n\n_root = None # type: Optional[ui.MainWrapper]\n\n\ndef build_window() -> QWidget:\n global _root\n _root = ui.MainWrapper()\n _root.set_panel(ui.startup.StartupPage)\n _root.show()\n return _root\n\n\ndef get_root() -> ui.MainWrapper:\n return _root\n\n\ndef get_stylesheet():\n if config.get_config('ui.hidpi'):\n rname = 'citrine_hidpi.css'\n else:\n rname = 'citrine.css'\n with open(util.get_resource(rname), 'r') as in_f:\n style = in_f.read()\n return style\n\n\ndef init():\n interface_pkg.init_interfaces()\n util.init_threadpool()\n js_bridge.init_server()\n ui.error.init_errors()\n\n \ndef display_error(message: Union[str, citrine_client.errors.CitrineClientError, Dict]):\n _root.display_error(message)\n\n\ndef main():\n QtWebEngine.initialize()\n app = QApplication(sys.argv)\n app.setStyleSheet(get_stylesheet())\n init()\n # If you don't init the main window and exec the app in the same context, the whole thing freezes / refuses to start\n # I think it's doing some kind of frame introspection in exec_ to figure out how to assign things to the app\n # Super annoying, but if this is still here I haven't figured out how to get around it\n w = build_window()\n exit_code = app.exec_()\n print('Cleaning up...')\n 
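    # Note: sys.exit() below forwards Qt's exit code to the shell. Any teardown
    # that must run after the event loop stops (for example, shutting down the
    # js_bridge server, assuming it exposes a stop/shutdown hook — not verified
    # here) would belong at this point, before the interpreter exits.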
sys.exit(exit_code)\n","repo_name":"antonpaquin/citrine","sub_path":"citrine-ui/citrine_ui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33304396632","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 11 09:22:09 2022\n\n@author: therm\n\"\"\"\n\n# Hall resistance\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.rcParams['font.size'] = 12.5\nfrom scipy.optimize import curve_fit\nimport math\n\ndef truncate(number, digits) -> float:\n stepper = 10.0 ** digits\n return math.trunc(stepper * number) / stepper\n\ndef linear(x, m, c):\n return m * x + c\n\ndef carrier_density(grad, grad_err, t, t_err=0):\n e = 1.6e-19\n density = abs(1/(grad * e * t))\n \n rel_grad_err = grad_err / grad\n rel_t_err = t_err / t\n tot_rel_err = rel_grad_err + rel_t_err\n \n density_err = density * tot_rel_err \n \n return density, density_err\n\ndef mobility(n,rho):\n e = 1.6e-19\n return 1 / (e * n * rho)\n\nresistivity = [5.35e-5, 2.91e-4, 8.229e-5]\n \n\ndF1 = pd.read_excel('N_Type_GaS_Hall_Resistance.xlsx')\nt1 = 3e-6\n\nmfs = np.array(dF1['Magnetic Field Strength (mT)']) / 1000\nhall_res = np.array(dF1['Hall_Res (ohm)']) *-1\nmfs_err = np.array(dF1['MFS Err (mT)']) / 1000 \nhall_res_err = np.array(dF1['Hall_Res_err (ohm)'])\n\npar1, cov1 = curve_fit(linear, mfs, hall_res, sigma = hall_res_err, absolute_sigma = True)\nfit1 = linear(mfs, *par1)\n\ngrad = truncate(par1[0], 3)\ngrad_err = truncate(cov1[0][0] ** 0.5, 5)\ninterp = truncate(par1[1], 3)\ninterp_err = truncate(cov1[1][1] ** 0.5, 5)\n\nrho, rho_err = carrier_density(par1[0], grad_err, t1)\n\ncar_den = truncate(rho, 3)\ncar_den_err = truncate(rho_err, 3)\n\nprint(\"Gradient:\", grad, grad_err)\nprint(\"Intercept:\", interp, interp_err)\nprint(\"Carrier Density:\", car_den, car_den_err)\n\nplt.errorbar(mfs, hall_res, yerr = hall_res_err, xerr = mfs_err, capsize = 2, fmt = 'o', label = 'N-Type GaAs Data')\nplt.plot(mfs, fit1, label = \"Fit\")\ntext_x = -0.3\ntext_y = -1\nplt.text(text_x, text_y, \"Gradient: {} $\\pm$ {}\".format(grad, grad_err), fontsize = 12)\nplt.text(text_x, text_y - 0.3, \"Intercept: {} $\\pm$ {}\".format(interp, interp_err), fontsize = 12)\n# plt.text(0.1, -1, \"Carrier Density: {}\".format(carrier_density), fontsize = 12)\n\nplt.minorticks_on()\nplt.xlabel(\"Magnetic Field Strength (T)\")\nplt.ylabel(\"Hall Resistance ($\\Omega$)\")\nplt.grid(which = 'minor', alpha = 0.2)\nplt.grid(which = 'major')\nplt.legend()\nplt.savefig('N_type_GaAs_HallPlot',dpi = 300, bbox_inches=\"tight\")\nplt.show()\n\n\n#%%\n\"\"\"\nNow for P-Type GaS\n\"\"\"\n\ndF2 = pd.read_excel('P_Type_GaS_Hall_Resistance.xlsx')\nt2 = 2.7e-6\n\nmfs = np.array(dF2['Magnetic Field Strength (mT)']) / 1000\nhall_res_P = np.array(dF2['Hall_Res (ohm)'])*-1\nmfs_err_P = np.array(dF2['MFS Err (mT)']) / 1000\nhall_res_err_P = np.array(dF2['Hall_Res_err (ohm)'])\n\npar2, cov2 = curve_fit(linear, mfs, hall_res_P, sigma = hall_res_err_P, absolute_sigma = True)\nfit2 = linear(mfs, *par2)\n\ngrad_P = truncate(par2[0], 3)\ngrad_err_P = truncate(cov2[0][0] ** 0.5, 5)\ninterp_P = truncate(par2[1], 3)\ninterp_err_P = truncate(cov2[1][1] ** 0.5, 5)\n\nrho_P, rho_err_P = carrier_density(par2[0], grad_err_P, t2)\n\ncarrier_density_P = truncate(rho_P, 3)\ncar_den_err_P = truncate(rho_err_P, 3)\n\nprint(\"Gradient:\", grad_P, grad_err_P)\nprint(\"Intercept:\", interp_P, interp_err_P)\nprint(\"Carrier Density:\", 
carrier_density_P, car_den_err_P)\n\nplt.errorbar(mfs, hall_res_P, yerr = hall_res_err_P, xerr = mfs_err_P, capsize = 2, fmt = 'o', label = 'P-Type GaAs Data')\nplt.plot(mfs, fit2, label = \"Fit\")\ntext_x = -0.04\ntext_y = -0.3\nplt.text(text_x, text_y, \"Gradient: {} $\\pm$ {}\".format(grad_P, grad_err_P), fontsize = 12)\nplt.text(text_x, text_y - 0.1, \"Intercept: {} $\\pm$ {}\".format(interp_P, interp_err_P), fontsize = 12)\n# plt.text(0.1, -1, \"Carrier Density: {}\".format(carrier_density), fontsize = 12)\n\nplt.minorticks_on()\nplt.xlabel(\"Magnetic Field Strength (T)\")\nplt.ylabel(\"Hall Resistance ($\\Omega$)\")\nplt.grid(which = 'minor', alpha = 0.2)\nplt.grid(which = 'major')\nplt.legend()\nplt.savefig('P_type_GaAs_HallPlot',dpi = 300, bbox_inches=\"tight\")\nplt.show()\n\n#%%\n\"\"\"\nNow for InSb\n\"\"\"\n\ndF3 = pd.read_excel('InSb_Hall_Resistance.xlsx')\nt3 = 1e-6\n\nmfs = np.array(dF3['Magnetic Field Strength (mT)']) / 1000\nhall_res_InSb = np.array(dF3['Hall_Res (ohm)'])\nmfs_err_InSb = np.array(dF3['MFS Err (mT)']) / 1000\nhall_res_err_InSb = np.array(dF3['Hall_Res_err (ohm)']) * 100\n\npar3, cov3 = curve_fit(linear, mfs, hall_res_InSb, sigma = hall_res_err_InSb, absolute_sigma = True)\nfit3 = linear(mfs, *par3)\n\ngrad_InSb = truncate(par3[0], 3)\ngrad_err_InSb = truncate(cov3[0][0] ** 0.5, 5)\ninterp_InSb = truncate(par3[1], 3)\ninterp_err_InSb = truncate(cov3[1][1] ** 0.5, 5)\n\nrho_InSb, rho_err_InSb = carrier_density(par3[0], grad_err_InSb, t3)\n\ncarrier_density_InSb = truncate(rho_InSb, 3)\ncar_den_err_InSb = truncate(rho_err_InSb, 3)\n\nprint(\"Gradient:\", grad_InSb, grad_err_InSb)\nprint(\"Intercept:\", interp_InSb, interp_err_InSb)\nprint(\"Carrier Density:\", carrier_density_InSb, car_den_err_InSb)\n\nplt.errorbar(mfs, hall_res_InSb, yerr = hall_res_err_InSb, xerr = mfs_err_InSb, capsize = 2, fmt = 'o', label = 'InSb Data')\nplt.plot(mfs, fit3, label = \"Fit\")\ntext_x = -0.3\ntext_y = -50\nplt.text(text_x, text_y, \"Gradient: {} $\\pm$ {}\".format(grad_InSb, grad_err_InSb), fontsize = 12)\nplt.text(text_x, text_y - 15, \"Intercept: {} $\\pm$ {}\".format(interp_InSb, interp_err_InSb), fontsize = 12)\n# plt.text(0.1, -1, \"Carrier Density: {}\".format(carrier_density), fontsize = 12)\n\nplt.minorticks_on()\nplt.xlabel(\"Magnetic Field Strength (T)\")\nplt.ylabel(\"Hall Resistance ($\\Omega$)\")\nplt.grid(which = 'minor', alpha = 0.2)\nplt.grid(which = 'major')\nplt.legend()\nplt.savefig('InSb_HallPlot', dpi = 300, bbox_inches=\"tight\")\nplt.show()\n\n#%%\n\"\"\"\nCalculating Mobilities\n\"\"\"\nprint('Mobility N-type (cm^2 V^-1 s^-1):', mobility(car_den, resistivity[0])*10000)\nprint('Mobility P-type (cm^2 V^-1 s^-1):', mobility(carrier_density_P, resistivity[1])*10000)\nprint('Mobility InSb (cm^2 V^-1 s^-1):', mobility(carrier_density_InSb, resistivity[2])*10000)\n\n\n#%%\n\ndef res(mfs, hall_res, y_err, fit):\n hres = 0\n hres = hall_res - fit\n \n plt.errorbar(mfs, hres, yerr = y_err, fmt = 'o', label = 'InSb Residuals', marker = '.', markersize = 4, c = 'black', capsize = 2)\n \n plt.minorticks_on()\n plt.xlabel(\"Magnetic Field Strength (T)\")\n plt.ylabel(\"Hall Resistance Residuals ($\\Omega$)\")\n plt.grid(which = 'minor', alpha = 0.2)\n plt.grid(which = 'major')\n plt.legend()\n plt.savefig('InSb_HallPlot')\n plt.show()\n return hres\n\n\n#%%\n\npar4, cov4 = curve_fit(linear, mfs[4:9], hall_res_InSb[4:9], sigma = hall_res_err_InSb[4:9], absolute_sigma = True)\nfit4 = linear(mfs, *par4)\n\n\ngrad_InSb = truncate(par4[0], 3)\ngrad_err_InSb = 
truncate(cov4[0][0] ** 0.5, 5)\ninterp_InSb = truncate(par4[1], 3)\ninterp_err_InSb = truncate(cov4[1][1] ** 0.5, 5)\n\nrho_InSb, rho_err_InSb = carrier_density(par4[0], grad_err_InSb, t3)\n\ncarrier_density_InSb = truncate(rho_InSb, 3)\ncar_den_err_InSb = truncate(rho_err_InSb, 3)\n\nprint(\"Gradient:\", grad_InSb, grad_err_InSb)\nprint(\"Intercept:\", interp_InSb, interp_err_InSb)\nprint(\"Carrier Density:\", carrier_density_InSb, car_den_err_InSb)\n\nplt.errorbar(mfs, hall_res_InSb, yerr = hall_res_err_InSb, xerr = mfs_err_InSb, capsize = 2, fmt = 'o', label = 'InSb Data')\nplt.plot(mfs, fit4, label = \"Fit\")\ntext_x = -0.3\ntext_y = -50\nplt.text(text_x, text_y, \"Gradient: {} $\\pm$ {}\".format(grad_InSb, grad_err_InSb), fontsize = 12)\nplt.text(text_x, text_y - 15, \"Intercept: {} $\\pm$ {}\".format(interp_InSb, interp_err_InSb), fontsize = 12)\n# plt.text(0.1, -1, \"Carrier Density: {}\".format(carrier_density), fontsize = 12)\n\nplt.minorticks_on()\nplt.xlabel(\"Magnetic Field Strength (T)\")\nplt.ylabel(\"Hall Resistance ($\\Omega$)\")\nplt.grid(which = 'minor', alpha = 0.2)\nplt.grid(which = 'major')\nplt.legend()\nplt.show()\n\n#%%\n\ndef cubic(x,a,b,c,d):\n return a*x**3 + b*x**2 + c*x + d\n \n\ndef cubic_fit(x,y, y_err, color_line, color_dot, save = False):\n guess = [0,0,0,0]\n par, cov = curve_fit(cubic, x, y, guess, absolute_sigma = True)\n fit = cubic(x, par[0], par[1], par[2], par[3])\n \n x_1 = np.linspace(min(x), max(x), 1000)\n y_1 = cubic(x_1, par[0], par[1], par[2], par[3])\n print(par)\n \n plt.plot(x_1,y_1, c = color_line)\n plt.errorbar(x, y, yerr = y_err, fmt = 'o', label = 'InSb Temperature Dependence',marker = '.', markersize = 4, c = color_dot, capsize = 2)\n plt.minorticks_on()\n \n # plt.xlabel(\"Magnetic Field Strength (T)\")\n # plt.ylabel(\"Hall Resistance ($\\Omega$)\")\n plt.xlabel(\"Temperature ($^{o}$C)\")\n plt.ylabel(\"Resistance ($\\Omega$)\")\n \n plt.grid(which = 'minor', alpha = 0.2)\n plt.grid(which = 'major')\n plt.legend()\n \n if save:\n plt.savefig('mag_res.png',dpi = 300, bbox_inches=\"tight\") \n \n plt.show()\n \n#%%\n\"\"\"\nTemperature dependence \n\"\"\"\n\ndF_temp = pd.read_excel('Indium_Temperature.xlsx')\n\ntemp = np.array(dF_temp['Temperature (C)'])\nresistance = np.array(dF_temp['Voltage (mV)'])\nerr = np.ones(len(temp)) * hall_res_err_InSb[0]\n\nplt.errorbar(temp, resistance, yerr = err, fmt = 'o', label = '', marker = '.', markersize = 4, c = 'red', capsize = 2)\n\nplt.minorticks_on()\nplt.xlabel(\"Temperature ($^{o}$C)\")\nplt.ylabel(\"Hall Resistance ($\\Omega$)\")\nplt.grid(which = 'minor', alpha = 0.2)\nplt.grid(which = 'major')\nplt.legend()\nplt.show()\n\n#%%\n\nx = np.array([1,1.000000001])\ny = np.array([0,1000])\n\nplt.plot(x*23.4, y, linestyle = \"--\", c = 'black')\nplt.plot(x*24.4, y, linestyle = \"--\", c = 'black')\nplt.xlim([21.8, 26.4])\nplt.ylim([27.35, 29.9])\n\nplt.savefig('temp_dependence.png')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"reign-of-panda/Hall_Effect_Lab","sub_path":"Hall_Resistance.py","file_name":"Hall_Resistance.py","file_ext":"py","file_size_in_byte":9472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19621695424","text":"from django.shortcuts import get_object_or_404, redirect, render\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Book\nfrom .forms import BookForm\n# Create your views here.\n\ndef list_books(request):\n books = Book.objects.all()\n return render(request, 
\"books/list_books.html\", {\"books\": books})\n\n#check for is_admin\ndef add_book(request):\n #allow an admin to add books to the website\n if request.method == \"POST\":\n form = BookForm(data=request.POST)\n if form.is_valid():\n book = form.save(commit=False)\n book.save()\n return redirect(\"show_book\", pk=book.pk)\n else:\n form = BookForm()\n return render(request, \"books/add_book.html\", {\"form\": form})\n\ndef show_book(request, pk):\n book = get_object_or_404(Book, pk=pk)\n return render(request, \"books/show_book.html\", {\"book\": book})","repo_name":"Momentum-Team-10/django-freeshelf-badmicro","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70339484355","text":"from __future__ import print_function\nimport numpy as np\nimport os\nfrom SimDataDB import SimDataDB\n\n\nfrom latent_sim import LatentSim\nfrom eoshub import EOSHub\n\nfrom test_cfg import *\n\ndef curried_latentssim(eos,network):\n \"\"\"Curried constructor for LatentSim with the hub and \n eos registry.\"\"\"\n scale_file = eoses[eos]['scale_file']\n logp = eoses[eos]['logp']\n ls = LatentSim(hub+'training_'+eos+'/'+network,scale_file,logp)\n return ls\n\n\ndef solve_a_problem_arch(problem_name, eos, network=None):\n problem = all_test_problems[problem_name]\n ls = EOSHub.LatentSim(eos,network)\n q0 = ls.find_point(**problem.initial)\n ls.set_params(**problem.params)\n time_series = ls.integrate(problem.t_max, q0, \n schedule=problem.schedule,verbose=False)\n return time_series \n\ndef solve_a_problem(problem_name,eos, result_dir='.'):\n sdb = SimDataDB(result_dir+'/{0}_testing.db'.format(eos))\n @sdb.Decorate(eos,[('problem','string'),('network','string')],\n [('series','array')],memoize=False)\n def _solve(problem_name,network):\n print(\"Testing {0}:{1} on {2}\".format(eos,network,problem_name))\n time_series = solve_a_problem_arch(problem_name,eos,network)\n return {'series':time_series}\n def _job(arch):\n try:\n _solve(problem_name,arch)\n except Exception as e:\n print(f\"The network {arch} threw an error:\\n {e}\")\n for arch in EOSHub[eos].archs:\n #_solve(problem_name,arch)\n _job(arch)\n\n#\n# Deprecated scripting before EOSHub\n#\ndef DEP_run_one_simulation(eos,network,problem_name,verbose=True):\n \"\"\"Run one of the tests in an environment we can embed into.\"\"\"\n scale_file = eoses[eos]['scale_file']\n logp = eoses[eos]['logp']\n problem = problems[problem_name]\n ls = LatentSim(hub+'training_'+eos+'/'+network,scale_file,logp)\n q0 = ls.find_point(**problem.initial)\n ls.set_params(**problem.params)\n time_series = ls.integrate(problem.t_max, q0, \n schedule=problem.schedule,\n verbose=verbose)\n return time_series, ls\n \ndef DEP_perform_tests_for_eos(eos, result_dir='.'): # dep\n \"\"\"Perform all of the tests and generate a report.\"\"\"\n networks = os.listdir(hub+'/training_'+eos)\n problem_list = eoses[eos]['problem_list']\n scale_file = eoses[eos]['scale_file']\n logp = eoses[eos]['logp']\n \n sdb = SimDataDB(result_dir+'{0}_testing.db'.format(eos))\n \n @sdb.Decorate(eos,[('problem','string'),('network','string')],\n [('series','array')],memoize=False)\n def solve_a_problem(problem_name, network):\n print(\"Testing {0}:{1} on {2}\".format(eos,network,problem_name))\n problem = problems[problem_name]\n ls = LatentSim(hub+'training_'+eos+'/'+network,scale_file,logp)\n q0 = ls.find_point(**problem.initial)\n 
ls.set_params(**problem.params)\n time_series = ls.integrate(problem.t_max, q0, schedule=problem.schedule)\n return {'series':time_series}\n \n for n in networks:\n try:\n for p in problem_list:\n solve_a_problem(p,n)\n except Exception as e:\n print(\"The network\", n, \" threw an error: \", e)\n \n# This is now a library; batching requires clever forking before imports\n# if __name__==\"__main__\":\n# for k in eoses:\n# try:\n# perform_tests_for_eos(k, hub+'test_databases/')\n# except FileNotFoundError:\n# pass\n\n","repo_name":"afqueiruga/LatentPrimaries","sub_path":"batch_test_latent_sim.py","file_name":"batch_test_latent_sim.py","file_ext":"py","file_size_in_byte":3553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2070376169","text":"import glob, sys, os\nimport pandas as pd\nimport numpy as np\nimport statsmodels.formula.api as sm\nimport bct\nimport matplotlib.pyplot as plt\n\ndef readfiles(datafile: str, civpath: str, civL: str, civR: str, measure: str) -> pd.DataFrame:\n data = pd.read_csv(datafile, dtype=str, index_col=['eid'])\n\n # Read civet files matching data\n Lfiles = [glob.glob(civpath + '*' + str(i) + '*' + civL) for i,row in data.iterrows()]\n Lfiles.sort()\n Lfiles = [item for sublist in Lfiles for item in sublist]\n Rfiles = [glob.glob(civpath + '*' + str(i) + '*' + civR) for i,row in data.iterrows()]\n Rfiles.sort()\n Rfiles = [item for sublist in Rfiles for item in sublist]\n\n # Read text files\n left_dfs = [pd.read_csv(f, header=None).T for f in Lfiles]\n leftdata = pd.concat(left_dfs,ignore_index=True)\n right_dfs = [pd.read_csv(f, header=None).T for f in Rfiles]\n rightdata = pd.concat(right_dfs,ignore_index=True)\n\n # Create dataframe\n df=pd.concat([leftdata,rightdata], axis=1)\n df.columns=[\"V\"+str(i) for i in range(1, df.shape[1] + 1)]\n\n df.index=data.index.copy()\n mean_anat = pd.DataFrame(df.mean(axis=1), columns=[\"mean_\" + measure], index=df.index.copy())\n data = data.join(mean_anat)\n total_anat = pd.DataFrame(df.sum(axis=1), columns=[\"total_\" + measure], index=df.index.copy())\n data = data.join(total_anat)\n df.to_pickle(measure + '.pkl')\n return data, df\n\ndef parcellate(DKTfile: str, outdir: str, data: pd.DataFrame, df = pd.DataFrame) -> pd.DataFrame:\n dkt = pd.read_csv(DKTfile, header=None)\n dkt.columns = ['roi']\n rois = dkt.roi.unique()\n rois = np.sort(rois)\n parc = pd.DataFrame(index=data.index.copy())\n\n for r in rois:\n means = pd.DataFrame(df.iloc[:,dkt.index[dkt.roi==r].tolist()].mean(axis=1),columns=[\"DKT_\"+str(r)], index=data.index.copy())\n parc = pd.concat([parc,means], axis = 1)\n data_parc = pd.concat([data,parc], axis = 1)\n data_parc = data_parc.drop(['DKT_6', 'DKT_106'], axis=1)\n parc = parc.drop(['DKT_6', 'DKT_106'], axis=1)\n\n parc.to_csv(outdir + 'dkt_parcellation.csv') # parcellated data\n data_parc.to_csv(outdir + 'data_dkt_parcellation.csv') # parcellated data combined with behavioural data\n return parc, data_parc\n\ndef apply_score(measure: str, paramfile: str, parc: pd.DataFrame, data_parc: pd.DataFrame, outdir: str) -> pd.DataFrame:\n data_parc['mean_' + measure] = pd.to_numeric(data_parc['mean_' + measure])\n for r in parc.columns:\n data_parc[r] = pd.to_numeric(data_parc[r])\n params = pd.read_csv(paramfile, index_col=0)\n resids = pd.DataFrame(index=data_parc.index.copy())\n conn = np.zeros((len(parc.columns), len(parc.columns), len(data_parc)))\n\n # Loop through ROIs and apply regression parameters\n for i in 
range(len(parc.columns)):\n r1 = parc.columns[i]\n for j in range(len(parc.columns)):\n r2 = parc.columns[j]\n if r1 != r2:\n reg = params.loc['Intercept', r1 + '_' + r2] + params.loc['train[r2]', r1 + '_' + r2]*data_parc[r2] + params.loc['mean_thickness', r1 + '_' + r2]*data_parc['mean_' + measure]\n residual = parc[r1] - reg\n for pp in range(len(data_parc)):\n conn[i,j,pp] = residual.iloc[pp]\n\n # Save residuals and regression parameters\n np.save(outdir + 'SCoRe_3Dconnectivity_matrix.npy', conn)\n return conn\n\ndef graphmes(conn: pd.DataFrame, data: pd.DataFrame, outdir: str) -> pd.DataFrame:\n strengths = np.zeros((np.size(conn,2),np.size(conn,0)))\n efficiency = np.zeros((np.size(conn,2),1))\n\n for p in range(np.size(conn, 2)):\n W = conn[:,:,p]\n W = np.abs(W)\n st = bct.strengths_dir(W)\n efficiency[p] = bct.efficiency_wei(W, local=False)\n for r in range(np.size(st)):\n strengths[p,r] = st[r]\n\n eff = [item for sublist in efficiency for item in sublist]\n e = pd.DataFrame(data = eff, columns = ['Global_Efficiency'], index = data.index.copy())\n np.size(strengths,1)\n s = pd.DataFrame(data = strengths, index = data.index.copy())\n s.columns=[\"Strength_\"+str(i) for i in range(1, s.shape[1] + 1)]\n data_parc_conn = data.join(e)\n data_parc_conn = data_parc_conn.join(s) \n data_parc_conn.to_csv(outdir + 'apply_score_output.csv')\n\nif __name__ == \"__main__\":\n measure = \"thickness\"\n datafile = '/scratch/katie/ukbb/Katie_2022-03-23.csv'\n civpath = \"/project/def-mlepage/UKBB/civet/thickness/\"\n civL = \"*native_rms_rsl_tlaplace_20mm_left.txt\"\n civR = \"*native_rms_rsl_tlaplace_20mm_right.txt\"\n DKTfile = \"/project/def-mlepage/UKBB/civet/CIVET_2.0_DKT.txt\"\n outdir = \"/scratch/katie/score/\"\n paramfile = \"/scratch/katie/score/SCoRe_regression_parameters.csv\"\n\n data, df = readfiles(datafile, civpath, civL, civR, measure)\n parc, data_parc = parcellate(DKTfile, outdir, data, df)\n conn = apply_score(measure, paramfile, parc, data_parc, outdir)\n graphmes(conn, data_parc, outdir)\n","repo_name":"katielavigne/score","sub_path":"apply_score.py","file_name":"apply_score.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"10050196673","text":"import sys\n\ndef get_number(number=0):\n while True:\n if number == 0:\n number = input('Compute primes up to what number? 
')\n try:\n number = int(number)\n return number\n except ValueError:\n print('You did not enter a number!')\n number = 0\n \nif len(sys.argv) > 1:\n maxprime = get_number(sys.argv[1])\nelse:\n maxprime = get_number()\n\n# \"waste\" two slots at the beginning so that the numbers in the list\n# correspond to their index within the list.\n#\n# In other words, nums[2] == 2, nums[3] == 3, etc.\nnums = [0, 0] + list(range(2, maxprime + 1))\n\nfor number in nums:\n if number == 0:\n continue\n # if I'm here, what does it mean?\n # nums[index] is != 0, but also prime\n # so now we want to remove all multiples of this prime\n #print('Removing multiples of', index)\n # visit each multiple of the current number (prime) \n for multiple in range(number * 2, maxprime + 1, number):\n #print('Removing', multiple)\n nums[multiple] = 0\n\n# print(nums)\n# print([num for num in nums if num])\nnums = sorted(set(nums) - {0})\nprint(nums)\n#print(len(nums) - nums.count(0), 'primes found.')","repo_name":"davewadestein/ADI-Reskilling-Academy","sub_path":"primer.py","file_name":"primer.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5580686417","text":"# -*- coding: utf-8 -*-\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nimport numpy as np\n\nfrom hermespy.modem import DuplexModem, RootRaisedCosineWaveform, BitErrorEvaluator, BlockErrorEvaluator, FrameErrorEvaluator, ThroughputEvaluator\nfrom hermespy.simulation import SimulatedDevice\nfrom hermespy.core.monte_carlo import Evaluator, GridDimension, register\n\n__author__ = \"Jan Adler\"\n__copyright__ = \"Copyright 2023, Barkhausen Institut gGmbH\"\n__credits__ = [\"Jan Adler\"]\n__license__ = \"AGPLv3\"\n__version__ = \"1.1.0\"\n__maintainer__ = \"Jan Adler\"\n__email__ = \"jan.adler@barkhauseninstitut.org\"\n__status__ = \"Prototype\"\n\n\nclass InvestigatedObject(object):\n \n def __init__(self) -> None:\n \n self.dim = 0\n \n @property\n def dimension(self) -> int:\n \n return self.dim\n \n @dimension.setter\n def dimension(self, value: int) -> None:\n \n self.dim = value\n\n\nclass TestEvaluators(TestCase):\n \n def setUp(self) -> None:\n \n waveform = RootRaisedCosineWaveform(symbol_rate=1, num_preamble_symbols=0, num_data_symbols=100,\n modulation_order=64, oversampling_factor=1)\n self.device = SimulatedDevice()\n \n self.modem = DuplexModem()\n self.modem.waveform_generator = waveform\n self.modem.device = self.device\n\n investigated_object = InvestigatedObject()\n self.dimension = GridDimension(investigated_object, 'dimension', [0], 'title')\n\n def _test_evaluator(self, evaluator: Evaluator) -> None:\n \"\"\"Generate a result from a given evaluator and test its plotting routine.\"\"\"\n \n transmission = self.modem.transmit()\n self.device.process_input(transmission.signal)\n _ = self.modem.receive()\n \n try:\n \n evaluation = evaluator.evaluate()\n \n artifact = evaluation.artifact()\n artifact_grid = np.empty(1, dtype=object)\n artifact_grid[0] = [artifact, artifact]\n \n result = evaluator.generate_result([self.dimension], artifact_grid)\n \n with patch('matplotlib.pyplot.figure'):\n _ = result.plot()\n \n except BaseException as e:\n self.fail(msg=str(e))\n\n def test_bit_error_evaluator(self) -> None:\n \"\"\"Test the bit error communication evaluation\"\"\"\n \n ber = BitErrorEvaluator(self.modem, self.modem)\n self._test_evaluator(ber)\n\n def test_block_error_evaluator(self) -> None:\n \"\"\"Test the block error 
communication evaluation\"\"\"\n \n ber = BlockErrorEvaluator(self.modem, self.modem)\n self._test_evaluator(ber)\n\n def test_frame_error_evaluator(self) -> None:\n \"\"\"Test the frame error communication evaluation\"\"\"\n \n ber = FrameErrorEvaluator(self.modem, self.modem)\n self._test_evaluator(ber)\n\n def test_throughput_evaluator(self) -> None:\n \"\"\"Test the throughput communication evaluation\"\"\"\n \n ber = ThroughputEvaluator(self.modem, self.modem)\n self._test_evaluator(ber)\n","repo_name":"Barkhausen-Institut/hermespy","sub_path":"tests/integration_tests/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"26211692396","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\n\nq = []\nn = int(input())\n\nnum = 0\nfor i in range(n):\n num = int(input())\n if num > 0:\n heapq.heappush(q, -num)\n else:\n if q:\n p = heapq.heappop(q)\n print(-p)\n else:\n print(0)","repo_name":"KUcodemaster/Problem_Solving","sub_path":"boj/11279.py","file_name":"11279.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12509951505","text":"from ui import uiAddCourse, uiShowSchedules\r\nimport pygame\r\nfrom datetime import datetime\r\n\r\nclass Course():\r\n \r\n def __init__(self, name, startTime, endTime, MWF, section, location, TR):\r\n \r\n self.name = name\r\n self.startTime = startTime\r\n self.endTime = endTime\r\n self.MWF = MWF\r\n self.section = section\r\n self.location = location\r\n self.TR = TR\r\n\r\n def machineFit(self, machines):\r\n # iterate through all the machines \r\n return\r\n\r\n# ESTF\r\n# create new machine if course cannot be scheduled in the existing machines i.e the startTime of the course to be scheduled is < endTime of courses in all machine\r\n# course can be scheduled to a machine if startTime of course to be scheduled is > endTime of a course in a machine\r\n\r\n\r\nclass Machine():\r\n \r\n def __init__(self, number):\r\n self.number = number\r\n self.valid = False\r\n self.courses = []\r\n\r\n def addCourse(self, course):\r\n \r\n self.courses.append(course)\r\n return\r\n \r\n def checkInsert(self, course):\r\n # check if the course can fit into the machine\r\n return\r\n\r\nclass daySchedule:\r\n def __init__(self, day):\r\n self.day = day\r\n\r\n\r\ndef estf(machines, course): # ESTF takes a list of all machines (and the machine contains a list of courses) and a course as input to run ESTF on\r\n day_dict = {\"m\" : 0, \"t\":1, \"w\": 2, \"r\": 3, \"f\": 4}\r\n \r\n if course[\"ls\"] == True:\r\n day_ind = day_dict[course[\"ls day\"]]\r\n if len(machines[day_ind].courses) == 0:\r\n machines[day_ind].courses.append(course)\r\n elif len(machines[day_ind].courses) != 0:\r\n print(\"h\")\r\n \r\n if (machines[0].courses[len(machines[0].courses) - 1][\"end\"] < course[\"start\"]): \r\n # check latest course scheduled on Monday, if it's endTime is < the course to be scheduled's startTime, \r\n # schedule this course\r\n machines[day_ind].courses.append(course)\r\n \r\n \r\n\r\n else:\r\n\r\n if (course[\"mwf\"]) == True:\r\n if len(machines[0].courses) == 0: # check if Monday has a course scheduled, since it is jointly scheduled with Wednesday Friday, we do not need to check the other days\r\n machines[0].courses.append(course) # add the course to the empty machine (days 0, 2, 4 (MWF)) Monday\r\n 
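                # MWF sections share one timetable: a course booked on the
                # Monday machine is mirrored onto Wednesday and Friday below.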
machines[2].courses.append(course) # Wednesday\r\n machines[4].courses.append(course) # Friday\r\n elif len(machines[0].courses) != 0:\r\n # if there are courses in the schedule \r\n try:\r\n if (machines[0].courses[len(machines.courses) - 1][\"end\"] < course[\"start\"]): # check latest course scheduled on Monday, if it's endTime is < the course to be scheduled's startTime, schedule this course\r\n machines[0].courses.append(course) # Monday\r\n machines[2].courses.append(course) # Wednesday\r\n machines[4].courses.append(course) # Friday\r\n except:\r\n return -1\r\n \r\n # repeat the same for Tuesday Thursday\r\n elif (course[\"tr\"]) == True:\r\n if len(machines[1].courses) == 0: # \r\n machines[1].courses.append(course) # Tuesday\r\n machines[3].courses.append(course) # Thursday\r\n elif len(machines[1].courses) != 0:\r\n # if there are courses in the schedule \r\n try:\r\n if (machines[1].courses[len(machines.courses) - 1][\"end\"] < course[\"start\"]): \r\n machines[1].courses.append(course) # Tuesday\r\n machines[3].courses.append(course) # Thursday\r\n except:\r\n return -1\r\n \r\ndef userInput():\r\n \r\n courseList = []\r\n\r\n stop = False\r\n\r\n while not stop:\r\n courseName = input(\"course name: \")\r\n startTime = input(\"start time: \")\r\n endTime = input(\"end time: \")\r\n days = input(\"days of week (mtwhf), separate by comma: \")\r\n\r\n courseList.append(Course(courseName,startTime,endTime, days))\r\n \r\n stopword = input(\"stop? (y/n): \")\r\n\r\n if stopword == \"y\":\r\n stop = True\r\n \r\n \r\n return courseList\r\n\r\n\r\ndef main():\r\n \r\n choices = uiAddCourse()\r\n\r\n machine1 = Machine(1)\r\n machine2 = Machine(2)\r\n machine3 = Machine(3)\r\n machine4 = Machine(4)\r\n machine5 = Machine(5)\r\n machines = [machine1,machine2,machine3,machine4,machine5]\r\n for i in choices:\r\n res = estf(machines, i)\r\n if res == -1:\r\n break\r\n\r\n pygame.quit()\r\n\r\n if res == -1:\r\n uiShowSchedules(-1)\r\n else:\r\n uiShowSchedules([machine1.courses, machine2.courses, machine3.courses, machine4.courses, machine5.courses])\r\n \r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"jvkchow/Schedulers","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14994777065","text":"import time\nfrom polyglot_turtle import PolyglotTurtleXiao, PinDirection, PinPullMode\n\n\nif __name__ == \"__main__\":\n pt = PolyglotTurtleXiao()\n\n button_pin = 1\n red_led_pin = 2\n green_led_pin = 3\n\n pt.gpio_set_direction(button_pin, PinDirection.INPUT)\n pt.gpio_set_pull(button_pin, PinPullMode.NONE)\n\n pt.gpio_set_direction(red_led_pin, PinDirection.OUTPUT)\n pt.gpio_set_direction(green_led_pin, PinDirection.OUTPUT)\n\n while 1:\n pt.gpio_set_level(red_led_pin, True)\n pt.gpio_set_level(green_led_pin, False)\n time.sleep(0.5)\n\n pt.gpio_set_level(red_led_pin, False)\n pt.gpio_set_level(green_led_pin, True)\n time.sleep(0.5)\n\n while pt.gpio_get_level(button_pin):\n time.sleep(0.01)\n","repo_name":"jeremyherbert/python-polyglot-turtle","sub_path":"gpio_blinky_example.py","file_name":"gpio_blinky_example.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8383020749","text":"with open(\"assignment_4/rna_codon_table.txt\") as d:\n d = d.read().split(\" \")\n table = {}\n for i in d:\n table.update({i.split()[0]: 
i.split()[1]})\n print(table)\n\nwith open(\"rosalind_prot.txt\") as f:\n f = f.read().strip()\n\nresult = \"\"\ncodon = \"\"\nfor i in f:\n if len(codon) == 3:\n result += table[codon]\n if codon in [\"UAG\", \"UGA\", \"UAA\"]:\n break\n else:\n codon = \"\"\n\n codon += i\n\nprint(result)","repo_name":"cl3mente/pocs2_assignments","sub_path":"misc/prot.py","file_name":"prot.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34930205733","text":"\"\"\"\nCode that tests the funcitons in http_server.py\n\ncan be run with py.test\n\"\"\"\n\nfrom __future__ import unicode_literals\n\n# import pytest # used for the exception testing\n\nimport io\nfrom concurrent_servers import ConcurrentServer\nimport gevent.socket as socket # Makes sockets nonblocking\nimport gevent # Not at all friendly with threading, so\n # had to revise testing procedures\nimport time\n\n\ndef test_concurrent_server():\n testfile = io.open(\"webroot/sample.txt\")\n content = testfile.read()\n server = ConcurrentServer()\n\n def clientslow():\n start = time.time()\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\n socket.IPPROTO_IP)\n client_socket.connect(('127.0.0.1', 8888))\n gevent.sleep(1) # demonstrates it's not closing the connection\n return \"Connected and didn't send anything\", time.time() - start\n\n def clientnorm(message):\n start = time.time()\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\n socket.IPPROTO_IP)\n client_socket.connect(('127.0.0.1', 8888))\n client_socket.sendall(message)\n client_socket.shutdown(socket.SHUT_WR)\n response = client_socket.recv(4096)\n client_socket.close()\n server.stop() # This stops the serverthread\n return response, time.time() - start\n\n serverthread = gevent.spawn(server.serve_forever)\n client1thread = gevent.spawn(clientslow)\n client2thread = gevent.spawn(clientnorm, \"GET /sample.txt HTTP/1.1\\r\\n\")\n\n serverthread.start()\n client1thread.start()\n gevent.sleep(0.1) # Let the slow thread get a connection, but not exit\n client2thread.start()\n\n client2thread.join()\n client1thread.join()\n serverthread.join()\n #These will hang if anything is still waiting\n\n response, elapsed = client2thread.get()\n assert response == \"HTTP/1.1 200 OK\\r\\n\" + \\\n \"Content-Type: text/plain\\r\\n\" + \\\n \"Content-Length: 96\\r\\n\\r\\n\" +\\\n content\n assert elapsed < 1 # Faster than the sleeping thread\n response, elapsed = client1thread.get()\n assert response == \"Connected and didn't send anything\"\n assert elapsed > 1\n","repo_name":"jbbrokaw/network-tools","sub_path":"test_concurrent_server.py","file_name":"test_concurrent_server.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33202787261","text":"import pickle\r\n\r\ninfile = open(\"names_pickle_file_write.dat\", \"rb\") # rb b/c its binary--> read binary\r\n\r\nnames = pickle.load(infile)\r\n\r\nprint(type(names))\r\n\r\nprint(names)\r\n\r\nname = input(\"Add a name to the list: \")\r\nnames.append(name)\r\n\r\n# the ^name^ would be added to the list in terminal but NOT in the pickle file\r\n# (we didn't dump it again)\r\nprint(names)\r\n\r\n# here's the update (below) that would dump the update to the pickle file\r\npickle.dump(\"names_pickle_file_write.dat\", \"wb\")\r\npickle.dump(names, outfile)\r\n\r\n\r\n# pickle works w/ ANY python 
object\r\n","repo_name":"n33simon/MyDictionaries","sub_path":"5_pickle_example_read.py","file_name":"5_pickle_example_read.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29676487777","text":"# CODE CHALLENGE: Solve the Change Problem.\n# Input: An non-negative integer money and a non-empty integer array of coin types (coin1, ..., coind).\n# Output: The minimum number of coins with denominations coins that changes money. \n\nimport unittest\n\ndef change(money, coins): \n '''Returns the minimal number of coins in the Change Problem. '''\n assert money >= 0 and len(coins) > 0\n \n cache = {0: 0}\n coins = set(coins) #so the 'in' operator takes const time (large problem optimization)\n types_used = set() #types of coins used\n for m in range(1, money + 1):\n if m in coins:\n types_used.add(m)\n cache[m] = min( (cache[m - coin] + 1 for coin in types_used) )\n return cache[money]","repo_name":"markedz/bioinformatics","sub_path":"Sequence Alignment (Dynamic Programming)/change_problem.py","file_name":"change_problem.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2759040438","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pylab as pl\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef mvn2d(x, y, sigma):\n xx, yy = np.meshgrid(x, y)\n u = np.array([np.mean(x), np.mean(y)])\n xy = np.c_[xx.ravel(), yy.ravel()]\n sigma_inv = np.linalg.inv(sigma)\n z = np.dot((xy - u), sigma_inv)\n z = np.sum(z * (xy - u), axis=1)\n z = np.exp(-0.5 * z)\n return z / (2 * np.pi * np.linalg.det(sigma) ** 0.5)\n\nfig = pl.figure()\nax = Axes3D(fig)\nx = np.linspace(-5, 5, 100)\ny = np.linspace(-5, 5, 100)\nsigma = np.array([[1, 0], [0, 1]])\nz = mvn2d(x, y, sigma)\nxx, yy = np.meshgrid(x, y)\n\n#plot figure\nax.plot_surface(xx, yy, z.reshape(100, 100),\n rstride=1, cstride=1, cmap=pl.cm.hot)\npl.savefig('gaussPlot2Ddemo_1.png')\npl.figure()\npl.contour(xx, yy, z.reshape(100, 100))\npl.savefig('gaussPlot2Ddemo_2.png')\n\nsigma1 = np.array([[2, 0], [0, 1]])\nz = mvn2d(x, y, sigma1)\npl.figure()\npl.contour(xx, yy, z.reshape(100, 100))\npl.savefig('gaussPlot2Ddemo_3.png')\n\nsigma2 = np.array([[1, 1], [0, 1]])\nz = mvn2d(x, y, sigma2)\npl.figure()\npl.contour(xx, yy, z.reshape(100, 100))\npl.savefig('gaussPlot2Ddemo_4.png')\npl.show()\n","repo_name":"david78k/stock","sub_path":"pmtk3/python/demos/ch02/gaussPlot2Ddemo.py","file_name":"gaussPlot2Ddemo.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"9479803693","text":"from libqtile.config import Key\nfrom libqtile.lazy import lazy\nfrom libqtile.utils import guess_terminal\nfrom libqtile import hook\n\nfrom groups import groups\n\n# bsp layout resize fix\ndef resize(qtile, direction):\n layout = qtile.current_layout\n child = layout.current\n parent = child.parent\n\n while parent:\n if child in parent.children:\n layout_all = False\n\n if (direction == \"left\" and parent.split_horizontal) or (\n direction == \"up\" and not parent.split_horizontal\n ):\n parent.split_ratio = max(5, parent.split_ratio - layout.grow_amount)\n layout_all = True\n elif (direction == \"right\" and parent.split_horizontal) or (\n direction == \"down\" and not parent.split_horizontal\n ):\n parent.split_ratio = min(95, parent.split_ratio + 
layout.grow_amount)\n layout_all = True\n\n if layout_all:\n layout.group.layout_all()\n break\n\n child = parent\n parent = child.parent\n\n\n@lazy.function\ndef resize_left(qtile):\n resize(qtile, \"left\")\n\n\n@lazy.function\ndef resize_right(qtile):\n resize(qtile, \"right\")\n\n\n@lazy.function\ndef resize_up(qtile):\n resize(qtile, \"up\")\n\n\n@lazy.function\ndef resize_down(qtile):\n resize(qtile, \"down\")\n\nmod = \"mod4\"\nterminal = guess_terminal()\n\nkeys = [\n # Move Focus\n Key([mod], \"h\", lazy.layout.left()),\n Key([mod], \"l\", lazy.layout.right()),\n Key([mod], \"j\", lazy.layout.down()),\n Key([mod], \"k\", lazy.layout.up()),\n Key([mod], \"space\", lazy.layout.next(), desc=\"Move window focus to other window\"),\n Key([mod, \"shift\"], \"j\", lazy.layout.shuffle_down(), desc=\"Move window down\"),\n Key([mod, \"shift\"], \"k\", lazy.layout.shuffle_up(), desc=\"Move window up\"),\n Key(\n [mod, \"shift\"],\n \"h\",\n lazy.layout.shuffle_left(),\n lazy.layout.swap_left(),\n lazy.layout.client_to_previous(),\n desc=\"Move windows left in current stack\",\n ),\n Key(\n [mod, \"shift\"],\n \"l\",\n lazy.layout.shuffle_right(),\n lazy.layout.swap_right(),\n lazy.layout.client_to_next(),\n desc=\"Move windows right in the current stack\",\n ),\n Key([mod, \"control\"], \"h\", resize_left),\n Key([mod, \"control\"], \"l\", resize_right),\n Key([mod, \"control\"], \"j\", resize_up),\n Key([mod, \"control\"], \"k\", resize_down),\n Key([mod, \"control\"], \"m\", lazy.window.toggle_minimize()),\n #Key([mod, \"control\"], \"n\", lazy.window.toggle_maximize()),\n Key([mod], \"n\", lazy.layout.normalize(), desc=\"Reset all window sizes\"),\n Key([mod], \"f\", lazy.window.toggle_fullscreen()),\n Key([mod], \"t\", lazy.window.toggle_floating()),\n Key(\n [mod, \"shift\"],\n \"Return\",\n lazy.layout.toggle_split(),\n desc=\"Toggle between split and unsplit sides of stack\",\n ),\n Key([mod], \"Return\", lazy.spawn(terminal), desc=\"Launch terminal\"),\n Key([mod], \"Tab\", lazy.next_layout(), desc=\"Toggle between layouts\"),\n Key([mod], \"w\", lazy.window.kill(), desc=\"Kill focused window\"),\n Key([\"control\", \"shift\"], \"l\", lazy.spawn(\"i3lock\")), # lazy.spawn(\"~/scripts/i3lock-script/i3lock-multimonitor -b\")\n Key([mod, \"control\"], \"r\", lazy.reload_config(), desc=\"Reload the config\"),\n Key([mod, \"control\"], \"q\", lazy.shutdown(), desc=\"Shutdown Qtile\"),\n Key([mod], \"r\", lazy.spawn(\"rofi -show run -theme minimal\"), desc=\"Spawn rofi launcher\"),\n Key([mod, \"mod1\"], \"b\", lazy.spawn(\"firefox\"), desc=\"Spawn firefox\"),\n Key([mod, \"mod1\"], \"f\", lazy.spawn(\"thunar\"), desc=\"Spawn thunar\"),\n Key([mod, \"mod1\"], \"c\", lazy.spawn(\"code\"), desc=\"Spawn vscode\"),\n Key([mod, \"mod1\"], \"m\", lazy.spawn(\"masterpassword-gui\"), desc=\"Spawn Password Manager\"),\n Key([mod, \"shift\"], \"s\", lazy.spawn(\"flameshot gui\"), desc=\"Take Screenshot\"),\n # Key([mod], \"r\", lazy.spawncmd(), desc=\"Spawn a command using a prompt widget\"),\n]\n\nfor i in groups:\n keys.extend(\n [\n Key(\n [mod],\n i.name,\n lazy.group[i.name].toscreen(),\n desc=\"Switch to group {}\".format(i.name),\n ),\n Key(\n [mod, \"shift\"],\n i.name,\n lazy.window.togroup(i.name, switch_group=True),\n desc=\"Switch to & move focused window to group {}\".format(i.name),\n ),\n ]\n 
)\n","repo_name":"ahlaulhee/qtile-dotfiles","sub_path":"qtile/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":4450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37470165540","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nimport sys, math, time, thread\r\n\r\nclass OpenGLWindow:\r\n x = 0.2\r\n y = 0.2\r\n z = 0.2\r\n\r\n def __init__(self, width = 640, height = 480, title = \"PyOpenGL\"):\r\n glutInit(sys.argv)\r\n glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)\r\n glutInitWindowSize(width, height)\r\n self.window = glutCreateWindow(title)\r\n glutDisplayFunc(self.Draw)\r\n glutIdleFunc(self.Draw)\r\n self.InitGL(width, height)\r\n\r\n def Draw(self):\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n glLoadIdentity()\r\n glTranslatef(1.5, 0.0, -7.0)\r\n glRotatef(self.x, 1.0, 1.0, 1.0)\r\n glRotatef(self.y, 0.0, 1.0, 0.0)\r\n glRotatef(self.z, 0.0, 0.0, 1.0)\r\n glBegin(GL_QUADS)\r\n glColor3f(1.0, 0.0, 0.0)\r\n glVertex3f(1.0, 1.0, -1.0)\r\n glVertex3f(-1.0, 1.0, -1.0)\r\n glVertex3f(-1.0, 1.0, 1.0)\r\n glVertex3f(1.0, 1.0, 1.0)\r\n glColor3f(0.0, 1.0, 0.0)\r\n glVertex3f(1.0, -1.0, 1.0)\r\n glVertex3f(-1.0, -1.0, 1.0)\r\n glVertex3f(-1.0, -1.0, -1.0)\r\n glVertex3f(1.0, -1.0, -1.0)\r\n glColor3f(0.0, 0.0, 1.0)\r\n glVertex3f(1.0, 1.0, 1.0)\r\n glVertex3f(-1.0, 1.0, 1.0)\r\n glVertex3f(-1.0, -1.0, 1.0)\r\n glVertex3f(1.0, -1.0, 1.0)\r\n glColor3f(1.0, 1.0, 0.0)\r\n glVertex3f(1.0, -1.0, -1.0)\r\n glVertex3f(-1.0, -1.0, -1.0)\r\n glVertex3f(-1.0, 1.0, -1.0)\r\n glVertex3f(1.0, 1.0, -1.0)\r\n glColor3f(0.0, 1.0, 1.0)\r\n glVertex3f(-1.0, 1.0, 1.0)\r\n glVertex3f(-1.0, 1.0, -1.0)\r\n glVertex3f(-1.0, -1.0, -1.0)\r\n glVertex3f(-1.0, -1.0, 1.0)\r\n glColor3f(1.0, 0.0, 1.0)\r\n glVertex3f(1.0, 1.0, -1.0)\r\n glVertex3f(1.0, 1.0, 1.0)\r\n glVertex3f(1.0, -1.0, 1.0)\r\n glVertex3f(1.0, -1.0, -1.0)\r\n glEnd()\r\n glutSwapBuffers()\r\n self.x += 0.01\r\n self.y += 0.01\r\n self.z += 0.01\r\n\r\n def InitGL(self, width, height):\r\n glClearColor(0.25, 0.25, 0.25, 0.0)\r\n glClearDepth(1.0)\r\n glDepthFunc(GL_LESS)\r\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)\r\n glEnable(GL_DEPTH_TEST)\r\n glShadeModel(GL_SMOOTH)\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluPerspective(45.0, float(width) / float(height), 0.1, 100.0)\r\n glMatrixMode(GL_MODELVIEW)\r\n\r\n def MainLoop(self):\r\n # thread.start_new_thread(self.whirl, ())\r\n glutMainLoop()\r\n\r\nif __name__ == \"__main__\":\r\n window = OpenGLWindow()\r\n window.MainLoop()\r\n","repo_name":"markcs64/sansi","sub_path":"myLab/GA/wx/gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29773486493","text":"import shlex\nfrom subprocess import Popen, PIPE\n\nBLASTN_FIELDS = ['qseqid', 'sseqid', 'nident', 'length', 'qstart', 'qend',\n 'sstart', 'send', 'qlen', 'slen', 'evalue' 'bitscore']\n\n\nclass BlastError(Exception):\n \"\"\"An error raised due to a problem running BLAST.\"\"\"\n pass\n\n\ndef blastn(query, subject):\n fields = \" \".join(BLASTN_FIELDS)\n command = f\"blastn -query {query} -subject {subject} -outfmt '6 \" \\\n f\"{fields}' -task blastn -evalue 0.001 -dust no -culling_limit 1\"\n\n with Popen(shlex.split(command), stdout=PIPE, stderr=PIPE) as proc:\n stdout = proc.stdout.read().decode(\"utf-8\")\n stderr = 
proc.stdout.read().decode(\"utf-8\")\n\n if stderr:\n raise BlastError(stderr)\n\n return stdout\n\n\ndef blast_multiple(query, subjects, d):\n outname = d.joinpath(f\"{query.stem}.tsv\")\n\n for subject in subjects:\n with open(outname, \"a\") as fh:\n fh.write(blastn(query, subject))\n\n return outname\n\n\nif __name__ == \"__main__\":\n import pathlib\n import sys\n\n from phamclust.parallel_process import parallelize, CPUS\n from phamclust.matrix import matrix_from_adjacency, matrix_to_squareform\n\n indir = pathlib.Path(sys.argv[1]).resolve()\n\n outdir = indir.parent.joinpath(\"blastn\")\n if not outdir.is_dir():\n outdir.mkdir()\n\n fnas = [x for x in indir.iterdir() if x.suffix == \".fna\"]\n jobs, temp_outs = list(), list()\n for query in fnas:\n outfile = outdir.joinpath(f\"{query.stem}.tsv\")\n if outfile.is_file():\n temp_outs.append(outfile)\n continue\n jobs.append((query, fnas, outdir))\n\n temp_outs.extend(parallelize(blast_multiple, jobs, CPUS))\n\n outfile = outdir.parent.joinpath(\"blastn_adjacency.tsv\")\n if not outfile.is_file():\n\n blastn_map = dict()\n for temp_out in temp_outs:\n with open(temp_out, \"r\") as temp_reader:\n for row in temp_reader:\n source, target, *(data) = row.rstrip().split(\"\\t\")\n data = [int(x) for x in data]\n if source in blastn_map:\n if target in blastn_map[source]:\n blastn_map[source][target].append(data)\n else:\n blastn_map[source][target] = [data]\n else:\n blastn_map[source] = {target: [data]}\n\n fh = open(outfile, \"w\")\n\n nodes = sorted(blastn_map.keys())\n for i, source in enumerate(nodes):\n for target in nodes[i:]:\n source_data = blastn_map[source].get(target, dict())\n target_data = blastn_map[target].get(source, dict())\n if not source_data and not target_data:\n weight = 0.0\n elif not source_data:\n numerator = sum([int(x[0]) for x in target_data])\n denominator = target_data[0][-2]\n weight = min([numerator/denominator, 1.0])\n elif not target_data:\n numerator = sum([int(x[0]) for x in source_data])\n denominator = source_data[0][-2]\n weight = min([numerator/denominator, 1.0])\n else:\n numerator = sum([int(x[0]) for x in source_data])\n numerator += sum([int(x[0]) for x in target_data])\n denominator = int(source_data[0][-2])\n denominator += int(target_data[0][-2])\n weight = min([numerator/denominator, 1.0])\n\n fh.write(f\"{source}\\t{target}\\t{1.0 - weight:.6f}\\n\")\n\n fh.close()\n\n distmat = matrix_from_adjacency(outfile)\n matrix_to_squareform(distmat,\n outfile.with_name(\"blastn_distance_matrix.tsv\"),\n lower_triangle=True)\n","repo_name":"chg60/phamclust","sub_path":"src/phamclust/blastn.py","file_name":"blastn.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"8117251506","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\nmpl.use('TkAgg');\r\n\r\nif __name__ == '__main__':\r\n\tgpus = tf.config.experimental.list_physical_devices(device_type='GPU')\r\n\tcpus = tf.config.experimental.list_physical_devices(device_type='CPU')\r\n\tprint(gpus, cpus)\r\n\tgpuid = 0\r\n\ttf.config.experimental.set_visible_devices(devices=gpus[gpuid], 
device_type='GPU')\r\n\ttf.config.experimental.set_virtual_device_configuration(\r\n\t\tgpus[gpuid],\r\n\t\t[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=8000)]\r\n\t)\r\n\tclass CircleLoss(tf.keras.losses.Loss):\r\n\t\tdef __init__(self,\r\n\t\t\t\t\t\t\t\t gamma: int = 64,\r\n\t\t\t\t\t\t\t\t margin: float = 0.25,\r\n\t\t\t\t\t\t\t\t batch_size: int = None,\r\n\t\t\t\t\t\t\t\t reduction='auto',\r\n\t\t\t\t\t\t\t\t name=None):\r\n\t\t\tsuper().__init__(reduction=reduction, name=name)\r\n\t\t\tself.gamma = gamma\r\n\t\t\tself.margin = margin\r\n\t\t\tself.O_p = 1 + self.margin\r\n\t\t\tself.O_n = -self.margin\r\n\t\t\tself.Delta_p = 1 - self.margin\r\n\t\t\tself.Delta_n = self.margin\r\n\t\t\tif batch_size:\r\n\t\t\t\tself.batch_size = batch_size\r\n\t\t\t\tself.batch_idxs = tf.expand_dims(\r\n\t\t\t\t\t\ttf.range(0, batch_size, dtype=tf.int32), 1)\t# shape [batch,1]\r\n\r\n\t\tdef call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:\r\n\t\t\t\"\"\" NOTE : y_pred must be cos similarity\r\n\t\t\tArgs:\r\n\t\t\t\t\ty_true (tf.Tensor): shape [batch,ndim]\r\n\t\t\t\t\ty_pred (tf.Tensor): shape [batch,ndim]\r\n\t\t\tReturns:\r\n\t\t\t\t\ttf.Tensor: loss\r\n\t\t\t\"\"\"\r\n\t\t\talpha_p = tf.nn.relu(self.O_p - tf.stop_gradient(y_pred))\r\n\t\t\talpha_n = tf.nn.relu(tf.stop_gradient(y_pred) - self.O_n)\r\n\t\t\t# yapf: disable\r\n\t\t\ty_true = tf.cast(y_true, tf.float32)\r\n\t\t\ty_pred = (y_true * (alpha_p * (y_pred - self.Delta_p)) +\r\n\t\t\t\t\t\t\t\t(1 - y_true) * (alpha_n * (y_pred - self.Delta_n))) * self.gamma\r\n\t\t\t# yapf: enable\r\n\t\t\treturn tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\r\n\t\r\n\tdef non_local_block(tensor_input, channel_denominator = 2):\r\n\t\t## theta-phi branch\r\n\t\ttheta = tf.keras.layers.Conv2D(filters = int(tensor_input.shape[3] / channel_denominator), kernel_size = (1, 1))(tensor_input);\r\n\t\ttheta = tf.keras.layers.Reshape((theta.shape[1] * theta.shape[2], theta.shape[3]))(theta);\r\n\t\t\r\n\t\tphi = tf.keras.layers.Conv2D(filters = int(tensor_input.shape[3] / channel_denominator), kernel_size = (1, 1))(tensor_input);\r\n\t\tphi = tf.keras.layers.Reshape((phi.shape[1] * phi.shape[2], phi.shape[3]))(phi);\r\n\t\t\r\n\t\tf = tf.keras.layers.dot([theta, phi], axes = 2);\r\n\t\tf = tf.keras.layers.Softmax(axis=1)(f);\r\n\t\t\r\n\t\t## g branch\r\n\t\tg = tf.keras.layers.Conv2D(filters = int(tensor_input.shape[3] / channel_denominator), kernel_size = (1, 1))(tensor_input);\r\n\t\tg = tf.keras.layers.Reshape((g.shape[1] * g.shape[2], g.shape[3]))(g);\r\n\t\t\r\n\t\t## weighted_matrix\r\n\t\ty = tf.keras.layers.dot([f, g], axes = [2, 1]);\r\n\t\ty = tf.keras.layers.Reshape((tensor_input.shape[1], tensor_input.shape[2], int(tensor_input.shape[3] / channel_denominator)))(y);\r\n\t\ty = tf.keras.layers.Conv2D(filters = tensor_input.shape[3], kernel_size = (1, 1))(y);\r\n\t\ty = tf.keras.layers.add([tensor_input, y]);\r\n\t\t\r\n\t\treturn y;\r\n\t\r\n\t## MoSE Block\r\n\tdef MoSE_Block(R_input):\r\n\t\tw = tf.keras.layers.GlobalAveragePooling2D()(R_input);\r\n\t\tw = tf.keras.layers.Reshape((1, w.shape[1], 1))(w);\r\n\t\tw = tf.keras.layers.Conv2D(filters = 16, kernel_size = (1, 1))(w);\r\n\t\tw = tf.keras.layers.Activation(activation = 'relu')(w);\r\n\t\tw = tf.keras.layers.Conv2D(filters = 1, kernel_size = (1, 1))(w);\r\n\t\tw = tf.keras.layers.Flatten()(w);\r\n\t\tw = tf.keras.layers.Activation(activation = 'softmax')(w);\r\n\t\tw = tf.keras.layers.Reshape((1, 1, w.shape[1]))(w);\r\n\t\treweighted_R = 
tf.keras.layers.Multiply()([w, R_input]);\r\n\t\t\r\n\t\treturn reweighted_R;\r\n\t\r\n\tdef model():\r\n\t\tinput = tf.keras.layers.Input(shape=(224, 224, 3));\r\n\t\t\r\n\t\tbase_model = tf.keras.applications.MobileNetV3Small(weights=\"imagenet\", include_top=False, input_tensor=input).output;\r\n\t\t##base_model = MoSE_Block(base_model);\r\n\t\t\r\n\t\tmask_classifier = tf.keras.layers.AveragePooling2D(pool_size=(7, 7))(base_model);\r\n\t\tmask_classifier = tf.keras.layers.Flatten()(mask_classifier);\r\n\t\tmask_classifier = tf.keras.layers.Dense(96, activation=\"relu\")(mask_classifier);\r\n\t\tmask_classifier = tf.keras.layers.BatchNormalization()(mask_classifier);\r\n\t\tmask_classifier = tf.keras.layers.Dense(2, activation=\"softmax\")(mask_classifier);\r\n\t\t\r\n\t\tperson_identifier = non_local_block(base_model);\r\n\t\tperson_identifier = MoSE_Block(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.AveragePooling2D(pool_size=(7, 7))(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.Conv2D(filters = 1024, kernel_size = (1, 1), activation=\"relu\")(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.BatchNormalization()(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.Conv2D(filters = 512, kernel_size = (1, 1), activation=\"relu\")(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.BatchNormalization()(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.Flatten()(person_identifier);\r\n\t\tperson_identifier = tf.keras.layers.Dense(429, activation=\"softmax\", kernel_regularizer=tf.keras.regularizers.l2())(person_identifier);\r\n\t\t\r\n\t\t\r\n\t\t\r\n\r\n\t\tmodel = tf.keras.Model(inputs = input, outputs = [mask_classifier, person_identifier]);\r\n\t\tadam = tf.keras.optimizers.Adam(lr = 0.0001);\r\n\t\tmodel.compile(loss=['categorical_crossentropy', CircleLoss(gamma=64, margin=0.25, batch_size=64)], loss_weights=[1,8], optimizer = adam, metrics = ['accuracy']);\r\n\t\t#model.compile(loss=[tfa.losses.SigmoidFocalCrossEntropy(reduction=tf.keras.losses.Reduction.AUTO), tfa.losses.SigmoidFocalCrossEntropy(reduction=tf.keras.losses.Reduction.AUTO)], loss_weights=[1,8], optimizer = adam, metrics = ['accuracy']);\r\n\t\treturn model\t\r\n\t\t\r\n\t\t\r\n\tprint('Loading data....');\r\n\tdata = [];\r\n\tlabel_mask = [];\r\n\tlabel_id = [];\r\n\r\n\r\n\tpath = './dataset/'\r\n\tdirs = os.listdir(path)\r\n\tfor f in dirs:\r\n\t\timg_path = path + '/' + f;\r\n\t\tdata.append(cv2.imread(img_path));\r\n\t\tlabel_mask.append(f.split('_')[1]);\r\n\t\tlabel_id.append(f.split('_')[0]);\r\n\tdata = np.array(data);\r\n\t\r\n\r\n\t(image_train, image_val, label_mask_train, label_mask_val, label_id_train, label_id_val) = train_test_split(data, label_mask, label_id, test_size = 0.10);\r\n\t\t\r\n\tlabel_mask_train_onehot = tf.keras.utils.to_categorical(label_mask_train, num_classes = 2);\r\n\tlabel_mask_val_onehot = tf.keras.utils.to_categorical(label_mask_val, num_classes = 2);\r\n\tlabel_id_train_onehot = tf.keras.utils.to_categorical(label_id_train, num_classes = 429);\r\n\tlabel_id_val_onehot = tf.keras.utils.to_categorical(label_id_val, num_classes = 429);\r\n\t\r\n\taug = tf.keras.preprocessing.image.ImageDataGenerator(\r\n\t\trotation_range=20,\r\n\t\tzoom_range=0.15,\r\n\t\twidth_shift_range=0.2,\r\n\t\theight_shift_range=0.2,\r\n\t\tshear_range=0.15,\r\n\t\thorizontal_flip=True,\r\n\t\tfill_mode=\"nearest\");\r\n\t\r\n\tBS = 64\r\n\t\r\n\tmodel = model();\r\n\tmodel.summary();\r\n\tcheckpointer = 
tf.keras.callbacks.ModelCheckpoint(filepath='facemask_personid_classifier_best.h5', save_best_only=True);\r\n\thistory = model.fit(\r\n\t\timage_train, [label_mask_train_onehot, label_id_train_onehot],\r\n\t\tvalidation_data=(image_val, [label_mask_val_onehot, label_id_val_onehot]), \r\n\t\tshuffle=True,\r\n\t\tepochs=200, \r\n\t\tverbose=1,\r\n\t\tbatch_size=BS, \r\n\t\tcallbacks=[checkpointer]\r\n\t);\r\n\tmodel.save('facemask_personid_classifier.h5');\r\n\r\n\t# 绘制训练 & 验证的准确率值\r\n\tplt.plot(history.history['accuracy']);\r\n\tplt.plot(history.history['val_accuracy']);\r\n\tplt.title('Accuracy')\r\n\tplt.ylabel('Accuracy')\r\n\tplt.xlabel('Epoch')\r\n\tplt.legend(['Train', 'Val'], loc='upper left')\r\n\tplt.show()\r\n\r\n\t# 绘制训练 & 验证的损失值\r\n\tplt.plot(history.history['loss'])\r\n\tplt.plot(history.history['val_loss'])\r\n\tplt.title('Loss')\r\n\tplt.ylabel('Loss')\r\n\tplt.xlabel('Epoch')\r\n\tplt.legend(['Train', 'Val'], loc='upper left')\r\n\tplt.show()\r\n\r\n","repo_name":"javierztl/AI2022","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70523088834","text":"import machine, time, ubinascii, json\npins = [machine.Pin(i, machine.Pin.OUT) for i in (0, 2, 4, 5, 12, 13, 14, 15)]\n\ndictionary = {0:0, 2:1, 4:2, 5:3, 12:4, 13:5, 14:6, 15:7}\n\nhtml = \"\"\"\n\n ESP8266 Pins \n

<body>
<h1>ESP8266 Pins</h1>
<table border="1">
<!-- %(asctime)s [UTC] - %(message)s -->
<tr><th>Pin</th><th>Value</th></tr>
%s
</table>
</body>
</html>
\n \n\n\"\"\"\n\n#Temperature-specific part\n#For calculating Celsius value from byte array\ndef temp_c(data):\n value = data[0] << 8 | data[1]\n temp = (value & 0xFFF) / 16.0\n if value & 0x1000:\n temp -= 256.0\n return temp\n\ni2c = machine.I2C(scl=machine.Pin(5), sda=machine.Pin(4)) #Set up a bus\n#Test if a bus has anything connected to it\ni2c.scan()\n\n#Addresses to get the temperature from - we don't need to set settings\naddress = 24\ntemp_reg = 5\n#Byte array used to hold temperature\ndata = bytearray(2)\n\n#Potentiometer-specific part\nspi = machine.SPI(1, baudrate=1000000, polarity=0, phase=0) \n#Set up chip select\nchip_select = machine.Pin(16, machine.Pin.OUT)\n\ndata_out = bytearray(3)\ndata_in = bytearray(3)\n#Set up input\ndata_in[0] = 0x01\ndata_in[1] = 0xb0\ndata_in[2] = 0x0 \n\n#Function for regular requests\ndef webpage_request():\n #Get the temperature\n i2c.readfrom_mem_into(address, temp_reg, data) #Read 2 bytes from temp register\n temperature = temp_c(data)\n #Get the potentiometer\n chip_select.off()\n spi.write_readinto(data_in, data_out) #Choose third input\n chip_select.on()\n data_out[1] = data_out[1] & 0x03\n hex = ubinascii.hexlify(data_out) #Get a hexadecimal value from output\n integer = int(hex, 16) #Get a decimal value from output\n fraction = integer % 1024 #Get results from the first 10 bits (0-1023) and dismiss the rest\n rows = ['%s%d' % (str(p), p.value()) for p in pins]\n rows.append('%s%d' % (\"Temperature\", temperature))\n rows.append('%s%d' % (\"Potentiometer\", fraction))\n response = html % '\\n'.join(rows)\n return response\n\ndef pin_request(value):\n if value == -1:\n rows = [(str(p), p.value()) for p in pins]\n response = json.dumps(rows)\n else:\n try:\n response = json.dumps((pins[value], pins[value].value()))\n except Exception:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n return response\n\ndef sensor_request(value):\n #Get the temperature\n i2c.readfrom_mem_into(address, temp_reg, data) #Read 2 bytes from temp register\n temperature = temp_c(data)\n #Get the potentiometer\n chip_select.off()\n spi.write_readinto(data_in, data_out) #Choose third input\n chip_select.on()\n data_out[1] = data_out[1] & 0x03\n hex = ubinascii.hexlify(data_out) #Get a hexadecimal value from output\n integer = int(hex, 16) #Get a decimal value from output\n fraction = integer % 1024 #Get results from the first 10 bits (0-1023) and dismiss the rest\n if value == b\"\": \n response = json.dumps([(\"Temperature\", temperature), (\"Potentiometer\", fraction)])\n elif value == b\"temperature\":\n response = json.dumps((\"Temperature\", temperature))\n elif value == b\"potentiometer\":\n response = json.dumps((\"Potentiometer\", fraction))\n else:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n return response\n\ndef pin_set(pin, value):\n try:\n if value == b\"1\":\n pins[pin].on()\n response = \"HTTP/1.1 200 OK\\r\\n\"\n elif value == b\"0\":\n pins[pin].off()\n response = \"HTTP/1.1 200 OK\\r\\n\"\n else:\n response = \"HTTP/1.1 400 Bad Request\\r\\n\"\n except Exception:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n return response\n\nimport socket\naddr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]\n\ns = socket.socket()\ns.bind(addr)\ns.listen(1)\n\nprint('listening on', addr)\n\nwhile True:\n cl, addr = s.accept()\n print('client connected from', addr)\n cl_file = cl.makefile('rwb', 0)\n line = cl_file.readline()\n print(line)\n url = line.split()\n if len(url) < 1:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n elif url[0] == b\"GET\":\n 
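        # Route GET requests: "/" serves the HTML status page, while the
        # /pins and /sensors paths handled below return JSON or an HTTP error.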
print(url[1])\n if url[1] == b\"/\":\n response = webpage_request()\n else:\n path = url[1].split(b\"/\")\n if path[1] == b\"pins\":\n if len(path) == 2:\n response = pin_request(-1)\n elif len(path) == 3:\n try:\n response = pin_request(dictionary[int(path[2])])\n except Exception:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n elif len(path) == 4:\n \tprint(\"Setting a pin\")\n \tvalue = path[3]\n \tprint(b\"value: \"+value)\n \ttry:\n \t\tresponse = pin_set(dictionary[int(path[2])], value)\n \texcept Exception:\n \t\tresponse = \"HTTP/1.1 404 Not Found\\r\\n\"\n else:\n \tresponse = \"HTTP/1.1 400 Bad Request\\r\\n\"\n elif path[1] == b\"sensors\":\n if len(path) == 2:\n response = sensor_request(b\"\")\n elif len(path) == 3:\n response = sensor_request(path[2])\n else:\n \tresponse = \"HTTP/1.1 400 Bad Request\\r\\n\"\n else:\n response = \"HTTP/1.1 404 Not Found\\r\\n\"\n while True:\n line = cl_file.readline()\n print(line)\n if not line or line == b'\\r\\n':\n break\n\n cl.send(response)\n cl.close()","repo_name":"binarily/dtu-projects","sub_path":"introduction-to-cyber-systems/REST.py","file_name":"REST.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70599595076","text":"preco = float(input(\"Preço unitário do produto: \"))\r\nquantidade = int(input(\"Quantidade comprada: \"))\r\ndinheiro = float(input(\"Dinheiro recebido: \"))\r\n\r\ntotal = preco * quantidade\r\ntroco = dinheiro - total\r\n\r\nif dinheiro < total:\r\n print(f\"DINHEIRO INSUFICIENTE. FALTAM {troco * -1:.2f} REAIS\")\r\nelse:\r\n print(f\"TROCO = {troco:.2f}\")\r\n","repo_name":"murilobarbosaa/Algoritmos_Udemy","sub_path":"Python/troco_verificado.py","file_name":"troco_verificado.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"72497437955","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[2]:\n\n\nclass NeuralNetwork:\n def __init__(self,X,y,hidden_units=5,lr=0.5):\n self.X = X\n self.y = y\n self.hidden_units = hidden_units\n self.lr = lr\n self._X = np.insert(X,[0],1,axis=1)\n self.alpha = np.random.uniform(-0.1,0.1,(self.hidden_units,len(self._X[0])))\n self.beta = np.random.uniform(-0.1,0.1,(len(self.y[0]),self.hidden_units+1))\n\n def NNforward(self,X_1,y_1):\n a = np.dot(self.alpha,np.reshape(X_1,(len(X_1),1)))\n z = 1/(1+np.exp(-a))\n Z = np.insert(z,0,1)\n b = np.dot(self.beta,np.reshape(Z,(len(Z),1)))\n yhat = 1/(1+np.exp(-b))\n y_1 = np.reshape(y_1,(len(y_1),1))\n J = np.sum((y_1-yhat)**2)/2\n return J,yhat,b,Z,z,a\n \n def NNbackward(self,J,yhat,b,Z,z,a,y_1,alpha,beta,X_1):\n y_1 = np.reshape(y_1,(len(y_1),1))\n dJdyhat = (yhat-y_1)\n dJdb = yhat*(1-yhat)*dJdyhat\n dJdbeta = np.dot(dJdb,np.reshape(Z,(1,len(Z))))\n dJdz = np.dot(beta[:,1:].T,dJdb)\n dJda = dJdz*z*(1-z)\n dJdalpha = np.dot(dJda,np.reshape(X_1,(1,len(X_1))))\n return dJdalpha,dJdbeta\n \n def train(self,epoch,error):\n e = 0\n allJ = 1e9\n while eerror:\n Jlist = []\n for i,j in zip(self._X,self.y):\n la = self.NNforward(i,j)\n Jlist.append(la[0])\n lo = self.NNbackward(*la,j,self.alpha,self.beta,i)\n self.alpha = self.alpha - self.lr*lo[0]\n self.beta = self.beta - self.lr*lo[1]\n e+=1\n allJ = sum(Jlist)/len(Jlist)\n \n def test(self,X_test,y_test):\n error = []\n pred = []\n X_new = np.insert(X_test,[0],1,axis=1)\n for x,y in zip(X_new,y_test):\n err = self.NNforward(x,y)\n error.append(err[0]*2)\n 
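            # NNforward returns J = SSE/2 as its first element, so doubling
            # err[0] recovers the full squared error; err[1] is the
            # prediction yhat.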
pred.append(err[1])\n return error,pred\n \n\n","repo_name":"deeprob/Computational-Methods-mAb-CHOcells","sub_path":"NN_model/NNModel.py","file_name":"NNModel.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3941013055","text":"from django.core.mail import send_mail\r\nfrom django.shortcuts import render,redirect\r\nfrom django.template.loader import render_to_string\r\nfrom django.conf import settings\r\n\r\nfrom .forms import ContactForm\r\n\r\ndef index(request):\r\n if request.method == 'POST':\r\n form = ContactForm(request.POST)\r\n\r\n if form.is_valid():\r\n name = form.cleaned_data['name']\r\n email = form.cleaned_data['email']\r\n content = form.cleaned_data['content']\r\n \r\n html = render_to_string('contact/emails/contactform.html', {\r\n 'name':name,\r\n 'email':email,\r\n 'content':content\r\n\r\n })\r\n\r\n\r\n print('the form was valid')\r\n\r\n\r\n send_mail('Email Testing', '', settings.EMAIL_HOST_USER, [email],html_message=html)\r\n\r\n return redirect('index')\r\n else:\r\n form = ContactForm()\r\n\r\n return render(request, 'contact/index.html', {\r\n 'form': form\r\n })\r\n\r\n","repo_name":"yahyiko/django-hello-world","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15879154979","text":"import argparse\nimport pathlib\nimport subprocess\nimport sys\nimport unittest\nfrom typing import Tuple\n\nfrom modules.test_utils import (\n with_tempdir,\n AbstractBlenderRunnerTest,\n)\n\n\nclass AbcPropError(Exception):\n \"\"\"Raised when AbstractAlembicTest.abcprop() finds an error.\"\"\"\n\n\nclass AbstractAlembicTest(AbstractBlenderRunnerTest):\n @classmethod\n def setUpClass(cls):\n import re\n\n cls.blender = args.blender\n cls.testdir = pathlib.Path(args.testdir)\n cls.alembic_root = pathlib.Path(args.alembic_root)\n\n # 'abcls' outputs ANSI colour codes, even when stdout is not a terminal.\n # See https://github.com/alembic/alembic/issues/120\n cls.ansi_remove_re = re.compile(rb'\\x1b[^m]*m')\n\n # 'abcls' array notation, like \"name[16]\"\n cls.abcls_array = re.compile(r'^(?P[^\\[]+)(\\[(?P\\d+)\\])?$')\n\n def abcls(self, *arguments) -> Tuple[int, str]:\n \"\"\"Uses abcls and return its output.\n\n :return: tuple (process exit status code, stdout)\n \"\"\"\n\n command = (self.alembic_root / 'bin' / 'abcls', *arguments)\n # Convert Path to str; Path works fine on Linux, but not on Windows.\n command_str = [str(arg) for arg in command]\n proc = subprocess.run(command_str, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n timeout=30)\n\n coloured_output = proc.stdout\n output = self.ansi_remove_re.sub(b'', coloured_output).decode('utf8')\n\n # Because of the ANSI colour codes, we need to remove those first before\n # decoding to text. 
This means that we cannot use the universal_newlines\n # parameter to subprocess.run(), and have to do the conversion ourselves\n output = output.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n\n if proc.returncode:\n str_command = \" \".join(str(c) for c in command)\n print(f'command {str_command} failed with status {proc.returncode}')\n\n return (proc.returncode, output)\n\n def abcprop(self, filepath: pathlib.Path, proppath: str) -> dict:\n \"\"\"Uses abcls to obtain compound property values from an Alembic object.\n\n A dict of subproperties is returned, where the values are Python values.\n\n The Python bindings for Alembic are old, and only compatible with Python 2.x,\n so that's why we can't use them here, and have to rely on other tooling.\n \"\"\"\n import collections\n\n command = ('-vl', '%s%s' % (filepath, proppath))\n returncode, output = self.abcls(*command)\n if returncode:\n raise AbcPropError('Error %d running abcls:\\n%s' % (returncode, output))\n\n # Mapping from value type to callable that can convert a string to Python values.\n converters = {\n 'bool_t': int,\n 'uint8_t': int,\n 'int16_t': int,\n 'int32_t': int,\n 'uint32_t': int,\n 'uint64_t': int,\n 'float64_t': float,\n 'float32_t': float,\n 'string': str,\n }\n\n result = {}\n\n # Ideally we'd get abcls to output JSON, see https://github.com/alembic/alembic/issues/121\n lines = collections.deque(output.split('\\n'))\n while lines:\n info = lines.popleft()\n if not info:\n continue\n parts = info.split()\n proptype = parts[0]\n\n if proptype == 'CompoundProperty':\n # To read those, call self.abcprop() on it.\n continue\n\n try:\n valtype_and_arrsize, name_and_extent = parts[1:]\n except ValueError as ex:\n raise ValueError(f'Error parsing result from abcprop \"{info.strip()}\": {ex}') from ex\n\n # Parse name and extent\n m = self.abcls_array.match(name_and_extent)\n if not m:\n self.fail('Unparsable name/extent from abcls: %s' % name_and_extent)\n name, extent = m.group('name'), m.group('arraysize')\n\n if extent != '1':\n self.fail('Unsupported extent %s for property %s/%s' % (extent, proppath, name))\n\n # Parse type\n m = self.abcls_array.match(valtype_and_arrsize)\n if not m:\n self.fail('Unparsable value type from abcls: %s' % valtype_and_arrsize)\n valtype, scalarsize = m.group('name'), m.group('arraysize')\n\n # Convert values\n try:\n conv = converters[valtype]\n except KeyError:\n self.fail('Unsupported type %s for property %s/%s' % (valtype, proppath, name))\n\n def convert_single_line(linevalue):\n try:\n if scalarsize is None:\n return conv(linevalue)\n else:\n return [conv(v.strip()) for v in linevalue.split(',')]\n except ValueError as ex:\n return str(ex)\n\n if proptype == 'ScalarProperty':\n value = lines.popleft()\n result[name] = convert_single_line(value)\n elif proptype == 'ArrayProperty':\n arrayvalue = []\n # Arrays consist of a variable number of items, and end in a blank line.\n while True:\n linevalue = lines.popleft()\n if not linevalue:\n break\n arrayvalue.append(convert_single_line(linevalue))\n result[name] = arrayvalue\n else:\n self.fail('Unsupported type %s for property %s/%s' % (proptype, proppath, name))\n\n return result\n\n def assertAlmostEqualFloatArray(self, actual, expect, places=6, delta=None):\n \"\"\"Asserts that the arrays of floats are almost equal.\"\"\"\n\n self.assertEqual(len(actual), len(expect),\n 'Actual array has %d items, expected %d' % (len(actual), len(expect)))\n\n for idx, (act, exp) in enumerate(zip(actual, expect)):\n self.assertAlmostEqual(act, exp, 
places=places, delta=delta,\n msg='%f != %f at index %d' % (act, exp, idx))\n\n\nclass HierarchicalAndFlatExportTest(AbstractAlembicTest):\n @with_tempdir\n def test_hierarchical_export(self, tempdir: pathlib.Path):\n abc = tempdir / 'cubes_hierarchical.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=False)\" % abc.as_posix()\n self.run_blender('cubes-hierarchy.blend', script)\n\n # Now check the resulting Alembic file.\n xform = self.abcprop(abc, '/Cube/Cube_002/Cube_012/.xform')\n self.assertEqual(1, xform['.inherits'])\n self.assertAlmostEqualFloatArray(\n xform['.vals'],\n [1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 3.07484, -2.92265, 0.0586434, 1.0]\n )\n\n @with_tempdir\n def test_flat_export(self, tempdir: pathlib.Path):\n abc = tempdir / 'cubes_flat.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=True)\" % abc.as_posix()\n self.run_blender('cubes-hierarchy.blend', script)\n\n # Now check the resulting Alembic file.\n xform = self.abcprop(abc, '/Cube_012/.xform')\n self.assertEqual(1, xform['.inherits'], \"Blender transforms always inherit\")\n\n self.assertAlmostEqualFloatArray(\n xform['.vals'],\n [0.343134, 0.485243, 0.804238, 0,\n 0.0, 0.856222, -0.516608, 0,\n -0.939287, 0.177266, 0.293799, 0,\n 1, 3, 4, 1],\n )\n\n\nclass DupliGroupExportTest(AbstractAlembicTest):\n @with_tempdir\n def test_hierarchical_export(self, tempdir: pathlib.Path):\n abc = tempdir / 'dupligroup_hierarchical.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=False)\" % abc.as_posix()\n self.run_blender('dupligroup-scene.blend', script)\n\n # Now check the resulting Alembic file.\n xform = self.abcprop(abc, '/Real_Cube/Linked_Suzanne/Cylinder-0/Suzanne-1/.xform')\n self.assertEqual(1, xform['.inherits'])\n self.assertAlmostEqualFloatArray(\n xform['.vals'],\n [1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 2.0, 0.0, 1.0]\n )\n\n @with_tempdir\n def test_flat_export(self, tempdir: pathlib.Path):\n abc = tempdir / 'dupligroup_hierarchical.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=True)\" % abc.as_posix()\n self.run_blender('dupligroup-scene.blend', script)\n\n # Now check the resulting Alembic file.\n xform = self.abcprop(abc, '/Suzanne-1/.xform')\n self.assertEqual(1, xform['.inherits'])\n\n self.assertAlmostEqualFloatArray(\n xform['.vals'],\n [1.5, 0.0, 0.0, 0.0,\n 0.0, 1.5, 0.0, 0.0,\n 0.0, 0.0, 1.5, 0.0,\n 2.0, 3.0, 0.0, 1.0]\n )\n\n @with_tempdir\n def test_multiple_duplicated_hierarchies(self, tempdir: pathlib.Path):\n abc = tempdir / \"multiple-duplicated-hierarchies.abc\"\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1)\" % abc.as_posix()\n self.run_blender('multiple-duplicated-hierarchies.blend', script)\n\n # This is the expected hierarchy:\n # ABC\n # `--Triangle\n # |--Triangle\n # |--Empty-1\n # | `--Pole-1-0\n # | |--Pole\n # | `--Block-1-1\n # | `--Block\n # |--Empty\n # | `--Pole-0\n # | |--Pole\n # | `--Block-1\n # | `--Block\n # |--Empty-2\n # | `--Pole-2-0\n # | |--Pole\n # | `--Block-2-1\n # | `--Block\n # `--Empty-0\n # `--Pole-0-0\n # |--Pole\n # `--Block-0-1\n # `--Block\n\n # Now check the resulting Alembic file.\n xform = self.abcprop(abc, 
'/Triangle/Empty-1/Pole-1-0/Block-1-1/.xform')\n self.assertEqual(1, xform['.inherits'])\n self.assertAlmostEqualFloatArray(\n xform['.vals'],\n [1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 2.0, 0.0, 1.0]\n )\n\n # If the property can be gotten, the hierarchy is okay. No need to actually check each xform.\n self.abcprop(abc, '/Triangle/.xform')\n self.abcprop(abc, '/Triangle/Empty-1/.xform')\n self.abcprop(abc, '/Triangle/Empty-1/Pole-1-0/.xform')\n self.abcprop(abc, '/Triangle/Empty-1/Pole-1-0/Block-1-1/.xform')\n self.abcprop(abc, '/Triangle/Empty/.xform')\n self.abcprop(abc, '/Triangle/Empty/Pole-0/.xform')\n self.abcprop(abc, '/Triangle/Empty/Pole-0/Block-1/.xform')\n self.abcprop(abc, '/Triangle/Empty-2/.xform')\n self.abcprop(abc, '/Triangle/Empty-2/Pole-2-0/.xform')\n self.abcprop(abc, '/Triangle/Empty-2/Pole-2-0/Block-2-1/.xform')\n self.abcprop(abc, '/Triangle/Empty-0/.xform')\n self.abcprop(abc, '/Triangle/Empty-0/Pole-0-0/.xform')\n self.abcprop(abc, '/Triangle/Empty-0/Pole-0-0/Block-0-1/.xform')\n\n\nclass CurveExportTest(AbstractAlembicTest):\n @with_tempdir\n def test_export_single_curve(self, tempdir: pathlib.Path):\n abc = tempdir / 'single-curve.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=False)\" % abc.as_posix()\n self.run_blender('single-curve.blend', script)\n\n # Now check the resulting Alembic file.\n abcprop = self.abcprop(abc, '/NurbsCurve/CurveData/.geom')\n self.assertEqual(abcprop['.orders'], [4])\n\n abcprop = self.abcprop(abc, '/NurbsCurve/CurveData/.geom/.userProperties')\n self.assertEqual(abcprop['blender:resolution'], 10)\n\n\nclass HairParticlesExportTest(AbstractAlembicTest):\n \"\"\"Tests exporting with/without hair/particles.\n\n Just a basic test to ensure that the enabling/disabling works, and that export\n works at all. 
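A typical check is sketched here (a hypothetical illustration; the names are taken from the test methods below, it is not an extra test):\n\n        abc = self._do_test(tempdir, export_hair=True, export_particles=True)\n        assert 'nVertices' in self.abcprop(abc, '/Suzanne/Hair_system/.geom')\n\n    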
NOT testing the quality/contents of the exported file.\n \"\"\"\n\n def _do_test(self, tempdir: pathlib.Path, export_hair: bool, export_particles: bool) -> pathlib.Path:\n abc = tempdir / 'hair-particles.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=True, flatten=False, \" \\\n \"export_hair=%r, export_particles=%r, as_background_job=False)\" \\\n % (abc.as_posix(), export_hair, export_particles)\n self.run_blender('hair-particles.blend', script)\n return abc\n\n @with_tempdir\n def test_with_both(self, tempdir: pathlib.Path):\n abc = self._do_test(tempdir, True, True)\n\n abcprop = self.abcprop(abc, '/Suzanne/Hair_system/.geom')\n self.assertIn('nVertices', abcprop)\n\n abcprop = self.abcprop(abc, '/Suzanne/Non-hair_particle_system/.geom')\n self.assertIn('.velocities', abcprop)\n\n abcprop = self.abcprop(abc, '/Suzanne/MonkeyMesh/.geom')\n self.assertIn('.faceIndices', abcprop)\n\n @with_tempdir\n def test_with_hair_only(self, tempdir: pathlib.Path):\n abc = self._do_test(tempdir, True, False)\n\n abcprop = self.abcprop(abc, '/Suzanne/Hair_system/.geom')\n self.assertIn('nVertices', abcprop)\n\n self.assertRaises(AbcPropError, self.abcprop, abc,\n '/Suzanne/Non-hair_particle_system/.geom')\n\n abcprop = self.abcprop(abc, '/Suzanne/MonkeyMesh/.geom')\n self.assertIn('.faceIndices', abcprop)\n\n @with_tempdir\n def test_with_particles_only(self, tempdir: pathlib.Path):\n abc = self._do_test(tempdir, False, True)\n\n self.assertRaises(AbcPropError, self.abcprop, abc, '/Suzanne/Hair_system/.geom')\n\n abcprop = self.abcprop(abc, '/Suzanne/Non-hair_particle_system/.geom')\n self.assertIn('.velocities', abcprop)\n\n abcprop = self.abcprop(abc, '/Suzanne/MonkeyMesh/.geom')\n self.assertIn('.faceIndices', abcprop)\n\n @with_tempdir\n def test_with_neither(self, tempdir: pathlib.Path):\n abc = self._do_test(tempdir, False, False)\n\n self.assertRaises(AbcPropError, self.abcprop, abc, '/Suzanne/Hair_system/.geom')\n self.assertRaises(AbcPropError, self.abcprop, abc,\n '/Suzanne/Non-hair_particle_system/.geom')\n\n abcprop = self.abcprop(abc, '/Suzanne/MonkeyMesh/.geom')\n self.assertIn('.faceIndices', abcprop)\n\n\nclass UVMapExportTest(AbstractAlembicTest):\n @with_tempdir\n def test_uvmap_export(self, tempdir: pathlib.Path):\n \"\"\"Minimal test for exporting multiple UV maps on an animated mesh.\n\n This covers the issue reported in #77021.\n \"\"\"\n basename = 'T77021-multiple-uvmaps-animated-mesh'\n abc = tempdir / f'{basename}.abc'\n script = f\"import bpy; bpy.ops.wm.alembic_export(filepath='{abc.as_posix()}', start=1, end=1, \" \\\n f\"visible_objects_only=True, flatten=False)\"\n self.run_blender(f'{basename}.blend', script)\n\n self.maxDiff = 1000\n\n # The main UV map should be written to .geom\n abcprop = self.abcprop(abc, '/Cube/Cube/.geom/uv')\n self.assertEqual(abcprop['.vals'], [\n [0.625, 0.75],\n [0.875, 0.75],\n [0.875, 0.5],\n [0.625, 0.5],\n [0.375, 1.0],\n [0.625, 1.0],\n [0.375, 0.75],\n [0.375, 0.25],\n [0.625, 0.25],\n [0.625, 0.0],\n [0.375, 0.0],\n [0.125, 0.75],\n [0.375, 0.5],\n [0.125, 0.5],\n ])\n\n # The second UV map should be written to .arbGeomParams\n abcprop = self.abcprop(abc, '/Cube/Cube/.geom/.arbGeomParams/Secondary')\n self.assertEqual(abcprop['.vals'], [\n [0.75, 0.375],\n [0.75, 0.125],\n [0.5, 0.125],\n [0.5, 0.375],\n [1.0, 0.625],\n [1.0, 0.375],\n [0.75, 0.625],\n [0.25, 0.625],\n [0.25, 0.375],\n [0.0, 0.375],\n [0.0, 0.625],\n [0.75, 0.875],\n [0.5, 0.625],\n [0.5, 0.875],\n 
])\n\n\nclass LongNamesExportTest(AbstractAlembicTest):\n @with_tempdir\n def test_export_long_names(self, tempdir: pathlib.Path):\n abc = tempdir / 'long-names.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, \" \\\n \"visible_objects_only=False, flatten=False)\" % abc.as_posix()\n self.run_blender('long-names.blend', script)\n\n name_parts = [\n 'foG9aeLahgoh5goacee1dah6Hethaghohjaich5pasizairuWigee1ahPeekiGh',\n 'yoNgoisheedah2ua0eigh2AeCaiTee5bo0uphoo7Aixephah9racahvaingeeH4',\n 'zuthohnoi1thooS3eezoo8seuph2Boo5aefacaethuvee1aequoonoox1sookie',\n 'wugh4ciTh3dipiepeequait5uug7thiseek5ca7Eijei5ietaizokohhaecieto',\n 'up9aeheenein9oteiX6fohP3thiez6Ahvah0oohah1ep2Eesho4Beboechaipoh',\n 'coh4aehiacheTh0ue0eegho9oku1lohl4loht9ohPoongoow7dasiego6yimuis',\n 'lohtho8eigahfeipohviepajaix4it2peeQu6Iefee1nevihaes4cee2soh4noy',\n 'kaht9ahv0ieXaiyih7ohxe8bah7eeyicahjoa2ohbu7Choxua7oongah6sei4bu',\n 'deif0iPaechohkee5nahx6oi2uJeeN7ze3seunohJibe4shai0mah5Iesh3Quai',\n 'ChohDahshooNee0NeNohthah0eiDeese3Vu6ohShil1Iey9ja0uebi2quiShae6',\n 'Dee1kai7eiph2ahh2nufah3zai3eexeengohQue1caj0eeW0xeghi3eshuadoot',\n 'aeshiup3aengajoog0AhCoo5tiu3ieghaeGhie4Tu1ohh1thee8aepheingah1E',\n 'ooRa6ahciolohshaifoopeo9ZeiGhae2aech4raisheiWah9AaNga0uas9ahquo',\n 'thaepheip2aip6shief4EaXopei8ohPo0ighuiXah2ashowai9nohp4uach6Mei',\n 'ohph4yaev3quieji3phophiem3OoNuisheepahng4waithae3Naichai7aw3noo',\n 'aibeawaneBahmieyuph8ieng8iopheereeD2uu9Uyee5bei2phahXeir8eeJ8oo',\n 'ooshahphei2hoh3uth5chaen7ohsai6uutiesucheichai8ungah9Gie1Aiphie',\n 'eiwohchoo7ere2iebohn4Aapheichaelooriiyaoxaik7ooqua7aezahx0aeJei',\n 'Vah0ohgohphiefohTheshieghichaichahch5moshoo0zai5eeva7eisi4yae8T',\n 'EibeeN0fee0Gohnguz8iec6yeigh7shuNg4eingu3siph9joucahpeidoom4ree',\n 'iejiu3shohheeZahHusheimeefaihoh5eecachu5eeZie9ceisugu9taidohT3U',\n 'eex6dilakaix5Eetai7xiCh5Jaa8aiD4Ag3tuij1aijohv5fo0heevah8hohs3m',\n 'ohqueeNgahraew6uraemohtoo5qua3oojiex6ohqu6Aideibaithaiphuriquie',\n 'cei0eiN4Shiey7Aeluy3unohboo5choiphahc2mahbei5paephaiKeso1thoog1',\n 'ieghif4ohKequ7ong0jah5ooBah0eiGh1caechahnahThae9Shoo0phopashoo4',\n 'roh9er3thohwi5am8iequeequuSh3aic0voocai3ihi5nie2abahphupiegh7vu',\n 'uv3Quei7wujoo5beingei2aish5op4VaiX0aebai7iwoaPee5pei8ko9IepaPig',\n 'co7aegh5beitheesi9lu7jeeQu3johgeiphee9cheichi8aithuDehu2gaeNein',\n 'thai3Tiewoo4nuir1ohy4aithiuZ7shae1luuwei5phibohriepe2paeci1Ach8',\n 'phoi3ribah7ufuvoh8eigh1oB6deeBaiPohphaghiPieshahfah5EiCi3toogoo',\n 'aiM8geil7ooreinee4Cheiwea4yeec8eeshi7Sei4Shoo3wu6ohkaNgooQu1mai',\n 'agoo3faciewah9ZeesiXeereek7am0eigaeShie3Tisu8haReeNgoo0ci2Hae5u',\n 'Aesatheewiedohshaephaenohbooshee8eu7EiJ8isal1laech2eiHo0noaV3ta',\n 'liunguep3ooChoo4eir8ahSie8eenee0oo1TooXu8Cais8Aimo4eir6Phoo3xei',\n 'toe9heepeobein3teequachemei0Cejoomef9ujie3ohwae9AiNgiephi3ep0de',\n 'ua6xooY9uzaeB3of6sheiyaedohoiS5Eev0Aequ9ahm1zoa5Aegh3ooz9ChahDa',\n 'eevasah6Bu9wi7EiwiequumahkaeCheegh6lui8xoh4eeY4ieneavah8phaibun',\n 'AhNgei2sioZeeng6phaecheemeehiShie5eFeiTh6ooV8iiphabud0die4siep4',\n 'kushe6Xieg6ahQuoo9aex3aipheefiec1esa7OhBuG0ueziep9phai5eegh1vie',\n 'Jie5yu8aafuQuoh9shaep3moboh3Pooy7och8oC6obeik6jaew2aiLooweib3ch',\n 'ohohjajaivaiRail3odaimei6aekohVaicheip2wu7phieg5Gohsaing2ahxaiy',\n 'hahzaht6yaiYu9re9jah9loisiit4ahtoh2quoh9xohishioz4oo4phofu3ogha',\n 'pu4oorea0uh2tahB8aiZoonge1aophaes6ogaiK9ailaigeej4zoVou8ielotee',\n 'cae2thei3Luphuqu0zeeG8leeZuchahxaicai4ui4Eedohte9uW6gae8Geeh0ea',\n 'air7tuy7ohw5sho2Tahpai8aep4so5ria7eaShus5weaqu0Naquei2xaeyoo2ae',\n 
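# each entry is a ~64-character name; the joined path exercises Alembic's handling of very long object paths (count approximate)\n            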
'vohge4aeCh7ahwoo7Jaex6sohl0Koong4Iejisei8Coir0iemeiz9uru9Iebaep',\n 'aepeidie8aiw6waish9gie4Woolae2thuj5phae4phexux7gishaeph4Deu7ooS',\n 'vahc5ia0xohHooViT0uyuxookiaquu2ogueth0ahquoudeefohshai8aeThahba',\n 'mun3oagah2eequaenohfoo8DaigeghoozaV2eiveeQuee7kah0quaa6tiesheet',\n 'ooSet4IdieC4ugow3za0die4ohGoh1oopoh6luaPhaeng4Eechea1hae0eimie5',\n 'iedeimadaefu2NeiPaey2jooloov5iehiegeakoo4ueso7aeK9ahqu2Thahkaes',\n 'nahquah9Quuu2uuf0aJah7eishi2siegh8ue5eiJa2EeVu8ebohkepoh4dahNgo',\n 'io1bie7chioPiej5ae2oohe2fee6ooP2thaeJohjohb9Se8tang3eipaifeimai',\n 'oungoqu6dieneejiechez1xeD2Zi9iox2Ahchaiy9ithah3ohVoolu2euQuuawo',\n 'thaew0veigei4neishohd8mecaixuqu7eeshiex1chaigohmoThoghoitoTa0Eo',\n 'ahroob2phohvaiz0Ohteik2ohtakie6Iu1vitho8IyiyeeleeShae9defaiw9ki',\n 'DohHoothohzeaxolai3Toh5eJie7ahlah9reF0ohn1chaipoogain2aibahw4no',\n 'aif8lo5she4aich5cho2rie8ieJaujeem2Joongeedae4vie3tah1Leequaix1O',\n 'Aang0Shaih6chahthie1ahZ7aewei9thiethee7iuThah3yoongi8ahngiobaa5',\n 'iephoBuayoothah0Ru6aichai4aiw8deg1umongauvaixai3ohy6oowohlee8ei',\n 'ohn5shigoameer0aejohgoh8oChohlaecho9jie6shu0ahg9Bohngau6paevei9',\n 'edahghaishak0paigh1eecuich3aad7yeB0ieD6akeeliem2beifufaekee6eat',\n 'hiechahgheloh2zo7Ieghaiph0phahhu8aeyuiKie1xeipheech9zai4aeme0ee',\n 'Cube'\n ]\n name = '/' + '/'.join(name_parts)\n\n # Now check the resulting Alembic file.\n abcprop = self.abcprop(abc, '%s/.xform' % name)\n self.assertEqual(abcprop['.vals'], [\n 1.0, 0.0, 0.0, 0.0,\n 0.0, 1.0, 0.0, 0.0,\n 0.0, 0.0, 1.0, 0.0,\n 0.0, 3.0, 0.0, 1.0,\n ])\n\n abcprop = self.abcprop(abc, '%s/Cube/.geom' % name)\n self.assertIn('.faceCounts', abcprop)\n\n\nclass InvisibleObjectExportTest(AbstractAlembicTest):\n \"\"\"Export an object which is invisible.\n\n This test only tests a small subset of the functionality that is required to\n export invisible objects. It just tests that the visibility property is\n written, and that it has the correct initial value. 
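For example, the nested test() helper below runs 'abcls -va visibility.abc/VisibleCube' and expects the stripped output to be a line like 'Cube .xform visible 1' (format inferred from the assertion, not from abcls documentation). Only that initial value is checked. 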
This is a limitation\n caused by these tests relying on `abcls`.\n \"\"\"\n\n @with_tempdir\n def test_hierarchical_export(self, tempdir: pathlib.Path):\n abc = tempdir / 'visibility.abc'\n script = \"import bpy; bpy.ops.wm.alembic_export(filepath='%s', start=1, end=2, \" \\\n \"visible_objects_only=False)\" % abc.as_posix()\n self.run_blender('visibility.blend', script)\n\n def test(cube_name: str, expect_visible: bool):\n returncode, output = self.abcls('-va', f'{abc}/{cube_name}')\n if returncode:\n self.fail(f\"abcls failed: {output}\")\n output = output.strip()\n self.assertEqual(f'Cube .xform visible {int(expect_visible)}', output)\n\n # This cube is always visible.\n test('VisibleCube', True)\n\n # This cube is never visible, and thus will not be pulled into the\n # depsgraph by the standard builder, only by the all-objects builder.\n test('InvisibleCube', False)\n\n # This cube has animated visibility, and thus will be pulled into the\n # depsgraph by the standard builder as well as the all-objects builder.\n test('InvisibleAnimatedCube', False)\n\n\nclass CustomPropertiesExportTest(AbstractAlembicTest):\n \"\"\"Test export of custom properties.\"\"\"\n\n def _run_export(self, tempdir: pathlib.Path) -> pathlib.Path:\n abc = tempdir / 'custom-properties.abc'\n script = (\n \"import bpy; bpy.context.scene.frame_set(1); \"\n \"bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1)\" % abc.as_posix()\n )\n self.run_blender('custom-properties.blend', script)\n return abc\n\n @with_tempdir\n def test_xform_props(self, tempdir: pathlib.Path) -> None:\n abc = self._run_export(tempdir)\n abcprop = self.abcprop(abc, '/Cube/.xform/.userProperties')\n\n # Simple, single values.\n self.assertEqual(abcprop['static_int'], [327])\n self.assertEqual(abcprop['static_float'], [47.01])\n self.assertEqual(abcprop['static_string'], ['Agents'])\n self.assertEqual(abcprop['keyed_float'], [-1])\n self.assertEqual(abcprop['keyed_int'], [-47])\n\n # Arrays.\n self.assertEqual(abcprop['keyed_array_float'], [-1.000, 0.000, 1.000])\n self.assertEqual(abcprop['keyed_array_int'], [42, 47, 327])\n\n # Multi-dimensional arrays.\n self.assertEqual(abcprop['array_of_strings'], ['ผัดไทย', 'Pad Thai'])\n self.assertEqual(\n abcprop['matrix_tuple'],\n [1.0, 0.0, 0.0, 3.33333, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n self.assertEqual(\n abcprop['static_matrix'],\n [1.0, 0.0, 0.0, 3.33333, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0])\n self.assertEqual(\n abcprop['nonuniform_array'],\n [10, 20, 30, 1, 2, 47])\n\n @with_tempdir\n def test_mesh_props(self, tempdir: pathlib.Path) -> None:\n abc = self._run_export(tempdir)\n abcprop = self.abcprop(abc, '/Cube/Cube/.geom/.userProperties')\n self.assertEqual(abcprop['mesh_tags'], ['cube', 'box', 'low-poly-sphere'])\n\n @with_tempdir\n def test_camera_props(self, tempdir: pathlib.Path) -> None:\n abc = self._run_export(tempdir)\n abcprop = self.abcprop(abc, '/Camera/Hasselblad/.geom/.userProperties')\n self.assertEqual(abcprop['type'], ['500c/m'])\n\n @with_tempdir\n def test_disabled_export_option(self, tempdir: pathlib.Path) -> None:\n abc = tempdir / 'custom-properties.abc'\n script = (\n \"import bpy; bpy.context.scene.frame_set(1); \"\n \"bpy.ops.wm.alembic_export(filepath='%s', start=1, end=1, export_custom_properties=False)\" % abc.as_posix()\n )\n self.run_blender('custom-properties.blend', script)\n\n abcprop = self.abcprop(abc, '/Camera/Hasselblad/.geom/.userProperties')\n self.assertIn('eyeSeparation', abcprop, 'Regular 
non-standard properties should still be written')\n self.assertNotIn('type', abcprop, 'Custom properties should not be written')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--blender', required=True)\n parser.add_argument('--testdir', required=True)\n parser.add_argument('--alembic-root', required=True)\n args, remaining = parser.parse_known_args()\n\n unittest.main(argv=sys.argv[0:1] + remaining)\n","repo_name":"blender/blender","sub_path":"tests/python/alembic_export_tests.py","file_name":"alembic_export_tests.py","file_ext":"py","file_size_in_byte":27415,"program_lang":"python","lang":"en","doc_type":"code","stars":10105,"dataset":"github-code","pt":"61"} +{"seq_id":"22730746446","text":"import os, sys\nmodd_str = os.path.abspath(os.path.dirname(__file__)) # module dir\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# TENSORFLOW\nimport tensorflow as tf\n\n# EAGER_EXECUTION - necessary for calls to TF API's to work outside of \n# a graph session run.\ntf.compat.v1.enable_eager_execution(\n config = None,\n device_policy = None,\n execution_mode = None)\n\nprint(\"TensorFlow version - %s\"%(tf.__version__))\nassert tf.__version__ == \"1.15.0\"\n\n\n# GF_IMAGES_JOBS\nprint(\"\\n\\nRUN THIS TEST WITH - 'LD_LIBRARY_PATH=../../build/ python3 test.py'\\n\\n\")\nassert \"LD_LIBRARY_PATH\" in os.environ.keys()\n\nsys.path.append(\"%s/../../build\"%(modd_str))\nimport gf_images_jobs_py as gf_images_jobs\n\n#---------------------------------------------------------------------------\ndef main():\n\n print(\"creating ML dataset...\")\n\n #---------------------------\n # CONFIG\n dataset_name_str = \"test\"\n dataset_target_dir_path_str = \"%s/data/output_ml/generated\"%(modd_str)\n classes_lst = [\"rect\"]\n elements_num_int = 2000\n img_width_int = 32\n img_height_int = 32\n img_channels_int = 4 # RGBA\n\n test_py_tfrecords_file_str = \"./data/output_ml/gf_py_test.tfrecords\"\n test_rust_tfrecords_file_str = \"./data/output_ml/gf_rust_test.tfrecords\"\n\n\n\n test_ml_tf_records_train__file_str = \"./data/output_ml/generated/test__train.tfrecords\"\n test_ml_tf_records_validate__file_str = \"./data/output_ml/generated/test__validate.tfrecords\"\n \n #---------------------------\n\n # GENERATE_ML_DATASET\n gf_images_jobs.generate_ml_dataset(dataset_name_str,\n classes_lst,\n elements_num_int,\n img_width_int,\n img_height_int,\n dataset_target_dir_path_str)\n\n # GENERATE_AND_REGISTER_ML_DATASET - generates .tfrecords of images and issues\n # HTTP request to \"gf_ml\" server to register the generated dataset.\n # gf_images_jobs.generate_and_register_ml_dataset(dataset_name_str,\n # classes_lst,\n # elements_num_int,\n # img_width_int,\n # img_height_int,\n # dataset_target_dir_path_str)\n\n \n print(\"----------------\")\n print(\"test .tfrecords reading\")\n\n tf_example__img_width_int = img_width_int\n tf_example__img_height_int = img_height_int\n collage__img_width_int = 1000\n collage__img_height_int = 1000\n collage__rows_num_int = 40\n collage__columns_num_int = 40\n\n assert os.path.isfile(test_ml_tf_records_train__file_str)\n gf_images_jobs.view_ml_dataset(test_ml_tf_records_train__file_str,\n \"./data__test/generated_dataset_collage.png\",\n tf_example__img_width_int,\n tf_example__img_height_int,\n collage__img_width_int,\n collage__img_height_int,\n collage__rows_num_int,\n collage__columns_num_int)\n\n exit()\n\n\n \n test__tf_record_processing(test_rust_tfrecords_file_str,\n img_width_int,\n img_height_int,\n 
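# img_channels_int is 4 (RGBA) per the config above, matching the PNGs the dataset generator writes\n                               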
p_img_channels_int = img_channels_int)\n \n \n # PY_TFRECORDS\n test__py_write_tfrecord(test_py_tfrecords_file_str)\n test__py_read_tfrecord(test_py_tfrecords_file_str, p_view_bool = True)\n \n # RUST_TFRECORDS\n test__py_read_tfrecord(test_rust_tfrecords_file_str, p_view_bool = True)\n\n\n\n\n\n\n \n\n\n\n#---------------------------------------------------------------------------\ndef test__tf_record_processing(p_tfrecord_path_str,\n p_img_width_int,\n p_img_height_int,\n\n # 4 - channels for RGBA\n p_img_channels_int = 4):\n assert os.path.isfile(p_tfrecord_path_str)\n\n # dataset contains serialized tf.train.Example messages\n dataset = tf.data.TFRecordDataset(\n p_tfrecord_path_str,\n compression_type = None,\n buffer_size = None,\n num_parallel_reads = 4) # load the file in parallel\n \n assert isinstance(dataset, tf.data.Dataset)\n print(dataset)\n print(dataset.element_spec)\n print(\"before parsing...\")\n\n #---------------------------------------------------------------------------\n def map_f(p_example):\n\n # assert isinstance(p_example, tf.data.Tensor)\n # print(p_example)\n\n label_shape_lst = []\n img_shape_lst = [p_img_width_int, p_img_height_int, p_img_channels_int] \n\n # FixedLenFeature(shape, dtype) - configuration for parsing a fixed-length input feature\n features_def_map = {\n \"label\": tf.compat.v1.io.FixedLenFeature(label_shape_lst, tf.int64),\n \"img\": tf.compat.v1.io.FixedLenFeature(img_shape_lst, tf.string)\n }\n\n # parse_single_example() - Parses a single Example proto\n example = tf.compat.v1.io.parse_single_example(p_example,\n features = features_def_map,\n name = \"test_example\")\n\n return example\n\n #---------------------------------------------------------------------------\n parsed_dataset = dataset.map(map_f)\n print(\"element spec - %s\"%(parsed_dataset.element_spec)) # inspect the type of each element component\n\n w, h, channels_int = parsed_dataset.element_spec[\"img\"].shape\n assert p_img_width_int == w\n assert p_img_height_int == h\n assert channels_int == p_img_channels_int\n \n#---------------------------------------------------------------------------\ndef test__py_write_tfrecord(p_tfrecord_path_str):\n #---------------------------------------------------------------------------\n def _int64_feature(value):\n # Returns an int64_list from a bool / enum / int / uint\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n \n #---------------------------------------------------------------------------\n def _bytes_feature(value):\n # Returns a bytes_list from a string / byte\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n \n #---------------------------------------------------------------------------\n # Read image raw data, which will be embedded in the record file later.\n image_string = open(\"data/output_ml/generated/train/rect/test-rect-0.png\", \"rb\").read()\n \n # Manually set the label to 0. This should be set according to your situation.\n label = 0\n \n # For each sample there are two features: image raw data, and label. 
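A tf.train.Example is a Features protobuf mapping string keys to Feature values. 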
Wrap them in a single dict.\n feature = {\n \"label\": _int64_feature(label),\n \"img\": _bytes_feature(image_string),\n }\n \n # Create a `example` from the feature dict.\n tf_example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n # Write the serialized example to a record file.\n with tf.io.TFRecordWriter(p_tfrecord_path_str) as writer:\n writer.write(tf_example.SerializeToString())\n\n#---------------------------------------------------------------------------\ndef test__py_read_tfrecord(p_tfrecord_path_str,\n p_view_bool = False):\n\n # dataset contains serialized tf.train.Example messages\n dataset = tf.data.TFRecordDataset(\n p_tfrecord_path_str,\n compression_type = None,\n buffer_size = None,\n num_parallel_reads = 4) # load the file in parallel\n\n\n print(dataset)\n\n for i, example in dataset.enumerate():\n\n assert isinstance(example, tf.Tensor)\n print(\"=========================================================================\")\n print(\"unparsed example - %s\"%(example)) \n\n #assert isinstance(raw_record, tf.python.framework.ops.EagerTensor)\n \n print(\"=========================-------\")\n example_parsed = tf.train.Example.FromString(example.numpy())\n \n print(\"example parsed (%s):\"%(p_tfrecord_path_str))\n print(example_parsed)\n print(\"=========================-------\")\n\n print(\"example_parsed type - %s\"%(type(example_parsed)))\n print(\"image feature size - %s\"%(example_parsed.features.feature[\"img\"].ByteSize()))\n\n #import tensorflow.core.example.example_pb2 as example_pb2\n #assert isinstance(parsed, example_pb2.Example)\n\n print(\"=========================-------\")\n print(\"image feature (%s):\"%(p_tfrecord_path_str))\n img_feature = example_parsed.features.feature[\"img\"]\n print(img_feature)\n print(\"=========================-------\")\n\n # DECODE_PNG\n img_bytes = img_feature.bytes_list.value[0]\n img_png = tf.compat.v1.image.decode_png(img_bytes)\n print(\"PNG decoded...\")\n\n # VIEW_IMG\n if p_view_bool:\n plt.imshow(img_png, interpolation = \"nearest\")\n\n print(\"viewing file - %s\"%(p_tfrecord_path_str))\n plt.show()\n\n # reshaped_tensor = np.reshape(example_parsed.numpy(), (img_width_int, img_height_int))\n # print(reshaped_tensor)\n \n\n#---------------------------------------------------------------------------\nmain()","repo_name":"gloflow/gloflow","sub_path":"rust/gf_images_jobs/test/test_tf.py","file_name":"test_tf.py","file_ext":"py","file_size_in_byte":8900,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"23312211462","text":"import os\nfrom sklearn.metrics.pairwise import pairwise_distances\nimport matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nimport argparse\nfrom functions import *\nfrom WordEmbeddingLayer import *\n\n\nclass PairwiseDistPlot(object):\n\n def __init__(self,metric=\"cosine\"):\n self.metric = metric\n\n\n def compute_dists(self,list_of_vectors):\n return pairwise_distances(list_of_vectors,metric=self.metric, n_jobs=1)\n\n def compute_and_print_dists(self,list_of_vectors,id_dic):\n dists = self.compute_dists(list_of_vectors)\n\n return dists\n\n def compute_and_plot_dists(self,list_of_vectors,id_dic):\n dists = self.compute_dists(list_of_vectors)\n\n # Display matrix\n plt.imshow(dists, cmap=plt.cm.gray, interpolation='none')\n plt.xticks(np.arange(len(id_dic)),id_dic)\n plt.yticks(np.arange(len(id_dic)),id_dic)\n plt.xticks(rotation=90)\n plt.show()\n\n def 
compute_and_plot_diff_dists(self,list_of_vectors_1,list_of_vectors_2,id_dic):\n        dists_1 = np.tanh(self.compute_dists(list_of_vectors_1))\n        dists_2 = np.tanh(self.compute_dists(list_of_vectors_2))\n\n        # Display matrix\n        plt.imshow(abs(dists_1 - dists_2), cmap=plt.cm.gray, interpolation='none')\n        plt.xticks(np.arange(len(id_dic)),id_dic)\n        plt.yticks(np.arange(len(id_dic)),id_dic)\n        plt.xticks(rotation=90)\n        plt.show()\n\n\n\nif __name__ == '__main__':\n\n    pdp = PairwiseDistPlot()\n\n    fMRI_data_path = \"../data/\"\n    fMRI_data_filename = \"data_\"\n    fMRI_data_postfix = \".csv\"\n    subject_id = str(1)\n    subject = subject_id\n\n    # Instantiate the parser\n    parser = argparse.ArgumentParser(\n        description='Single Layer Feed Forward Network for Brain Activation Prediction Task')\n    parser.add_argument('--subject', '-s', type=str, nargs='?',\n                        help='An optional integer specifying the subject id', default=\"1\")\n\n    args = parser.parse_args()\n    print(\"subject id %s\" % args.subject)\n\n    fMRI_file = fMRI_data_path + fMRI_data_filename + args.subject + fMRI_data_postfix\n\n    brain_activations_1 = genfromtxt(fMRI_file, delimiter=',')\n    brain_activations = brain_activations_1 - np.mean(brain_activations_1, axis=0)\n    brain_activations = np.tanh(brain_activations)\n\n    words_1 = []\n    with open('../data/words', 'r') as f:\n        reader = csv.reader(f)\n        words_1 = list(reader)\n\n    words = []\n    words.extend([w[0] for w in words_1])\n\n\n    conds_1 = []\n    with open('../data/conds', 'r') as f:\n        reader = csv.reader(f)\n        conds_1 = list(reader)\n    conds = [int(c[0]) for c in conds_1]\n\n    cond_sorted_set = np.argsort(conds[:60])\n    word_set = np.asarray(words)[cond_sorted_set]\n\n    print(\"number of words: %d \" % len(word_set))\n\n    selected_file_name = \"general_selected_500_\" + subject + \".npy\"\n\n    if not os.path.isfile(selected_file_name):\n        selected = select_stable_voxels(brain_activations_1, word_set, words, number_of_trials=6,\n                                        size_of_selection=500)\n        np.save(selected_file_name, selected)\n\n    selected = np.load(selected_file_name)\n\n    mean_Activations = []\n\n    words = np.asarray(words)\n    for word in word_set:\n        indices = np.where(words == word)[0]\n        mean_Activations.append(np.mean(brain_activations[indices, :], axis=0))\n\n    mean_Activations = np.asarray(mean_Activations)\n\n\n\n    pdp.compute_and_plot_dists(mean_Activations[:,selected],word_set)\n\n    words = word_set\n    wem = WordEmbeddingLayer()\n    wem.load_filtered_embedding(\"../data/neuro_words\")\n\n    embedded_words = wem.embed_words(word_set)\n    word_representations = embedded_words\n\n    pdp.compute_and_plot_diff_dists(mean_Activations[:,selected],word_representations,word_set)","repo_name":"samiraabnar/NeuroSemantics","sub_path":"src/PairwiseDistPlot.py","file_name":"PairwiseDistPlot.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"}
+{"seq_id":"2984583425","text":"\"\"\"\nHINTS:\n* use the // operator for integer division\n* use the % operator\n* check that the number is not equal to 0 to avoid division by zero\n\"\"\"\n\n\ndef digit_sum(number):\n    if number == 0:\n        return 0\n    result = number % 10 + digit_sum(number // 10)\n    return result\n\n\nif __name__ == '__main__':\n    assert digit_sum(27) == 9\n    assert digit_sum(123) == 6\n    assert digit_sum(0) == 0\n    assert digit_sum(1) == 1\n\n\n# Write a recursive function to count the elements in a list.\n\ndef count(list):\n    if list == []:\n        return 0\n    return 1 + count(list[1:])\n\n# Find the largest number in a list.\n\ndef max(list):\n    # base case on a single element so one-item lists terminate (the original len == 2 base recursed forever on them)\n    if len(list) == 1:\n        return list[0]\n    sub_max = max(list[1:])\n    return list[0] if list[0] > sub_max else sub_max","repo_name":"Oleksandr015/Algorytmy","sub_path":"AISD/recursion/digit_sum.py","file_name":"digit_sum.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1043506323","text":"import shutil\nimport os\nimport re\nfrom runtimer import RunTimer\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom nmldbmodel import NMLDB_Model\nfrom tables import Channels\nfrom collector import Collector\nfrom neuronrunner import NeuronRunner, NumericalInstabilityException\nfrom math import pi\nfrom scipy.ndimage.filters import median_filter\n\n\nclass ChannelModel(NMLDB_Model):\n    def __init__(self, *args, **kwargs):\n        super(ChannelModel, self).__init__(*args, **kwargs)\n\n        self.all_properties.extend([\n            'NEURON_conversion',\n            'equation_count',\n            'runtime_per_step',\n            'tolerances',\n            'stability_range',\n            'ACTIVATION',\n            'DEACTIVATION',\n            'INACTIVATION'\n        ])\n\n        self.server.connect()\n\n        # Fetch channel record\n        self.channel_record = Channels.get_or_none(Channels.Model_ID == self.get_model_nml_id())\n\n        if self.channel_record is None:\n            raise Exception(\"No records found in Channels table for model: \" + self.get_model_nml_id())\n\n        self.is_passive = self.channel_record.Type.ID == 'pas'\n\n        # Pre-retrieve the resting v of the channel type (the first voltage level of a protocol)\n        self.rest_v = float(self.channel_record.Type.Activation_Protocol.Voltages.split(',')[0])\n\n        self.ion = self.channel_record.Type.Species\n        self.erev = self.channel_record.Type.Reversal_Potential\n\n        self.ca_levels = self.channel_record.Type.Ca_Levels.split(',') \\\n            if self.channel_record.Type.Ca_Levels is not None \\\n            else None\n\n    def save_ACTIVATION(self):\n        self.save_channel_protocol(self.channel_record.Type.Activation_Protocol, \"ACTIVATION\")\n\n    def save_INACTIVATION(self):\n        self.save_channel_protocol(self.channel_record.Type.Inactivation_Protocol, \"INACTIVATION\")\n\n    def save_DEACTIVATION(self):\n        self.save_channel_protocol(self.channel_record.Type.Deactivation_Protocol, \"DEACTIVATION\")\n\n    def save_channel_protocol(self, protocol_record, protocol_name):\n        self.remove_protocol_waveforms(protocol_name)\n\n        durations_ss, voltages_ss, durations_stim, voltages_stim = self.get_durations_voltages(protocol_record)\n\n        self.save_vclamp_set(protocol_name,\n                             durations_ss, voltages_ss,\n                             durations_stim, voltages_stim,\n                             protocol_record.Low_Voltage, protocol_record.High_Voltage,\n                             step_count=11,\n                             ca_concentrations=self.ca_levels)\n\n    def get_durations_voltages(self, protocol):\n        times = protocol.Times.split(',')\n        voltages = protocol.Voltages.split(',')\n\n        durations = []\n        steps = []\n        prev_time = None\n\n        for t, time in enumerate(times):\n            if prev_time == time:\n                continue\n\n            if prev_time is not None:\n                duration = float(time) - float(prev_time)\n                durations.append(duration)\n                steps.append(voltages[t])\n\n            prev_time = time\n\n        durations_ss = []\n        voltages_ss = []\n        durations_stim = []\n        voltages_stim = []\n\n        for v, voltage in enumerate(steps):\n            # SS is before the steps begin\n            if len(durations_stim) == 0 and voltage != 'LOWHIGH':\n                durations_ss.append(durations[v])\n                voltages_ss.append(voltage)\n\n            # Stim is after SS\n            else:\n                durations_stim.append(durations[v])\n                voltages_stim.append(voltage)\n\n        return durations_ss, voltages_ss, durations_stim, 
voltages_stim\n\n def save_vclamp_set(self, protocol,\n durations_ss, voltages_ss,\n durations_stim, voltages_stim,\n voltage_low, voltage_high,\n step_count, ca_concentrations=None):\n\n if ca_concentrations is None:\n ca_concentrations = [None]\n\n # Run the same protocol at different Ca concentrations\n for ca_conc in ca_concentrations:\n\n voltages_ss = self.stable_voltages(voltages_ss)\n\n # Reach the desired steady-state\n result_ss = self.get_vclamp_response(durations=durations_ss,\n voltages=voltages_ss,\n ca_conc=ca_conc,\n save_state=True)\n\n # Create current amplitude set\n steps = np.linspace(\n max(voltage_low, self.channel_record.Stability_Range_Low),\n min(voltage_high, self.channel_record.Stability_Range_High),\n num=step_count) \\\n .tolist()\n\n # Run each vclamp as a separate simulation, resuming from desired steady state\n for step_v in steps:\n voltages = [step_v if v == 'LOWHIGH' else v for v in voltages_stim]\n\n # Ensure voltages are within stability range\n voltages = self.stable_voltages(voltages)\n\n result_stim = self.get_vclamp_response(durations=durations_stim,\n voltages=voltages,\n ca_conc=ca_conc,\n restore_state=True)\n\n result = self.concat_tvig_dicts(result_ss, result_stim)\n\n self.save_tvig_plot(label=protocol, case=str(step_v) + \" mV @ \" + str(ca_conc) + \" mM\",\n tvig_dict=result)\n\n meta_protocol = None if ca_conc is None else \"Ca2+ \" + str(ca_conc) + \" mM\"\n\n self.save_tvig_waveforms(protocol=protocol,\n label=str(step_v) + \" mV\",\n tvig_dict=result,\n meta_protocol=meta_protocol)\n\n def stable_voltages(self, steps):\n min_v = self.channel_record.Stability_Range_Low\n max_v = self.channel_record.Stability_Range_High\n return [max(min(float(v), max_v), min_v) for v in steps]\n\n def concat_tvig_dicts(self, dict1, dict2):\n result = {\n \"t\": dict1['t'] + dict2['t'],\n \"v\": dict1['v'] + dict2['v'],\n \"g\": dict1['g'] + dict2['g'],\n \"i\": dict1['i'] + dict2['i'],\n \"run_time\": dict1['run_time'] + dict2['run_time'],\n \"steps\": dict1['steps'] + dict2['steps'],\n \"cvode_active\": dict1['cvode_active'],\n \"dt_or_atol\": dict1['dt_or_atol'],\n }\n\n return result\n\n def save_tvig_plot(self, label, tvig_dict, case=\"\"):\n plt.clf()\n\n plt.figure(1)\n plt.subplot(311)\n plt.plot(tvig_dict[\"t\"], tvig_dict[\"v\"], label=\"Voltage - \" + label + (\" @ \" + case if case != \"\" else \"\"))\n plt.ylim(-160, 80)\n plt.legend()\n\n plt.subplot(312)\n plt.plot(tvig_dict[\"t\"], tvig_dict[\"g\"], label=\"Conductance - \" + label + (\" @ \" + case if case != \"\" else \"\"))\n plt.legend()\n\n plt.subplot(313)\n plt.plot(tvig_dict[\"t\"], tvig_dict[\"i\"], label=\"Current - \" + label + (\" @ \" + case if case != \"\" else \"\"))\n plt.legend()\n plt.savefig(label + (\"(\" + case + \")\" if case != \"\" else \"\") + \".png\")\n\n def save_tvig_waveforms(self, protocol, label, tvig_dict, meta_protocol):\n self.server.connect()\n\n with self.server.db.atomic() as transaction:\n self.create_or_update_waveform(protocol, label, meta_protocol, tvig_dict[\"t\"], \"Voltage\", tvig_dict[\"v\"],\n \"mV\",\n tvig_dict[\"run_time\"], None, tvig_dict[\"dt_or_atol\"],\n tvig_dict[\"cvode_active\"], tvig_dict[\"steps\"])\n self.create_or_update_waveform(protocol, label, meta_protocol, tvig_dict[\"t\"], \"Conductance\",\n tvig_dict[\"g\"], \"pS\",\n tvig_dict[\"run_time\"], None, tvig_dict[\"dt_or_atol\"],\n tvig_dict[\"cvode_active\"], tvig_dict[\"steps\"])\n self.create_or_update_waveform(protocol, label, meta_protocol, tvig_dict[\"t\"], \"Current\", 
tvig_dict[\"i\"],\n \"pA\",\n tvig_dict[\"run_time\"], None, tvig_dict[\"dt_or_atol\"],\n tvig_dict[\"cvode_active\"], tvig_dict[\"steps\"])\n\n def get_vclamp_response(self,\n durations,\n voltages,\n ca_conc,\n restore_state=False, save_state=False):\n\n def vclamp_protocol(time_flag):\n self.time_flag = time_flag\n\n # Run channels using fixed step\n print('Running channel protocol using FIXED DT:' + str(self.config.dt) + ' and voltages: ' + str(voltages))\n self.config.cvode_active = 0\n\n # Use dt from the record or default if not specified\n if self.channel_record.Time_Step is not None:\n self.config.dt = self.channel_record.Time_Step\n\n h = self.build_model()\n\n\n if ca_conc is not None:\n self.soma.cai = float(ca_conc)\n\n with RunTimer() as timer:\n if restore_state:\n self.restore_state() # False to avoid the 1-step workaround\n\n # HACK/workaround for 0-currents/conductances on restore\n h.cvode_active(1)\n h.cvode_active(0)\n # End HACK\n\n else:\n h.stdinit()\n\n for s in range(len(voltages)):\n self.vc.dur1 = 1e9\n self.vc.amp1 = float(voltages[s])\n\n self.runFor(durations[s])\n\n if save_state:\n self.save_state()\n\n # Filter out transient spikelets seen when using CVODE\n from scipy.ndimage.filters import median_filter\n\n result = {\n \"t\": self.t_collector.get_values_list(),\n \"v\": self.v_collector.get_values_list(),\n \"g\": median_filter(self.g_collector.get_values_np(), 3).tolist(),\n \"i\": median_filter(self.i_collector.get_values_np(), 3).tolist(),\n \"run_time\": timer.get_run_time(),\n \"steps\": int(self.tvec.size()),\n \"cvode_active\": int(self.config.cvode_active),\n \"dt_or_atol\": self.config.abs_tolerance if self.config.cvode_active else self.config.dt\n }\n\n return result\n\n runner = NeuronRunner(vclamp_protocol)\n runner.DONTKILL = True\n result = runner.run()\n return result\n\n def save_stability_range(self):\n print(\"Getting stability range...\")\n\n self.channel_record.Stability_Range_Low, self.channel_record.Stability_Range_High = self.get_stability_range()\n\n assert self.channel_record.Stability_Range_Low < self.channel_record.Stability_Range_High\n\n self.channel_record.save()\n\n def get_stability_range(self, testLow=-150, testHigh=70):\n\n print(\"Searching for UPPER boundary...\")\n current_range, found_once = self.find_border(\n rest_v=self.rest_v,\n lower_level=self.rest_v,\n upper_level=testHigh,\n delay=500,\n stim_duration=3,\n run_for_after_delay=10,\n test_condition=lambda t, v: False,\n on_unstable=lambda: True,\n max_iterations=7,\n fig_file=\"stabilityHigh.png\",\n skip_current_delay=False\n )\n\n high_edge = min(current_range)\n\n print(\"Searching for LOWER boundary...\")\n current_range, found_once = self.find_border(\n rest_v=self.rest_v,\n lower_level=testLow,\n upper_level=self.rest_v,\n delay=500,\n stim_duration=3,\n run_for_after_delay=10,\n test_condition=lambda t, v: True,\n on_unstable=lambda: False,\n max_iterations=7,\n fig_file=\"stabilityLow.png\",\n skip_current_delay=True\n )\n\n low_edge = max(current_range)\n\n return low_edge, high_edge\n\n def find_border(self, rest_v, lower_level, upper_level,\n delay, stim_duration,\n run_for_after_delay, test_condition, max_iterations, fig_file,\n skip_current_delay=False, on_unstable=None, test_early=False):\n\n state_file = 'border_state.bin'\n\n if not skip_current_delay:\n def reach_resting_state(time_flag):\n self.time_flag = time_flag\n self.build_model()\n\n print(\"Simulating till current onset...\")\n self.sim_init()\n self.set_voltages(voltages=(rest_v, 0, 
0), durations=(delay, 0, 0))\n self.runFor(delay)\n self.save_state(state_file=state_file)\n print(\"Resting state reached. State saved.\")\n\n runner = NeuronRunner(reach_resting_state)\n result = runner.run()\n\n iterate = True\n iteration = 0\n found_once = False\n\n upperLevel_start = upper_level\n lowerLevel_start = lower_level\n\n while iterate:\n if iteration == 0:\n stim = upper_level\n elif iteration == 1:\n stim = lower_level\n else:\n stim = (lower_level + upper_level) / 2.0\n\n def simulate_iteration(time_flag):\n self.time_flag = time_flag\n h = self.build_model()\n self.set_voltages(voltages=(0, stim, 0), durations=(delay, stim_duration, 0))\n\n self.restore_state(state_file=state_file)\n print(\"Trying \" + str(stim) + \" ...\")\n\n if not test_early:\n t, v = self.runFor(run_for_after_delay)\n found = test_condition(t, v)\n else:\n t, v = self.runFor(run_for_after_delay, test_condition)\n found = test_condition(t, v)\n\n plt.plot(t, v, label=str(round(stim, 4)) + \", Found: \" + str(found))\n plt.legend(loc='upper left')\n plt.savefig(str(iteration) + \" \" + fig_file)\n\n print(\"FOUND\" if found else \"NOT FOUND\")\n\n return found\n\n runner = NeuronRunner(simulate_iteration)\n\n try:\n found = runner.run()\n\n except NumericalInstabilityException:\n if on_unstable is not None:\n found = on_unstable()\n else:\n raise\n\n if found:\n upper_level = stim\n found_once = True\n else:\n lower_level = stim\n\n iteration = iteration + 1\n\n if iteration >= max_iterations or lower_level == upperLevel_start or upper_level == lowerLevel_start:\n iterate = False\n\n stim_range = (lower_level, upper_level)\n\n return stim_range, found_once\n\n def set_voltages(self, voltages, durations=(0, 0, 0)):\n self.vc.dur1, self.vc.dur2, self.vc.dur3 = durations\n self.vc.amp1, self.vc.amp2, self.vc.amp3 = voltages\n\n def sim_init(self):\n from neuron import h\n h.stdinit()\n h.tstop = 1000\n self.set_voltages((0, 0, 0),\n (0, 0, 0))\n\n def load_model(self):\n # Load cell hoc and get soma\n os.chdir(self.temp_model_path)\n print(\n \"Loading NEURON... 
If this step 'freezes', ensure there are no hung NEURON processes with 'pkill -9 nrn*'\")\n from neuron import h, gui\n print(\"DONE\")\n\n h.celsius = self.config.default_temperature\n\n # Create a test cell with the channel\n self.mod_name = self.get_mod_name()\n\n # Passive channels use a different naming scheme\n if self.is_passive:\n self.ion = \"_\" + self.mod_name\n\n self.soma = h.Section()\n self.soma.L = 10\n self.soma.diam = 10\n self.soma.cm = 1000.0 / pi\n\n self.soma.insert(self.mod_name)\n\n # Set max conductance\n setattr(self.soma, \"gmax_\" + self.mod_name, 1.0)\n\n # Set reversal pot\n if hasattr(self.soma, \"e\" + self.ion):\n setattr(self.soma, \"e\" + self.ion, self.erev)\n\n elif hasattr(self.soma, \"e\" + self.ion + \"2\"):\n setattr(self.soma, \"e\" + self.ion + \"2\", self.erev)\n\n else:\n setattr(self.soma, \"e_\" + self.mod_name, self.erev)\n\n return h\n\n def build_model(self, restore_tolerances=True):\n print(\"Loading channel: \" + self.temp_model_path)\n h = self.load_model()\n\n # set up stim\n print('Setting up vi clamps...')\n self.vc = h.SEClamp(self.soma(0.5))\n self.vc.rs = 1e-6\n self.vc.amp1 = 0\n self.vc.dur1 = 0\n\n\n # Set up variable collectors\n print('Setting up tvi collectors...')\n self.t_collector = Collector(self.config.collection_period_ms, h._ref_t)\n self.v_collector = Collector(self.config.collection_period_ms, self.soma(0.5)._ref_v)\n self.g_collector = Collector(self.config.collection_period_ms,\n getattr(self.soma(0.5), \"_ref_gion_\" + self.mod_name))\n\n if hasattr(self.soma(0.5), \"i\" + self.ion):\n self.i_collector = Collector(self.config.collection_period_ms, getattr(self.soma(0.5), \"_ref_i\" + self.ion))\n\n elif hasattr(self.soma(0.5), \"i\" + self.ion + \"2\"):\n self.i_collector = Collector(self.config.collection_period_ms,\n getattr(self.soma(0.5), \"_ref_i\" + self.ion + \"2\"))\n\n else:\n self.i_collector = Collector(self.config.collection_period_ms,\n getattr(self.soma(0.5), \"_ref_i_\" + self.mod_name))\n\n # Keep track of all time steps taken\n self.tvec = h.Vector()\n self.tvec.record(h._ref_t)\n\n # h.nrncontrolmenu()\n self.nState = h.SaveState()\n self.sim_init()\n self.set_abs_tolerance(self.config.abs_tolerance)\n\n if restore_tolerances:\n self.restore_tolerances()\n\n return h\n\n def get_id_from_nml_file(self, nml):\n return re.search('<.*ionChannel.*?id.*?=.*?\"(.*?)\"', nml, re.IGNORECASE).groups(1)[0]\n\n def get_tv(self):\n from neuron import h\n v_np = self.v_collector.get_values_np()\n t_np = self.t_collector.get_values_np()\n\n if np.isnan(v_np).any():\n raise NumericalInstabilityException(\n \"Simulation is numericaly unstable with dt of \" + str(h.dt) + \" ms\")\n\n return (t_np, v_np)\n\n def get_mod_name(self):\n mod_files = self.get_mod_files()\n\n if len(mod_files) != 1:\n raise Exception(\"There should be exactly one .mod file in: \" + self.temp_model_path)\n\n mod_file = mod_files[0]\n\n return mod_file.replace(\".mod\", \"\")\n\n def on_before_mod_compile(self):\n if self.channel_record.Type.ID == 'KCa':\n mod_name = self.get_mod_name()\n\n with open(mod_name + \".mod\", \"r\") as f:\n mod_file = f.read()\n\n mod_file = mod_file.replace(\" SUFFIX \" + mod_name,\n \" SUFFIX \" + mod_name + \"\\n\" +\n \" USEION ca READ cai\")\n\n with open(mod_name + \".mod\", \"w\") as f:\n f.write(mod_file)\n\n","repo_name":"scrook/neuroml-db","sub_path":"Import 
Scripts/model-importer/channelmodel.py","file_name":"channelmodel.py","file_ext":"py","file_size_in_byte":19849,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"25663311691","text":"import numpy as np\nimport pprint\n\ndef modified_cholesky(A):\n L = np.zeros_like(A)\n D = np.zeros_like(A)\n\n D[0,0] = A[0,0]\n for i in range(len(A)):\n L[i,i] = 1\n for j in range(i):\n tmp = 0\n for k in range(j):\n tmp += L[i,k]*D[k,k]*L[j,k]\n L[i,j] = (A[i,j] - tmp)/D[j,j]\n\n tmp = 0\n for k in range(i):\n tmp += L[i,k]**2*D[k,k]\n D[i,i] = A[i, i] - tmp\n\n return L, D\n\nif __name__ == '__main__':\n\n A = np.array([[4, 12, -16],\n [12, 37, -43],\n [-16, -43, 98]], dtype=np.float32)\n\n L, D = modified_cholesky(A)\n pprint.pprint(L)\n pprint.pprint(D)\n pprint.pprint(np.dot(L, np.dot(D, L.T)))\n","repo_name":"djinn-pfa3736/linear_algebra_algorithms","sub_path":"linear_system_solving/modified_cholesky.py","file_name":"modified_cholesky.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11098979255","text":"# 6603 로또\n\n## itertools\nfrom itertools import combinations as cb\nwhile True:\n tmp_arr = list(map(int, input().split()))\n if tmp_arr == [0]:\n break\n nums = tmp_arr[1:]\n ans_list = list(cb(nums, 6))\n for ans in ans_list:\n print(\" \".join(map(str, ans)))\n print()\n\n\n##backtracking\ndef sol(cnt):\n if len(s) == 6:\n print(' '.join(map(str, s)))\n for i in range(cnt, n+1):\n if nums[i] not in s:\n s.append(nums[i])\n sol(i+1)\n s.pop()\n \nwhile True:\n nums = list(map(int, input().split()))\n n = nums[0]\n if n == 0:\n break\n s = []\n sol(1)\n print()","repo_name":"InKyuHwang001/Algorithm","sub_path":"백준/백트래킹/Python/실버/6603.py","file_name":"6603.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4327248506","text":"import asyncio\nfrom datetime import datetime, timedelta\nfrom traveltimepy import Coordinates, TravelTimeSdk, PublicTransport, FullRange, \\\n Location, Property\nfrom traveltimepy.errors import ApiError\nimport pandas as pd\n\n# Paths to CSV files\nMTC = \"combined_data_MP_NE_MTC_Coord_5.csv\"\nUCO = \"combined_data_MP_NE_Coord_uniqueClusterOutside.csv\"\n# Initialize counter to store the number of requests made\nCOUNTER = 0\n\n\n# Read CSV files and prepare data for API requests\ndef read_csv(csv):\n df = pd.read_csv(csv)\n result = []\n result_names = []\n\n # Loop through rows in the DataFrame\n for index, row in df.iterrows():\n coords = Coordinates(lat=row[\"lat\"], lng=row[\"lon\"])\n location = Location(id=row[\"name\"], coords=coords)\n result.append(location)\n result_names.append(row[\"name\"])\n print(len(result))\n return result, result_names\n\n\n# Fetch routes asynchronously\nasync def fetch_routes(loc_start, all_loc_end, sdk):\n try:\n all_results = []\n\n # Iterate in steps of 2 to create pairs of end locations\n for i in range(0, 6, 2):\n if i == 4:\n loc_end = [all_loc_end[i].id]\n else:\n loc_end = [all_loc_end[i].id, all_loc_end[i + 1].id]\n\n # Perform asynchronous API requests\n results = await sdk.routes_async(\n locations=[loc_start] + all_loc_end,\n search_ids={\n loc_start.id: loc_end\n },\n properties=[Property(\"travel_time\"), Property(\"distance\")],\n transportation=PublicTransport(walking_time=28800, pt_change_delay=0),\n departure_time=datetime(year=2023, month=8, day=9, 
hour=7, minute=0),\n range=FullRange(enabled=True, max_results=1, width=3600)\n )\n all_results.append(results)\n\n global COUNTER\n COUNTER += 1\n print(COUNTER)\n\n return all_results\n\n except ApiError as e:\n # Handle API error by waiting for the next minute\n print(\"Too many requests, waiting for the next minute...\")\n current_time = datetime.now()\n next_minute = current_time.replace(second=0, microsecond=0) + timedelta(minutes=1)\n await asyncio.sleep((next_minute - current_time).total_seconds())\n\n\n# Fetch routes using semaphore to limit concurrent API requests\nasync def fetch_routes_with_semaphore(semaphore, loc_start, all_loc_end, sdk):\n async with semaphore:\n return await fetch_routes(loc_start, all_loc_end, sdk)\n\n\n# Main asynchronous function\nasync def main():\n all_results = []\n top1_results = []\n\n # Initialize TravelTime SDK\n sdk = TravelTimeSdk(APP_ID, APP_KEY)\n\n # Read location data from CSV files\n all_loc_start, _ = read_csv(UCO)\n all_loc_end, loc_end_ids = read_csv(MTC)\n\n # Sort the starting locations\n all_loc_start = sorted(all_loc_start, key=lambda location: location.id, reverse=True)\n\n # Semaphore to limit concurrent API requests\n semaphore = asyncio.Semaphore(5) # Limit to 5 simultaneous API requests\n\n tasks = []\n for loc_start in all_loc_start:\n tasks.append(fetch_routes_with_semaphore(semaphore, loc_start, all_loc_end, sdk))\n\n # Gather results from asynchronous tasks\n results = await asyncio.gather(*tasks)\n\n for loc_start, results_list in zip(all_loc_start, results):\n if results_list is not None:\n top1_tt = 43200\n top1 = {}\n for result in results_list:\n result = result[0]\n for location in result.locations:\n if location.properties is not None:\n for prop in location.properties:\n all_results.append(\n {'search_id': result.search_id,\n 'id': location.id, 'travel_time': prop.travel_time,\n 'distance': prop.distance})\n if prop.travel_time < top1_tt:\n top1_tt = prop.travel_time\n top1 = {'search_id': result.search_id, 'id': location.id,\n 'travel_time': prop.travel_time,\n 'distance': prop.distance}\n\n top1_results.append(top1)\n\n # Store results in DataFrames and export to CSV files\n df = pd.DataFrame(all_results)\n df.to_csv(\"traveltime_results_all.csv\", index=False)\n\n df_top1 = pd.DataFrame(top1_results)\n df_top1.to_csv(\"traveltime_results_top1.csv\", index=False)\n\n\n# Execute the main function using asyncio\ndef run_main():\n asyncio.run(main())\n\n\nif __name__ == \"__main__\":\n run_main()\n","repo_name":"tmnstllr/kombinom2_ml","sub_path":"2_feature_engineering/request_traveltime.py","file_name":"request_traveltime.py","file_ext":"py","file_size_in_byte":4757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28650839134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nfrom typing import Tuple, Sequence, Dict, List, Any\n\nfrom PyQt5 import QtCore\n\nfrom viur_admin.network import NetworkService, RequestGroup, RequestWrapper\nfrom viur_admin.priorityqueue import protocolWrapperClassSelector, protocolWrapperInstanceSelector\n\n\nclass SingletonWrapper(QtCore.QObject):\n\tmaxCacheTime = 60 # Cache results for max. 
60 Seconds\n\tupdateDelay = 1500 # 1.5 Seconds grace time before reloading\n\n\tupdatingSucceeded = QtCore.pyqtSignal((str,)) # Adding/Editing an entry succeeded\n\tupdatingFailedError = QtCore.pyqtSignal((str,)) # Adding/Editing an entry failed due to network/server error\n\tupdatingDataAvailable = QtCore.pyqtSignal((str, dict, bool)) # Adding/Editing an entry failed due to missing fields\n\tmodulStructureAvailable = QtCore.pyqtSignal() # We fetched the structure for this module and that data is now\n\t# available\n\tbusyStateChanged = QtCore.pyqtSignal((bool,)) # If True, I'm busy right now\n\n\tdef __init__(self, module: str, *args: Any, **kwargs: Any):\n\t\tsuper(SingletonWrapper, self).__init__()\n\t\tself.module = module\n\t\tself.busy = True\n\t\tself.editStructure: Dict[str, Any] = None\n\t\tself.viewStructure: Dict[str, Any] = None\n\t\tprotocolWrapperInstanceSelector.insert(1, self.checkForOurModul, self)\n\t\tself.deferredTaskQueue: Sequence[Tuple[str, str]] = list()\n\t\treq = NetworkService.request(\n\t\t\t\"/getStructure/%s\" % self.module,\n\t\t\tsuccessHandler=self.onStructureAvailable)\n\n\tdef checkForOurModul(self, moduleName: str) -> bool:\n\t\treturn self.module == moduleName\n\n\tdef onStructureAvailable(self, req: RequestWrapper) -> None:\n\t\ttmp = NetworkService.decode(req)\n\t\tif tmp is None:\n\t\t\tself.checkBusyStatus()\n\t\t\treturn\n\t\tfor stype, structlist in tmp.items():\n\t\t\tstructure: OrderedDict = OrderedDict()\n\t\t\tfor k, v in structlist:\n\t\t\t\tstructure[k] = v\n\t\t\tif stype == \"viewSkel\":\n\t\t\t\tself.viewStructure = structure\n\t\t\telif stype == \"editSkel\":\n\t\t\t\tself.editStructure = structure\n\t\tself.modulStructureAvailable.emit()\n\t\tself.checkBusyStatus()\n\n\tdef edit(self, **kwargs: Any) -> str:\n\t\treq = NetworkService.request(\n\t\t\t\"/%s/edit\" % self.module,\n\t\t\tkwargs, secure=(len(kwargs) > 0),\n\t\t\tfinishedHandler=self.onSaveResult)\n\t\tif not kwargs:\n\t\t\t# This is our first request to fetch the data, don't show a missing hint\n\t\t\treq.wasInitial = True\n\t\telse:\n\t\t\treq.wasInitial = False\n\t\tself.checkBusyStatus()\n\t\treturn str(id(req))\n\n\tdef onSaveResult(self, req: RequestWrapper) -> None:\n\t\ttry:\n\t\t\tdata = NetworkService.decode(req)\n\t\texcept: # Something went wrong, call ErrorHandler\n\t\t\tself.updatingFailedError.emit(str(id(req)))\n\t\t\treturn\n\t\tif data[\"action\"] in [\"editSuccess\", \"deleteSuccess\"]: # Saving succeeded\n\t\t\tself.updatingSucceeded.emit(str(id(req)))\n\t\t\tself.checkBusyStatus()\n\t\telse: # There were missing fields\n\t\t\tself.updatingDataAvailable.emit(str(id(req)), data, req.wasInitial)\n\t\tself.checkBusyStatus()\n\n\tdef checkBusyStatus(self) -> None:\n\t\tbusy = False\n\t\tfor child in self.children():\n\t\t\tif isinstance(child, RequestWrapper) or isinstance(child, RequestGroup):\n\t\t\t\tif not child.hasFinished:\n\t\t\t\t\tbusy = True\n\t\t\t\t\tbreak\n\t\tif busy != self.busy:\n\t\t\tself.busy = busy\n\t\t\tself.busyStateChanged.emit(busy)\n\n\ndef CheckForSingletonModul(moduleName: str, moduleList: dict) -> bool:\n\tmodulData = moduleList[moduleName]\n\tif \"handler\" in modulData and (\n\t\t\tmodulData[\"handler\"] == \"singleton\" or modulData[\"handler\"].startswith(\"singleton.\")):\n\t\treturn True\n\treturn False\n\n\nprotocolWrapperClassSelector.insert(0, CheckForSingletonModul, 
SingletonWrapper)\n","repo_name":"viur-framework/viur-admin","sub_path":"viur_admin/protocolwrapper/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"43080585560","text":"import numpy as np\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom made import MADE\n\n\nclass Encoder(nn.Module):\n def __init__(self, input_size, hidden_size, n_layers=1, drop_ratio=0.5):\n super(Encoder, self).__init__()\n\n self.rnn = nn.LSTM(input_size, hidden_size, n_layers, bidirectional=True, batch_first=True,\n dropout=(0 if n_layers == 1 else drop_ratio))\n self.dropout_layer = nn.Dropout(drop_ratio)\n\n\n def forward(self, ori_embed_seq, input_lens=None):\n # ori_embed_seq: (B, L, emb_dim)\n # input_lens: (B)\n embed_seq = self.dropout_layer(ori_embed_seq)\n\n if input_lens is None:\n outputs, (state_h, state_c) = self.rnn(embed_seq, None)\n else:\n # Dynamic RNN\n packed = torch.nn.utils.rnn.pack_padded_sequence(embed_seq,\n input_lens, batch_first=True, enforce_sorted=False)\n outputs, (state_h, state_c) = self.rnn(packed, None)\n # outputs: (B, L, 2*H)\n # state: (num_layers*num_directions, B, H)\n outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs,\n batch_first=True)\n\n return outputs, (state_h, state_c)\n\n\nclass Attention(nn.Module):\n def __init__(self, d_q, d_v, drop_ratio=0.0):\n super(Attention, self).__init__()\n self.attn = nn.Linear(d_q+d_v, d_v)\n self.v = nn.Parameter(torch.rand(d_v))\n stdv = 1. / np.sqrt(self.v.size(0))\n self.v.data.normal_(mean=0, std=stdv)\n self.dropout = nn.Dropout(drop_ratio)\n\n def forward(self, Q, K, V, attn_mask):\n # Q: (B, 1, H)\n # V: (B, L, num_directions * H)\n # attn_mask: (B, L), True means mask\n k_len = K.size(1)\n q_state = Q.repeat(1, k_len, 1) # (B, L, d_q)\n\n attn_energies = self.score(q_state, K) # (B, L)\n\n attn_energies.masked_fill_(attn_mask, -1e12)\n\n attn_weights = F.softmax(attn_energies, dim=1).unsqueeze(1)\n attn_weights = self.dropout(attn_weights)\n\n # (B, 1, L) * (B, L, d_v) -> (B, 1, d_v)\n context = attn_weights.bmm(V)\n\n return context, attn_weights\n\n\n def score(self, query, memory):\n # query (B, L, d_q)\n # memory (B, L, d_k)\n\n # (B, L, d_q+d_v) -> (B, L, d_v)\n energy = torch.tanh(self.attn(torch.cat([query, memory], 2)))\n energy = energy.transpose(1, 2) # (B, d_v, L)\n\n v = self.v.repeat(memory.size(0), 1).unsqueeze(1) # (B, 1, d_v)\n energy = torch.bmm(v, energy) # (B, 1, d_v) * (B, d_v, L) -> (B, 1, L)\n return energy.squeeze(1) # (B, L)\n\n\nclass Decoder(nn.Module):\n def __init__(self, input_size, hidden_size,\n n_layers=1, drop_ratio=0.2, attn_drop_ratio=0.1):\n super(Decoder, self).__init__()\n\n # for bidir encoder\n self.dropout_layer = nn.Dropout(drop_ratio)\n self.attention = Attention(d_q=hidden_size, d_v=hidden_size*2,\n drop_ratio=attn_drop_ratio)\n\n # hidden_size for attention output, input_size for emb\n self.rnn = nn.LSTM(256, hidden_size, n_layers,\n dropout=(0 if n_layers == 1 else drop_ratio), batch_first=True)\n\n self.dec_merge = nn.Linear(input_size, 256)\n\n def forward(self, emb_inp, last_state, enc_outs, attn_mask, feature):\n # emb_inp: (B, 1, emb_size)\n # enc_outs: (B, L, H*2)\n # feature: (B, feature_size)\n embedded = self.dropout_layer(emb_inp)\n\n # use h_t as query\n # last_state[0]: (1, B, H)\n query = last_state[0].transpose(0,1) # (B, 1, H)\n\n # context: (B, 1, H*2)\n context, attn_weights = 
self.attention(query, enc_outs, enc_outs, attn_mask)\n\n rnn_input = torch.cat([embedded, context, feature.unsqueeze(1)], 2)\n\n x = self.dec_merge(rnn_input)\n output, state = self.rnn(x, last_state)\n\n output = output.squeeze(1) # (B, 1, N) -> (B, N)\n return output, state, attn_weights\n\n\n\nclass InverseAutoregressiveBlock(nn.Module):\n \"\"\"The Inverse Autoregressive Flow block,\n https://arxiv.org/abs/1606.04934\"\"\"\n def __init__(self, n_z, n_h, n_made):\n super(InverseAutoregressiveBlock, self).__init__()\n\n # made: take as inputs: z_{t-1}, h; output: m_t, s_t\n self.made = MADE(num_input=n_z, num_output=n_z * 2,\n num_hidden=n_made, num_context=n_h)\n self.sigmoid_arg_bias = nn.Parameter(torch.ones(n_z) * 2)\n\n\n def forward(self, prev_z, h):\n '''\n prev_z: z_{t-1}\n h: the context\n '''\n m, s = torch.chunk(self.made(prev_z, h), chunks=2, dim=-1)\n # the bias is used to make s sufficiently positive\n # see Sec. 4 in (Kingma et al., 2016) for more details\n s = s + self.sigmoid_arg_bias\n sigma = torch.sigmoid(s)\n z = sigma * prev_z + (1 - sigma) * m\n\n log_det = -F.logsigmoid(s)\n\n return z, log_det\n\n\n\nclass IAF(nn.Module):\n \"\"\"docstring for IAF\"\"\"\n def __init__(self, n_z, n_h, n_made, flow_depth):\n super(IAF, self).__init__()\n self._flow_depth = flow_depth\n self._flows = nn.ModuleList(\n [InverseAutoregressiveBlock(n_z, n_h, n_made)\n for _ in range(0, flow_depth)])\n\n self._reverse_idxes = np.array(np.arange(0, n_z)[::-1])\n\n def _do_reverse(self, v):\n return v[:, self._reverse_idxes]\n\n def forward(self, z, h):\n total_log_det = torch.zeros_like(z, device=z.device)\n for i, flow in enumerate(self._flows):\n z, log_det = flow(z, h)\n z = self._do_reverse(z)\n total_log_det += log_det\n return z, total_log_det\n\n#---------------------------------------\n\nclass Criterion(nn.Module):\n def __init__(self, pad_idx):\n super().__init__()\n self._criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=pad_idx)\n self._pad_idx = pad_idx\n\n\n def forward(self, outputs, targets, truncate=False):\n # outputs: (B, L, V)\n # targets: (B, L)\n # truncate: sometimes outputs may be longer than targets,\n # we truncate outputs to the length of targets\n vocab_size = outputs.size(-1)\n tgts = targets.contiguous().view(-1) # tgts: (N)\n\n if truncate:\n tgt_len = targets.size(1)\n outs = outputs[:, 0:tgt_len, :].contiguous().view(-1, vocab_size) # outs: (N, V)\n else:\n outs = outputs.contiguous().view(-1, vocab_size) # outs: (N, V)\n\n non_pad_mask = tgts.ne(self._pad_idx)\n\n loss = self._criterion(outs, tgts) # [N]\n loss = loss.masked_select(non_pad_mask)\n\n #loss = torch.where(torch.isnan(loss), torch.full_like(loss, 0.0), loss)\n #loss = torch.where(torch.isinf(loss), torch.full_like(loss, 1.0), loss)\n\n return loss.mean()\n","repo_name":"XiaoyuanYi/StyIns","sub_path":"sources/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"61"} +{"seq_id":"14226916693","text":"#here from the previous code we use socket to reterive data from internet\n#now we are going to use urllib to make code more easy also \n\n#we use library urllib\n\n#import modulename .library\nimport urllib.request,urllib.parse,urllib.error\n\n# modulelibrary.function()\n#urllib.urlopen(\"url\") this return a file handler \nfilehandle=urllib.request.urlopen(\"http://data.pr4e.org/romeo.txt\")\n\n \n\n\n#this is now like treating a file not a web page which is very nice 
and easy\n#to do\n\n#empty list\nle=list()\n#empty dictionary\nd=dict()\n\n#iterate over filehandle using loop\nfor i in filehandle:\n    #putting data into list le and also splitting them when a space comes\n    le=i.decode().split()\n\n    #now iterate over the list\n    for j in le:\n        #putting and counting data into dictionary \n        d[j]=d.get(j,0) +1\n#now printing dictionary\nprint(d)\n \n \n","repo_name":"harshittaneja090/mywork.github.io","sub_path":"python/networking all/socket networking module/urllib/code 2.py","file_name":"code 2.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9747413028","text":"import xcffib\nimport struct\nimport io\nMAJOR_VERSION = 1\nMINOR_VERSION = 6\nkey = xcffib.ExtensionKey(\"RANDR\")\n_events = {}\n_errors = {}\nclass TRANSFORM(xcffib.Struct):\n    xge = False\n    def __init__(self, unpacker):\n        if isinstance(unpacker, xcffib.Protobj):\n            unpacker = xcffib.MemoryUnpacker(unpacker.pack())\n        xcffib.Struct.__init__(self, unpacker)\n        base = unpacker.offset\n        self.matrix11, self.matrix12, self.matrix13, self.matrix21, self.matrix22, self.matrix23, self.matrix31, self.matrix32, self.matrix33 = unpacker.unpack(\"iiiiiiiii\")\n        self.bufsize = unpacker.offset - base\n    def pack(self):\n        buf = io.BytesIO()\n        buf.write(struct.pack(\"=iiiiiiiii\", self.matrix11, self.matrix12, self.matrix13, self.matrix21, self.matrix22, self.matrix23, self.matrix31, self.matrix32, self.matrix33))\n        return buf.getvalue()\n    fixed_size = 36\n    @classmethod\n    def synthetic(cls, matrix11, matrix12, matrix13, matrix21, matrix22, matrix23, matrix31, matrix32, matrix33):\n        self = cls.__new__(cls)\n        self.matrix11 = matrix11\n        self.matrix12 = matrix12\n        self.matrix13 = matrix13\n        self.matrix21 = matrix21\n        self.matrix22 = matrix22\n        self.matrix23 = matrix23\n        self.matrix31 = matrix31\n        self.matrix32 = matrix32\n        self.matrix33 = matrix33\n        return self\nclass GetCrtcTransformReply(xcffib.Reply):\n    xge = False\n    def __init__(self, unpacker):\n        if isinstance(unpacker, xcffib.Protobj):\n            unpacker = xcffib.MemoryUnpacker(unpacker.pack())\n        xcffib.Reply.__init__(self, unpacker)\n        base = unpacker.offset\n        unpacker.unpack(\"xx2x4x\")\n        self.pending_transform = TRANSFORM(unpacker)\n        self.has_transforms, = unpacker.unpack(\"B3x\")\n        unpacker.pad(TRANSFORM)\n        self.current_transform = TRANSFORM(unpacker)\n        self.pending_len, self.pending_nparams, self.current_len, self.current_nparams = unpacker.unpack(\"4xHHHH\")\n        unpacker.pad(\"c\")\n        self.pending_filter_name = xcffib.List(unpacker, \"c\", self.pending_len)\n        unpacker.pad(\"i\")\n        self.pending_params = xcffib.List(unpacker, \"i\", self.pending_nparams)\n        unpacker.pad(\"c\")\n        self.current_filter_name = xcffib.List(unpacker, \"c\", self.current_len)\n        unpacker.pad(\"i\")\n        self.current_params = xcffib.List(unpacker, \"i\", self.current_nparams)\n        self.bufsize = unpacker.offset - base\nclass GetCrtcTransformCookie(xcffib.Cookie):\n    reply_type = GetCrtcTransformReply\nclass randrExtension(xcffib.Extension):\n    def GetCrtcTransform(self, crtc, is_checked=True):\n        buf = io.BytesIO()\n        buf.write(struct.pack(\"=xx2xI\", crtc))\n        return self.send_request(27, buf, GetCrtcTransformCookie, is_checked=is_checked)\nxcffib._add_ext(key, randrExtension, _events, 
_errors)\n","repo_name":"tych0/xcffib","sub_path":"test/generator/randr.py","file_name":"randr.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"61"} +{"seq_id":"14103300366","text":"from typing import Optional\n\nfrom rest_framework import status, serializers\nfrom rest_framework.response import Response\n\n\ndef response(data: dict,\n message: Optional[str] = None,\n extra: Optional[dict] = None,\n status_code: Optional[int] = status.HTTP_200_OK,\n success: Optional[bool] = True):\n response_data = dict(data=data,\n success=success,\n status_code=status_code,\n message=message,\n extra=extra or {})\n return Response(data=response_data, status=status.HTTP_200_OK)\n\n\ndef failed_validation_response(\n serializer: Optional[serializers.Serializer] = None,\n error: Optional[str] = None):\n fail_validation_reason = \"Some of the provided data was incorrect\"\n if not any([serializer, error]):\n raise ValueError(\n \"At least one parameter should be provided: serializer or error\")\n if error:\n fail_validation_reason = error\n elif serializer:\n fail_validation_reason = \" \".join([\n f\"{key}: {[value[:] for value in values][0]}\"\n for key, values in serializer.errors.items()\n ])\n response_data: dict = dict(data=None,\n success=False,\n status_code=status.HTTP_400_BAD_REQUEST,\n message=fail_validation_reason,\n extra={})\n return Response(data=response_data, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"BankingBattle/BB_back","sub_path":"bb_back/bb_back/core/utils/view_utils.py","file_name":"view_utils.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16517454384","text":"import unittest\nfrom .test_response_data import TestResponse\nfrom src.geocode_helper import GeocodeServiceHelper\nfrom src.geocode_proxy import GeocodeProxy\nfrom src.geocode_adapters import GoogleAdapter, HereAdapter\n\n\nclass TestGeocodeProxy (unittest.TestCase):\n\n @staticmethod\n def mock_api(url, name):\n tr = TestResponse()\n\n if name == \"Google Geocode\":\n return tr.google_response\n else:\n return tr.here_response\n\n def setUp(self):\n self.mock_helper = GeocodeServiceHelper()\n\n self.mock_helper.invoke_api = self.mock_api\n\n config = self.mock_helper.get_config(\"config.test.ini\")\n\n self.gcp = GeocodeProxy([GoogleAdapter(config, True, self.mock_helper.invoke_api),\n HereAdapter(config, False, self.mock_helper.invoke_api)])\n\n def test_primary_service_works(self):\n services = self.gcp.services\n self.assertTrue(services[0].is_primary)\n self.assertTrue(not services[1].is_primary)\n\n assert self.gcp.get_coordinates_by_address(\"425 W Randolph Chicago\") == {'lng': -87.6389545, 'lat': 41.8841621}\n\n self.assertTrue(services[0].is_primary)\n self.assertTrue(not services[1].is_primary)\n\n def test_failover_to_secondary_service(self):\n services = self.gcp.services\n self.assertTrue(services[0].is_primary)\n self.assertTrue(not services[1].is_primary)\n\n self.gcp.services[0].api_helper = lambda x, y: \"broken api response\"\n\n res = self.gcp.get_coordinates_by_address(\"425 W Randolph Chicago\")\n self.assertTrue(res == {'lng': -87.6387699, 'lat': 41.88449})\n\n self.assertTrue(not services[0].is_primary)\n self.assertTrue(services[1].is_primary)\n\n\nif __name__ == \"main\":\n 
unittest.main(verbosity=2)\n\n","repo_name":"mereck/geocode-proxy","sub_path":"tests/test_geocode_proxy.py","file_name":"test_geocode_proxy.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21897430044","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 11 09:42:35 2021\r\n\r\n@author: anowakowska\r\n\"\"\"\r\n\r\n\r\nimport time\r\nfrom urllib.request import urlopen as uReq, Request as Req\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nczas = time.localtime()\r\ndata = str(str(czas[0])+\"_\"+str(czas[1])+\"_\"+str(czas[2]))\r\n\r\n\r\n# creating the file\r\nfilename = 'sv_bg_{p}.csv'.format(p=data)\r\nf = open(filename, 'w', encoding='utf-8')\r\n\r\n\r\n# headers\r\nheaders = \" Nazwa; Marka; Cena; \\n\"\r\nf.write(headers)\r\n\r\n\r\nkategorie = [[0, 1, 2, 3, 4, 5],[\"m\", \"d\", \"s\", \"dz_s\", \"dz_b\", \"dz_u\"],[\"https://www.sportvision.bg/produkti/mens/page-0\", \"https://www.sportvision.bg/produkti/womens/page-0\", \"https://www.sportvision.bg/produkti/page-0\", \"https://www.sportvision.bg/produkti/detsa/page-0\", \"https://www.sportvision.bg/obuvki/detsa/page-0\", \"https://www.sportvision.bg/drehi/detsa/page-0\"]]\r\n\r\nfor line in kategorie[0]:\r\n    my_url = kategorie[2][line]\r\n\r\n\r\n    # opening website, grabbing\r\n    uClient = uReq(Req(my_url, headers={'User-Agent': 'Mozilla/5.0'}))\r\n    page_html = uClient.read()\r\n    uClient.close()\r\n    # html parsing\r\n    page_soup = soup(page_html, 'html.parser')\r\n    \r\n    \r\n    \r\n    # getting the number of pages\r\n    a = page_soup.find(\"a\", {\"rel\": \"last\"}).text\r\n    \r\n    \r\n    \r\n    \r\n    for i in range(int(a)):\r\n        my_url = my_url\r\n        uClient = uReq(Req(my_url, headers={'User-Agent': 'Mozilla/5.0'}))\r\n        page_html = uClient.read()\r\n        uClient.close()\r\n        page_soup = soup(page_html, \"html.parser\")\r\n        buty = page_soup.findAll(\"div\", {\"class\": \"wrapper-grid-view item product-item ease col-xs-6 col-sm-4 col-md-3 col-lg-3 grid-view\"})\r\n        \r\n        \r\n        \r\n        for but in buty:\r\n            nazwa = but[\"data-product-item-id\"] \r\n            \r\n            marka = but[\"data-productbrand\"]\r\n            \r\n            cena = but[\"data-productprice\"]\r\n            \r\n            \r\n            \r\n            \r\n            f.write(str(nazwa) + \";\" + str(marka) + \";\" + str(cena) + \";\" + \"\\n\")\r\n            \r\n            \r\n            \r\n        if kategorie[1][line] == \"m\":\r\n            my_url = 'https://www.sportvision.bg/produkti/mens/page-{p:s}'.format(p=str(i + 1))\r\n        else:\r\n            if kategorie[1][line] == \"d\":\r\n                my_url = 'https://www.sportvision.bg/produkti/womens/page-{p:s}'.format(p=str(i + 1))\r\n            else:\r\n                if kategorie[1][line] == \"s\":\r\n                    my_url = 'https://www.sportvision.bg/produkti/page-{p:s}'.format(p=str(i + 1))\r\n                else:\r\n                    if kategorie[1][line] == \"dz_s\":\r\n                        my_url = 'https://www.sportvision.bg/produkti/detsa/page-{p:s}'.format(p=str(i + 1))\r\n                    else:\r\n                        if kategorie[1][line] == \"dz_b\":\r\n                            my_url = 'https://www.sportvision.bg/obuvki/detsa/page-{p:s}'.format(p=str(i + 1))\r\n                        else:\r\n                            if kategorie[1][line] == \"dz_u\":\r\n                                my_url = 'https://www.sportvision.bg/drehi/detsa/page-{p:s}'.format(p=str(i + 1))\r\n                            else:\r\n                                print(\"Błąd\")\r\n    \r\n    \r\nf.close()\r\n    \r\n\r\n","repo_name":"annaknowakowska95/scrapping","sub_path":"sv.py","file_name":"sv.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"21465499499","text":"with open('input.txt') as infile:\n    droplets = infile.readlines()\n\n# open_sides: 64\n#droplets = [\n#    '2,2,2',\n#    
'1,2,2',\n# '3,2,2',\n# '2,1,2',\n# '2,3,2',\n# '2,2,1',\n# '2,2,3',\n# '2,2,4',\n# '2,2,6',\n# '1,2,5',\n# '3,2,5',\n# '2,1,5',\n# '2,3,5',\n#]\n\nmax_bounds = (0, 0, 0)\n\nfor droplet in droplets:\n x, y, z = (int(c) + 1 for c in droplet.strip().split(','))\n if x > max_bounds[0] or y > max_bounds[1] or z > max_bounds[2]:\n px, py, pz = max_bounds\n max_bounds = max(x, px), max(y, py), max(pz, z)\n\nprint(f'size: {max_bounds}')\n\nvoxels = [[[0 for _ in range(max_bounds[2])] for _ in range(max_bounds[1])] for _ in range(max_bounds[0])]\n\nfor droplet in droplets:\n x, y, z = (int(c) for c in droplet.strip().split(','))\n voxels[x][y][z] = 1\n\nopen_sides = 0\n# size: (20, 20, 19)\n\nfor x in range(max_bounds[0]):\n for y in range(max_bounds[1]):\n for z in range(max_bounds[2]):\n if voxels[x][y][z] == 0:\n continue\n if x == 0 or voxels[x-1][y][z] == 0:\n open_sides += 1\n if x == max_bounds[0] - 1 or voxels[x+1][y][z] == 0:\n open_sides += 1\n if y == 0 or voxels[x][y-1][z] == 0:\n open_sides += 1\n if y == max_bounds[1] - 1 or voxels[x][y+1][z] == 0:\n open_sides += 1\n if z == 0 or voxels[x][y][z-1] == 0:\n open_sides += 1\n if z == max_bounds[2] - 1 or voxels[x][y][z+1] == 0:\n open_sides += 1\n\nprint(f'open_sides: {open_sides}')\n# open_sides: 3610\n","repo_name":"DragonFighter603/AdventOfCode","sub_path":"AOC-2022/day18/2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"38292856305","text":"strs = [\"flower\",\"flow\",\"flight\"]\nprefix = []\n\ntransform = list(zip(*strs))\n\nfor i in transform:\n if len(set(i)) == 1:\n prefix.append(i[0])\n else:\n break\n\nprefixstr = \"\"\nprint(prefixstr.join(prefix))\n\n\n# Treats a list of string as a 2d matrix example below (truncate all elements to same size as smallest string first)\n# [ [a,a,a], [a,a,b], [a,a,c]\n# Transform the matrix so that rows become columns and columns become rows\n# [a,a,a] [a,a,a]\n# [a,a,b]--->[a,a,a] \n# [a,a,c] [a,b,c]\n# First row is the first letters of each word, second row becomes list of seconds latters of each word\n# Then we can easily see if a letter is has same position in each word\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#","repo_name":"ahhossain/leetcode","sub_path":"longest-prefix.py","file_name":"longest-prefix.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13624762884","text":"'''\nJob Connector Data Science\nPurwadhika Startup & Coding School\nKamims, 24 Oktober 2019\n\nT U G A S\n\n'''\n\n## SOAL ##\n\n# S = {bilangan cacah > 11}\n# A = {x|x<10, bilangan prima}\n# B = {5, 7, 9}\n\nS = []\nA = []\nB = []\n\nfor i in [0, 11, 1]:\n S.append(i)\n\nfor i in [2, 3, 5, 7]:\n A.append(i)\n\nfor i in [5, 7, 9]:\n B.append(i)\n\nS = set(S)\nA = set(A)\nB = set(B)\n\nop_1 = (A & B)\nop_2 = (A | B)\nop_3 = (A & op_2)\nop_4 = (B & op_2)\nop_5 = (op_2 & op_2)\nop_6 = (op_1 & op_2)\n\nprint(f'Irisan dari A dan B adalah {op_1}')\nprint(f'Gabungan dari A dan B adalah {op_2}')\nprint(f'Operasi A ∩ (A ∪ B) adalah {op_3}')\nprint(f'Operasi B ∩ (A ∪ B) adalah {op_4}')\nprint(f'Operasi (A ∪ B) ∩ (A ∪ B) adalah {op_5}')\nprint(f'Operasi (A ∩ B) ∪ (A ∪ B) adalah {op_6}')\n\n\n\n","repo_name":"MuhamadAhsanul/JCDS_-_Modul_1_Fundamental","sub_path":"Hari 
4/Tugas4.py","file_name":"Tugas4.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34475079190","text":"\"\"\"\r\nValve API\r\n\r\n@author: Raj Kumar Gupta\r\n\"\"\"\r\n\r\nimport yaml\r\nimport os\r\n\r\nclass Valves:\r\n \"\"\"Class for Sub Vavles\"\"\" \r\n \r\n #class variable\r\n irrigation_status = { 'valve1' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False},\r\n 'valve2' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False}, \r\n 'valve3' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False},\r\n 'valve4' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False},\r\n 'valve5' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False}, \r\n 'valve6' : \r\n {'IrrigationOccuring':False,\r\n 'IrrigationDone':False}\r\n } \r\n \r\n def __init__(self, arg_pump,arg_num_valves=6):\r\n #Valve list\r\n self.valve_list = [] \r\n #Dictionary to stor valves and water fields\r\n self.water_translation = {}\r\n self.water_size = {}\r\n self.water_transparency = {}\r\n self.valve_pipe_translation = {}\r\n self.valve_pipe_length = {}\r\n #Variable for total no of valves present in farm\r\n self.num_of_valves = arg_num_valves\r\n #pump\r\n self.pump = arg_pump\r\n #valve status\r\n self.valve_closed = {}\r\n #valves distribution in the field\r\n self.valve_distribution ={'TOMATO': ['valve1','valve2','valve3'] ,\r\n 'GROUNDNUT':['valve3','valve5','valve6']}\r\n \r\n \r\n #Running path\r\n self.directory = os.getcwd()\r\n self.splited_directory = self.directory.split(\"\\\\\")\r\n del self.splited_directory[len(self.splited_directory)-1]\r\n self.splited_directory.append('Master_controller')\r\n self.new_path = \"\"\r\n for f in self.splited_directory:\r\n self.new_path = self.new_path + f + '/'\r\n \r\n self.get_valves()\r\n self.get_fields()\r\n self.set_valve()\r\n \r\n \r\n def get_valves(self):\r\n \"\"\"Function to get sub valves\"\"\"\r\n for i in range(1 , self.num_of_valves+1):\r\n self.valve_list.append('valve'+ str(i))\r\n return self.valve_list\r\n \r\n def get_fields(self):\r\n \"\"\"Function to get the water fields\"\"\"\r\n for valve in self.valve_list:\r\n Water = self.pump.getFromDef(valve+'_Water')\r\n water_geometry = self.pump.getFromDef(valve+'_water_geometry')\r\n water_appearance = self.pump.getFromDef(valve+'_water_appearance')\r\n valve_pipe = self.pump.getFromDef(valve+'_pipe')\r\n self.water_translation[valve] = Water.getField('translation')\r\n self.water_size[valve] = water_geometry.getField('size') \r\n self.water_transparency[valve] = water_appearance.getField('transparency')\r\n self.valve_pipe_translation[valve] = valve_pipe.getField('translation')\r\n self.valve_pipe_length[valve] = valve_pipe.getField('height') \r\n self.valve_closed[valve] = False\r\n \r\n def set_valve(self):\r\n \"\"\"Function to set the valves\"\"\"\r\n for valves in self.valve_list:\r\n self.pipe_translate = self.valve_pipe_translation[valves].getSFVec3f()\r\n self.pipe_length = self.valve_pipe_length[valves].getSFFloat() \r\n self.length_water = 0.1\r\n self.water_height = self.pipe_length-1.0\r\n self.water_size[valves].setSFVec2f([self.water_height,self.length_water])\r\n self.water_translation[valves].setSFVec3f([self.pipe_translate[0],self.pipe_translate[1], (self.pipe_translate[2]/abs(self.pipe_translate[2]))*(abs(self.pipe_translate[2]) + 0.5)]) \r\n self.water_transparency[valves].setSFFloat(0)\r\n \r\n def open_valve(self, 
arg_valve_num):\r\n \"\"\"Function to open the valve and water start flowing\"\"\"\r\n if arg_valve_num in [val_num for val_num in range(1,7)]: \r\n #print(\"inside open valve 1\")\r\n if not Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationOccuring']: \r\n Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationOccuring'] = True \r\n self.Update_Irrigation_Status(Valves.irrigation_status)\r\n else:\r\n raise Exception(\"No Valve name:\",'valve'+str(arg_valve_num))\r\n \r\n def close_valve(self,arg_valve_num):\r\n \"\"\"Function to close valve\"\"\" \r\n if arg_valve_num in [val_num for val_num in range(1,7)]: \r\n print(\"inside close valve 1\")\r\n if not Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationDone'] and Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationOccuring']: \r\n print(\"inside close valve 2\")\r\n Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationOccuring'] = False \r\n Valves.irrigation_status['valve'+str(arg_valve_num)]['IrrigationDone'] = True \r\n self.Update_Irrigation_Status(Valves.irrigation_status) \r\n print('valve'+str(arg_valve_num),\"is closed\")\r\n else:\r\n raise Exception(\"No Valve name:\",'valve'+str(arg_valve_num))\r\n\r\n def check_irrigation_completed(self , arg_crop_name , arg_stop_len=10):\r\n \"\"\"Function to check whether irrigation of a particular crop is completed or not\"\"\"\r\n self.crop_name = arg_crop_name.upper()\r\n self.valves = self.valve_distribution[self.crop_name] \r\n self.field_section_irrigated = 0\r\n for valve in self.valves:\r\n if self.water_size[valve].getSFVec2f()[1] >= arg_stop_len:\r\n self.field_section_irrigated = self.field_section_irrigated + 1\r\n \r\n if self.field_section_irrigated == 3:\r\n flag = True\r\n else:\r\n flag = False\r\n return flag\r\n\r\n def check_section_irrigation_completed(self , arg_section_num , arg_stop_len=10):\r\n \"\"\"Function to check whether irrigation of a particular section in the field\"\"\"\r\n if arg_section_num in [val_num for val_num in range(1,7)]: \r\n self.valve_name = 'valve'+str(arg_section_num) \r\n if self.water_size[self.valve_name].getSFVec2f()[1] >= arg_stop_len: \r\n flag = True \r\n else: \r\n flag = False \r\n return flag\r\n else:\r\n raise Exception(\"No Valve name:\",'valve'+str(arg_section_num))\r\n \r\n def Open_Irrigation_Status(self):\r\n with open(self.new_path+'IrrigationStatus.yaml') as file:\r\n self.IrrigationStatus = yaml.load(file, Loader=yaml.FullLoader)\r\n \r\n return self.IrrigationStatus\r\n \r\n def Update_Irrigation_Status(self , arg_data): \r\n with open(self.new_path+'IrrigationStatus.yaml', 'w') as file:\r\n dumped = yaml.dump(arg_data,file)\r\n \r\n def Reset_Irrigation_Status(self):\r\n \"\"\"Function to reset the irrigation status\"\"\"\r\n with open(self.new_path+'IrrigationStatus.yaml', 'w') as file:\r\n reset_dumped = yaml.dump(self.irrigation_status,file)\r\n \r\n","repo_name":"ysrastogi/webots_IoT_projects","sub_path":"controlling_water_valve_moisture_sensor/controllers/Master_controller/valve.py","file_name":"valve.py","file_ext":"py","file_size_in_byte":7534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1053628799","text":"# pySCF chkpoint file -> TREXIO hdf5 file\n# author: Kosuke Nakano\n# maintainer: Kosuke Nakano\n# email: \"kousuke_1123@icloud.com\"\n\n# Logger\nfrom logging import getLogger\nlogger = getLogger(\"pyscf-trexio\").getChild(__name__)\n\n\ndef pyscf_to_trexio(\n pyscf_checkfile: str = 
\"pyscf.chk\",\n trexio_filename: str = \"trexio.hdf5\",\n back_end: str = \"hdf5\"\n):\n \"\"\"PySCF to TREXIO converter.\"\"\"\n\n # load python packages\n import os\n import numpy as np\n # load pyscf packages\n from pyscf import scf\n from pyscf.pbc import scf as pbcscf\n\n # ## pySCF -> TREX-IO\n # - how to install trexio\n # - pip install trexio\n\n # import trexio\n import trexio\n\n logger.info(f\"pyscf_checkfile = {pyscf_checkfile}\")\n logger.info(f\"trexio_filename = {trexio_filename}\")\n logger.info(\"Conversion starts...\")\n\n # pyscf instances\n mol = scf.chkfile.load_mol(pyscf_checkfile)\n mf = scf.chkfile.load(pyscf_checkfile, \"scf\")\n\n # PBC info\n try:\n mol.a\n pbc_flag = True\n except AttributeError:\n pbc_flag = False\n logger.info(f\"PBC flag = {pbc_flag}\")\n\n # twist_average info\n if pbc_flag:\n try:\n k = mf[\"kpt\"]\n twist_average = False\n logger.info(\"Single-k calculation\")\n k_list = [k]\n if all([k_i == 0.0 for k_i in list(k)]):\n logger.info(\"k = gamma point\")\n logger.info(\"The generated WF will be real.\")\n force_wf_complex = False\n else:\n logger.info(\"k = general point\")\n logger.info(\"The generated WF will be complex.\")\n force_wf_complex = True\n except KeyError:\n twist_average = True\n logger.info(\"Twisted-average calculation\")\n logger.info(\"Separate TREXIO files are generated\")\n logger.info(\n \"The correspondence between the index \\\n and k is written in kp_info.dat\"\n )\n logger.info(\"The generated WFs will be complex.\")\n force_wf_complex = True\n with open(\"kp_info.dat\", \"w\") as f:\n f.write(\"# k_index, kx, ky, kz\\n\")\n k_list = mf[\"kpts\"]\n finally:\n mol = pbcscf.chkfile.load_cell(pyscf_checkfile)\n k_list = mol.get_scaled_kpts(k_list)\n logger.info(k_list)\n\n else:\n twist_average = False\n k_list = [[0.0, 0.0, 0.0]]\n force_wf_complex = False\n\n # if pbc_flag == true, check if ecp or pseudo\n if pbc_flag:\n if len(mol._pseudo) > 0:\n logger.error(\n \"TREXIO does not support 'pseudo' format for PBC. 
Use 'ecp'.\"\n )\n raise NotImplementedError\n\n if twist_average:\n logger.warning(\n f\"WF at each k point is saved in a separate file kXXXX_{trexio_filename}\"\n )\n logger.warning(\"k points information is stored in kp_info.dat file.\")\n\n # each k WF is stored as a separate file!!\n # for an open-boundary calculation, and a single-k one,\n # k_index is a dummy variable\n for k_index, k_vec in enumerate(k_list):\n assert len(k_vec) == 3 # 3d variable\n # set a filename\n if twist_average:\n logger.info(f\"kpt={k_vec}\")\n filename = os.path.join(\n os.path.dirname(trexio_filename),\n f\"k{k_index}_\" + os.path.basename(trexio_filename),\n )\n logger.info(f\"filename={filename}\")\n with open(\"kp_info.dat\", \"a\") as f:\n f.write(f\"{k_index} {k_vec[0]} {k_vec[1]} {k_vec[2]}\\n\")\n else:\n filename = trexio_filename\n\n if os.path.exists(filename):\n logger.warning(f\"TREXIO file {filename} already exists and will be removed before conversion.\")\n if back_end.lower() == \"hdf5\":\n os.remove(filename)\n else:\n raise NotImplementedError(f\"Please remove the {filename} directory manually.\")\n\n # trexio back end handling\n if back_end.lower() == \"hdf5\":\n trexio_back_end = trexio.TREXIO_HDF5\n elif back_end.lower() == \"text\":\n trexio_back_end = trexio.TREXIO_TEXT\n else:\n raise NotImplementedError(f\"{back_end} back-end is not supported.\")\n\n # trexio file\n trexio_file = trexio.File(filename, mode=\"w\", back_end=trexio_back_end)\n\n ##########################################\n # PBC info\n ##########################################\n if pbc_flag:\n Bohr = 0.5291772109\n if isinstance(mol.a, list):\n a = np.array(mol.a[0]) / Bohr # angstrom -> bohr\n b = np.array(mol.a[1]) / Bohr # angstrom -> bohr\n c = np.array(mol.a[2]) / Bohr # angstrom -> bohr\n else:\n hmatrix=np.fromstring(mol.a, dtype=np.float64, sep=' ').reshape((3,3),order='C')\n a=np.array(hmatrix[0,:])/ Bohr # angstrom -> bohr\n b=np.array(hmatrix[1,:])/ Bohr # angstrom -> bohr\n c=np.array(hmatrix[2,:])/ Bohr # angstrom -> bohr\n\n k_point = k_vec\n periodic = True\n else:\n periodic = False\n\n # pbc and cell info\n trexio.write_pbc_periodic(trexio_file, periodic)\n if pbc_flag:\n trexio.write_cell_a(trexio_file, a)\n trexio.write_cell_b(trexio_file, b)\n trexio.write_cell_c(trexio_file, c)\n trexio.write_pbc_k_point(trexio_file, k_point)\n\n # structure info.\n electron_up_num, electron_dn_num = mol.nelec\n nucleus_num = mol.natm\n atom_charges_list = [mol.atom_charge(i) for i in range(mol.natm)]\n \"\"\"\n atom_nelec_core_list = [\n mol.atom_nelec_core(i) for i in range(mol.natm)\n ]\n atomic_number_list = [\n mol.atom_charge(i) + mol.atom_nelec_core(i)\n for i in range(mol.natm)\n ]\n \"\"\"\n chemical_symbol_list = [mol.atom_pure_symbol(i) for i in range(mol.natm)]\n atom_symbol_list = [mol.atom_symbol(i) for i in range(mol.natm)]\n coords_np = mol.atom_coords(unit=\"Bohr\")\n\n ##########################################\n # Structure info\n ##########################################\n trexio.write_electron_up_num(trexio_file, electron_up_num)\n trexio.write_electron_dn_num(trexio_file, electron_dn_num)\n trexio.write_nucleus_num(trexio_file, nucleus_num)\n trexio.write_nucleus_charge(trexio_file, atom_charges_list)\n trexio.write_nucleus_label(trexio_file, chemical_symbol_list)\n trexio.write_nucleus_coord(trexio_file, coords_np)\n\n ##########################################\n # basis set info\n ##########################################\n # check the orders of the spherical atomic basis in pyscf!!\n # 
gto.spheric_labels(mol, fmt=\"%d, %s, %s, %s\")\n # for s -> s\n # for p -> px, py, pz\n # for l >= d -> m=(-l ... 0 ... +l)\n\n basis_type = \"Gaussian\" # thanks anthony!\n basis_shell_num = int(np.sum([mol.atom_nshells(i) for i in range(nucleus_num)]))\n nucleus_index = []\n for i in range(nucleus_num):\n for _ in range(len(mol.atom_shell_ids(i))):\n nucleus_index.append(i)\n shell_ang_mom = [mol.bas_angular(i) for i in range(basis_shell_num)]\n basis_prim_num = int(np.sum([mol.bas_nprim(i) for i in range(basis_shell_num)]))\n\n basis_exponent = []\n basis_coefficient = []\n for i in range(basis_shell_num):\n for bas_exp in mol.bas_exp(i):\n basis_exponent.append(float(bas_exp))\n for bas_ctr_coeff in mol.bas_ctr_coeff(i):\n basis_coefficient.append(float(bas_ctr_coeff))\n\n basis_shell_index = []\n for i in range(basis_shell_num):\n for _ in range(len(mol.bas_exp(i))):\n basis_shell_index.append(i)\n\n # normalization factors\n basis_shell_factor = [1.0 for _ in range(basis_shell_num)] # 1.0 in pySCF\n\n # gto_norm(l, expnt) => l is angmom, expnt is exponent\n # Note!! Here, the normalization factor of the spherical part\n # are not included. The normalization factor is computed according\n # to Eq.8 of the following paper\n # H.B.S and M.J.F, Int. J. Quant. Chem., 54(1995), 83-87.\n basis_prim_factor = []\n for prim_i in range(basis_prim_num):\n coeff = basis_coefficient[prim_i]\n expnt = basis_exponent[prim_i]\n l_num = shell_ang_mom[basis_shell_index[prim_i]]\n basis_prim_factor.append(\n mol.gto_norm(l_num, expnt) / np.sqrt(4 * np.pi) * np.sqrt(2 * l_num + 1)\n )\n\n ##########################################\n # ao info\n ##########################################\n # to be fixed!! for Victor case mol.cart is false, but the basis seems cartesian...\n if mol.cart:\n ao_cartesian = 99999\n else:\n ao_cartesian = 0 # spherical basis representation\n ao_shell = []\n for i, ang_mom in enumerate(shell_ang_mom):\n for _ in range(2 * ang_mom + 1):\n ao_shell.append(i)\n ao_num = len(ao_shell)\n\n # 1.0 in pyscf (because spherical)\n ao_normalization = [1.0 for _ in range(ao_num)]\n\n ##########################################\n # mo info\n ##########################################\n mo_type = \"MO\"\n\n if twist_average:\n mo_occupation_read = mf[\"mo_occ\"][k_index]\n mo_energy_read = mf[\"mo_energy\"][k_index]\n mo_coeff_read = mf[\"mo_coeff\"][k_index]\n else:\n mo_occupation_read = mf[\"mo_occ\"]\n mo_energy_read = mf[\"mo_energy\"]\n mo_coeff_read = mf[\"mo_coeff\"]\n\n # check if the pySCF calculation is Restricted or Unrestricted\n # Restricted -> RHF,RKS,ROHF,OROKS\n # Unrestricted -> UHF,UKS\n\n if len(mo_energy_read) == 2:\n if isinstance(mo_energy_read[0], float):\n spin_restricted = True\n else:\n spin_restricted = False\n else:\n spin_restricted = True\n\n # the followins are given to TREXIO file lager if spin_restricted == False,\n mo_coefficient_all = []\n mo_occupation_all = []\n mo_energy_all = []\n mo_spin_all = []\n\n # mo read part starts both for alpha and beta spins\n for ns, spin in enumerate([0, 1]):\n\n if spin_restricted:\n mo_occupation = mo_occupation_read\n mo_energy = mo_energy_read\n mo_coeff = mo_coeff_read\n if spin == 1: # 0 is alpha(up), 1 is beta(dn)\n logger.info(\"This is spin-restricted calculation.\")\n logger.info(\"Skip the MO conversion step for beta MOs.\")\n break\n else:\n logger.info(\n f\"MO conversion step for {spin}-spin MOs. 
0 is alpha(up), 1 is beta(dn).\"\n )\n mo_occupation = mo_occupation_read[ns]\n mo_energy = mo_energy_read[ns]\n mo_coeff = mo_coeff_read[ns]\n\n mo_num = len(mo_coeff[0])\n \n mo_spin_all += [spin for _ in range(mo_num)]\n\n # mo reordering because mo_coeff[:,mo_i]!!\n mo_coeff = [mo_coeff[:, mo_i] for mo_i in range(mo_num)]\n\n logger.debug(mo_num)\n logger.debug(len(mo_coeff))\n logger.debug(mo_occupation)\n logger.debug(mo_energy)\n # logger.info(mo_coeff)\n\n # check if MOs are descending order with respect to \"mo occ\"\n # this is usually true, but not always true for\n # RO (restricted open-shell) calculations.\n order_bool = all(\n [\n True if mo_occupation[i] >= mo_occupation[i + 1] else False\n for i in range(len(mo_occupation) - 1)\n ]\n )\n logger.info(f\"MO occupations are in the descending order ? -> {order_bool}\")\n if not order_bool:\n logger.warning(\"MO occupations are not in the descending order!!\")\n logger.warning(\"RO (restricted open-shell) calculations?\")\n logger.warning(\"Reordering MOs...\")\n # reordering MOs.\n # descending order (mo occ)\n reo_moocc_index = np.argsort(mo_occupation)[::-1]\n mo_occupation_o = [mo_occupation[l_num] for l_num in reo_moocc_index]\n mo_energy_o = [mo_energy[l_num] for l_num in reo_moocc_index]\n mo_coeff_o = [mo_coeff[l_num] for l_num in reo_moocc_index]\n # descending order (mo energy)\n mo_coeff = []\n mo_occupation = []\n mo_energy = []\n set_mo_occupation = sorted(list(set(mo_occupation_o)), reverse=True)\n for mo_occ in set_mo_occupation:\n mo_re_index = [\n i for i, mo in enumerate(mo_occupation_o) if mo == mo_occ\n ]\n mo_occupation_t = [mo_occupation_o[l_num] for l_num in mo_re_index]\n mo_energy_t = [mo_energy_o[l_num] for l_num in mo_re_index]\n mo_coeff_t = [mo_coeff_o[l_num] for l_num in mo_re_index]\n reo_ene_index = np.argsort(mo_energy_t)\n mo_occupation += [mo_occupation_t[l_num] for l_num in reo_ene_index]\n mo_energy += [mo_energy_t[l_num] for l_num in reo_ene_index]\n mo_coeff += [mo_coeff_t[l_num] for l_num in reo_ene_index]\n\n logger.debug(\"--mo_num--\")\n logger.debug(mo_num)\n logger.debug(\"--len(mo_coeff)--\")\n logger.debug(len(mo_coeff))\n logger.debug(\"--mo_occupation--\")\n logger.debug(mo_occupation)\n logger.debug(\"--mo_energy--\")\n logger.debug(mo_energy)\n # logger.debug(mo_coeff)\n\n # saved mo_occ and mo_energy\n mo_occupation_all += list(mo_occupation)\n mo_energy_all += list(mo_energy)\n\n # permutation_matrix = [] # for ao and mo swaps, used later\n\n # molecular coefficient reordering\n # TREX-IO employs (m=-l,..., 0, ..., +l) for spherical basis\n mo_coefficient = []\n\n for mo_i in range(mo_num):\n mo = mo_coeff[mo_i]\n mo_coeff_buffer = []\n\n perm_list = []\n perm_n = 0\n for ao_i, ao_c in enumerate(mo):\n\n # initialization\n if ao_i == 0:\n mo_coeff_for_reord = []\n current_ang_mom = -1\n\n # read ang_mom (i.e., angular momentum of the shell)\n bas_i = ao_shell[ao_i]\n ang_mom = shell_ang_mom[bas_i]\n\n previous_ang_mom = current_ang_mom\n current_ang_mom = ang_mom\n\n # set multiplicity\n multiplicity = 2 * ang_mom + 1\n # print(f\"multiplicity = {multiplicity}\")\n\n # check if the buffer is null, when ang_mom changes\n if previous_ang_mom != current_ang_mom:\n assert len(mo_coeff_for_reord) == 0\n\n if current_ang_mom == 0: # s shell\n # print(\"s shell/no permutation is needed.\")\n # print(\"(pyscf notation): s(l=0)\")\n # print(\"(trexio notation): s(l=0)\")\n reorder_index = [0]\n\n elif current_ang_mom == 1: # p shell\n\n # print(\"p shell/permutation is needed.\")\n # 
print(\"(pyscf notation): px(l=+1), py(l=-1), pz(l=0)\")\n # print(\"(trexio notation): pz(l=0), px(l=+1), py(l=-1)\")\n reorder_index = [2, 0, 1]\n\n elif current_ang_mom >= 2: # > d shell\n\n # print(\"> d shell/permutation is needed.\")\n # print(\n # \"(pyscf) e.g., f3,-3(l=-3), f3,-2(l=-2), f3,-1(l=-1), \\\n # f3,0(l=0), f3,+1(l=+1), f3,+2(l=+2), f3,+3(l=+3)\"\n # )\n # print(\n # \"(trexio) e.g, f3,0(l=0), f3,+1(l=+1), f3,-1(l=-1), \\\n # f3,+2(l=+2), f3,-2(l=-2), f3,+3(l=+3), f3,-3(l=-3)\"\n # )\n l0_index = int((multiplicity - 1) / 2)\n reorder_index = [l0_index]\n for i in range(1, int((multiplicity - 1) / 2) + 1):\n reorder_index.append(l0_index + i)\n reorder_index.append(l0_index - i)\n\n else:\n raise ValueError(\"A wrong value was set to current_ang_mom.\")\n\n mo_coeff_for_reord.append(ao_c)\n\n # write MOs!!\n if len(mo_coeff_for_reord) == multiplicity:\n # print(\"--write MOs!!--\")\n mo_coeff_buffer += [\n mo_coeff_for_reord[i] for i in reorder_index\n ]\n\n # reset buffer\n mo_coeff_for_reord = []\n\n # print(\"--write perm_list\")\n perm_list += list(np.array(reorder_index) + perm_n)\n perm_n = perm_n + len(reorder_index)\n\n mo_coefficient.append(mo_coeff_buffer)\n # permutation_matrix.append(perm_list)\n\n mo_coefficient_all += mo_coefficient\n\n # MOs read part end both for alpha and beta spins[l]\n logger.debug(\"len(mo_coefficient_all)\")\n logger.debug(len(mo_coefficient_all))\n logger.debug(\"len(mo_occupation_all)\")\n logger.debug(len(mo_occupation_all))\n logger.debug(\"len(mo_spin_all)\")\n logger.debug(len(mo_spin_all))\n\n # Conversion from Python complex -> real, complex separately.\n # force WF complex\n if force_wf_complex:\n complex_flag = True\n # check if the MOs have imag.!\n else:\n imag_flags = []\n for mo in mo_coefficient_all:\n imag_flags += list(np.isreal(list(np.real_if_close(mo, tol=100))))\n # print(imag_flags)\n if all(imag_flags):\n complex_flag = False\n else:\n complex_flag = True\n\n if complex_flag:\n logger.info(\"The WF is complex\")\n mo_coefficient_real = []\n mo_coefficient_imag = []\n\n for mo__ in mo_coefficient_all:\n mo_real_b = []\n mo_imag_b = []\n for coeff in mo__:\n mo_real_b.append(coeff.real)\n mo_imag_b.append(coeff.imag)\n mo_coefficient_real.append(mo_real_b)\n mo_coefficient_imag.append(mo_imag_b)\n\n else:\n logger.info(\"The WF is real\")\n mo_coefficient_real = [list(np.array(mo).real) for mo in mo_coefficient_all]\n\n logger.debug(\"--MOs Done--\")\n\n ##########################################\n # basis set info\n ##########################################\n trexio.write_basis_type(trexio_file, basis_type) #\n trexio.write_basis_shell_num(trexio_file, basis_shell_num) #\n trexio.write_basis_prim_num(trexio_file, basis_prim_num) #\n trexio.write_basis_nucleus_index(trexio_file, nucleus_index) #\n trexio.write_basis_shell_ang_mom(trexio_file, shell_ang_mom) #\n trexio.write_basis_shell_factor(trexio_file, basis_shell_factor) #\n trexio.write_basis_shell_index(trexio_file, basis_shell_index) #\n trexio.write_basis_exponent(trexio_file, basis_exponent) #\n trexio.write_basis_coefficient(trexio_file, basis_coefficient) #\n trexio.write_basis_prim_factor(trexio_file, basis_prim_factor) #\n\n ##########################################\n # ao info\n ##########################################\n trexio.write_ao_cartesian(trexio_file, ao_cartesian) #\n trexio.write_ao_num(trexio_file, ao_num) #\n trexio.write_ao_shell(trexio_file, ao_shell) #\n trexio.write_ao_normalization(trexio_file, ao_normalization) #\n\n 
##########################################\n # mo info\n ##########################################\n trexio.write_mo_type(trexio_file, mo_type) #\n\n if complex_flag:\n trexio.write_mo_num(trexio_file, len(mo_coefficient_real)) #\n trexio.write_mo_coefficient(trexio_file, mo_coefficient_real) #\n trexio.write_mo_coefficient_im(trexio_file, mo_coefficient_imag) #\n else:\n trexio.write_mo_num(trexio_file, len(mo_coefficient_real)) #\n trexio.write_mo_coefficient(trexio_file, mo_coefficient_real) #\n\n trexio.write_mo_occupation(trexio_file, mo_occupation_all) #\n\n trexio.write_mo_spin(trexio_file, mo_spin_all) #\n\n ##########################################\n # ao integrals\n ##########################################\n # trexio.write_ao_1e_int_overlap(trexio_file, intor_int1e_ovlp)\n # trexio.write_ao_1e_int_kinetic(trexio_file, intor_int1e_kin)\n # trexio.write_ao_1e_int_potential_n_e(trexio_file, intor_int1e_nuc)\n\n ##########################################\n # ECP\n ##########################################\n # internal format of pyscf\n # https://pyscf.org/pyscf_api_docs/pyscf.gto.html?highlight=ecp#module-pyscf.gto.ecp\n \"\"\"\n { atom: (nelec, # core electrons\n ((l, # l=-1 for UL, l>=0 for Ul to indicate |l> 0: # to be fixed!! for Victor case\n\n ecp_num = 0\n ecp_max_ang_mom_plus_1 = []\n ecp_z_core = []\n ecp_nucleus_index = []\n ecp_ang_mom = []\n ecp_coefficient = []\n ecp_exponent = []\n ecp_power = []\n\n for nuc_index, (chemical_symbol, atom_symbol) in enumerate(\n zip(chemical_symbol_list, atom_symbol_list)\n ):\n\n # atom_symbol is superior to atom_pure_symbol!!\n try:\n z_core, ecp_list = mol._ecp[atom_symbol]\n except KeyError:\n z_core, ecp_list = mol._ecp[chemical_symbol]\n\n # ecp zcore\n ecp_z_core.append(z_core)\n\n # max_ang_mom\n max_ang_mom = max([ecp[0] for ecp in ecp_list]) # this is lmax, right?\n if max_ang_mom == -1:\n # special case!! H and He.\n # PySCF database does not define the ul-s part for them.\n max_ang_mom = 0\n max_ang_mom_plus_1 = 1\n else:\n max_ang_mom_plus_1 = max_ang_mom + 1\n ecp_max_ang_mom_plus_1.append(max_ang_mom_plus_1)\n\n for ecp in ecp_list:\n ang_mom = ecp[0]\n if ang_mom == -1:\n ang_mom = max_ang_mom_plus_1\n for r, exp_coeff_list in enumerate(ecp[1]):\n for exp_coeff in exp_coeff_list:\n exp, coeff = exp_coeff\n\n # store variables!!\n ecp_num += 1\n ecp_nucleus_index.append(nuc_index)\n ecp_ang_mom.append(ang_mom)\n ecp_coefficient.append(coeff)\n ecp_exponent.append(exp)\n ecp_power.append(r - 2)\n\n # special case!! 
H and He.\n # For the sake of clarity, here I put a dummy coefficient (0.0)\n # for the ul-s part here.\n ecp_num += 1\n ecp_nucleus_index.append(nuc_index)\n ecp_ang_mom.append(0)\n ecp_coefficient.append(0.0)\n ecp_exponent.append(1.0)\n ecp_power.append(0)\n\n # write to the trex file\n trexio.write_ecp_num(trexio_file, ecp_num)\n trexio.write_ecp_max_ang_mom_plus_1(trexio_file, ecp_max_ang_mom_plus_1)\n trexio.write_ecp_z_core(trexio_file, ecp_z_core)\n trexio.write_ecp_nucleus_index(trexio_file, ecp_nucleus_index)\n trexio.write_ecp_ang_mom(trexio_file, ecp_ang_mom)\n trexio.write_ecp_coefficient(trexio_file, ecp_coefficient)\n trexio.write_ecp_exponent(trexio_file, ecp_exponent)\n trexio.write_ecp_power(trexio_file, ecp_power)\n\n # close the TREX-IO file\n trexio_file.close()\n\n logger.info(\"Conversion to TREXIO is done.\")\n\n\ndef cli():\n import argparse\n from logging import getLogger, StreamHandler, Formatter\n\n log_level = \"INFO\"\n logger = getLogger(\"pyscf-trexio\")\n logger.setLevel(log_level)\n stream_handler = StreamHandler()\n stream_handler.setLevel(log_level)\n handler_format = Formatter(\"%(message)s\")\n stream_handler.setFormatter(handler_format)\n logger.addHandler(stream_handler)\n\n # define the parser\n parser = argparse.ArgumentParser(\n epilog=\"From pyscf chk file to TREXIO file\",\n usage=\"python pyscf_to_trexio.py -c \\\n pyscf_checkfile -o trexio_filename\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n \"-c\",\n \"--pyscf_checkfile\",\n help=\"pyscf checkfile\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"-o\",\n \"--trexio_filename\",\n help=\"trexio filename\",\n type=str,\n default=\"trexio.hdf5\",\n )\n parser.add_argument(\n \"-b\",\n \"--back_end\",\n help=\"trexio I/O back-end\",\n type=str,\n default=\"hdf5\",\n )\n\n # parse the input values\n args = parser.parse_args()\n # parsed_parameter_dict = vars(args)\n\n pyscf_to_trexio(\n pyscf_checkfile=args.pyscf_checkfile,\n trexio_filename=args.trexio_filename,\n back_end=args.back_end\n )\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"TREX-CoE/trexio_tools","sub_path":"src/trexio_tools/converters/pyscf_to_trexio.py","file_name":"pyscf_to_trexio.py","file_ext":"py","file_size_in_byte":27062,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"61"} +{"seq_id":"25489010509","text":"import pandas as pd\n\ndef joinTables( file_best, file_output, *datas ):\n together = pd.read_csv( file_best )\n\n for d in datas:\n together = pd.merge(together, d, how ='inner', on ='instance')\n \n cols = [\"Instâncias\",\"Melhor\"]\n for i in range( 0, len( datas ) ):\n cols.append( \"Média\" )\n cols.append( \"Melhor\")\n \n together.columns = cols\n return together","repo_name":"tiagofunk/TCC-Algoritmo-MemPlas-Com-Path-Relinking-Problema-QCars","sub_path":"Pandas/joinTables.py","file_name":"joinTables.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23440059091","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\nimport operator\nfrom collections import defaultdict\n\n\ndef read(f):\n n = int(f.readline().strip())\n for i in xrange(n):\n N = int(f.readline().strip())\n yield [f.readline().strip() for j in xrange(N)]\n\n\ndef parse(s):\n item = []\n for c in s:\n if len(item) > 0 and item[-1][0] == c:\n item[-1][1] += 1\n else:\n item.append([c, 1])\n return 
item\n\n\ndef calc_move(vals):\n def func():\n for i in xrange(min(vals), max(vals)):\n yield sum(abs(val - i) for val in vals)\n return min(func())\n\n\ndef solve(strings):\n items = map(parse, strings)\n keys = [\"\".join(map(operator.itemgetter(0), item)) for item in items]\n if len(set(keys)) != 1:\n return None\n n = 0\n for parts in zip(*items):\n vals = map(operator.itemgetter(1), parts)\n if len(set(vals)) == 1:\n continue\n n += calc_move(vals)\n return n\n\n\ndef main(f):\n for i, strings in enumerate(read(f)):\n n = solve(strings)\n if n is None:\n print(\"Case #{0}: Fegla Won\".format(i+1))\n else:\n print(\"Case #{0}: {1}\".format(i+1, n))\n\n\n_input = \"\"\"\n5\n2\nmmaw\nmaw\n2\ngcj\ncj\n3\naaabbb\nab\naabb\n2\nabc\nabc\n3\naabc\nabbc\nabcc\n\"\"\".strip()\n\n_output = \"\"\"\nCase #1: 1\nCase #2: Fegla Won\nCase #3: 4\nCase #4: 0\nCase #5: 3\n\"\"\".strip()\n\n\ndef test_main(compare=False):\n import sys\n from difflib import unified_diff\n from StringIO import StringIO\n\n if compare:\n stdout = sys.stdout\n sys.stdout = StringIO()\n try:\n main(StringIO(_input))\n result = sys.stdout.getvalue().strip()\n finally:\n sys.stdout = stdout\n\n print(result)\n\n for line in unified_diff(result.splitlines(), _output.splitlines(),\n 'Output', 'Expect', lineterm=''):\n print(line)\n\n if result == _output:\n print(\"OK\")\n else:\n print(\"NG\")\n\n else:\n main(StringIO(_input))\n\n\nif __name__ == '__main__':\n test = False\n compare = True\n if test:\n test_main(compare)\n else:\n import sys\n if len(sys.argv) > 1:\n f = open(sys.argv[1])\n main(f)\n f.close()\n else:\n main(sys.stdin)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_142/523.py","file_name":"523.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39280637824","text":"#! 
/usr/bin/env python\n\ndef GetLambdaCharmm(alf_info,fnmout,fnmsin):\n  import sys\n  import numpy as np\n  from scipy.io import FortranFile\n\n  nblocks=alf_info['nblocks']\n  Lambdas=np.zeros((0,nblocks))\n\n  for fnmin in fnmsin:\n    fp=FortranFile(fnmin,'r')\n\n    # The header and icntrl array are read in as a single record\n    # Read the icntrl array (length 20) and extract key variables\n\n    header = (fp.read_record([('hdr',np.string_,4),('icntrl',np.int32,20)]))\n    hdr = header['hdr'][0]\n    icntrl = header['icntrl'][0][:]\n    nfile = icntrl[0]     # Total number of dynamics steps in lambda file\n    npriv = icntrl[1]     # Number of steps preceding this run\n    nsavl = icntrl[2]     # Save frequency for lambda in file\n    nblocks = icntrl[6]   # Total number of blocks = env + subsite blocks\n    nsitemld = icntrl[10] # Total number of substitution sites (R-groups) in MSLD\n\n    # Time step for dynamics in AKMA units\n    delta4 = (fp.read_record(dtype=np.float32))\n\n    # Title in trajectory file \n    title = (fp.read_record([('h',np.int32,1),('title',np.string_,80)]))[0][1]\n\n    # Unused in current processing\n    nbiasv = (fp.read_record(dtype=np.int32))\n    junk = (fp.read_record(dtype=np.float32))\n\n    # Array (length nblocks) indicating which subsites belong\n    # to which R-substitution site\n    isitemld = (fp.read_record(dtype=np.int32))\n\n    # Temperature used in lambda dynamics thermostat\n    temp = (fp.read_record(dtype=np.float32))\n\n    # Unused data for this processing\n    junk3 = (fp.read_record(dtype=np.float32))\n\n    Lambda=np.zeros((nfile,nblocks-1))\n\n    for i in range(nfile):\n      # Read a line of lambda values\n      lambdav = (fp.read_record(dtype=np.float32))\n      theta = (fp.read_record(dtype=np.float32))\n      Lambda[i,:]=lambdav[1:]\n\n    fp.close()\n\n    Lambdas=np.concatenate((Lambdas,Lambda),axis=0)\n\n  np.savetxt(fnmout,Lambdas,fmt=\"%10.6f\")\n\n\n\ndef GetLambdaBlade(alf_info,fnmout,fnmsin):\n  import sys\n  import numpy as np\n  from xdrlib import Unpacker\n  from xdrlib import Packer\n\n  nblocks=alf_info['nblocks']\n  Lambdas=np.zeros((0,nblocks))\n\n  p=Packer()\n  p.pack_int(0)\n  for j in range(0,nblocks):\n    p.pack_float(0)\n  linewidth=len(p.get_buffer())\n\n  for fnmin in fnmsin:\n    # Lambda=np.loadtxt(sys.argv[ifp])\n    fp=open(fnmin,\"rb\")\n    fpdata=fp.read()\n    lines=len(fpdata)//linewidth\n    fp.close()\n    Lambda=np.zeros((lines,nblocks))\n    p=Unpacker(fpdata)\n    for i in range(0,lines):\n      p.unpack_int()\n      for j in range(0,nblocks):\n        Lambda[i,j]=p.unpack_float()\n    Lambdas=np.concatenate((Lambdas,Lambda),axis=0)\n\n  np.savetxt(fnmout,Lambdas,fmt=\"%10.6f\")\n\n\n\ndef GetLambda(alf_info,fnmout,fnmsin):\n  if alf_info['engine'] in ['charmm','bladelib']:\n    GetLambdaCharmm(alf_info,fnmout,fnmsin)\n  elif alf_info['engine'] in ['blade']:\n    GetLambdaBlade(alf_info,fnmout,fnmsin)\n  else:\n    print(\"Error: unsupported engine type %s\" % alf_info['engine'])\n    quit()\n","repo_name":"stanislc/ALF","sub_path":"v3.2beta/alf/GetLambda.py","file_name":"GetLambda.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"2588273923","text":"# import *\nimport numpy as np\n\n# import * as *\nimport matplotlib.pyplot as plt\n\n# from * import *\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# get distance from 2 points\ndef distance(point1, point2):\n    return np.sqrt(np.sum((point1 - point2)**2))\n\n# ant colony function\ndef ant_colony_optimization(points, n_ants, n_iterations, alpha, beta, evaporation_rate, Q):\n\t# number of points = length of points\n    n_points = len(points)\n    # 
pheromones init as 1 for each pair of points (init as a bidirectional matrix)\n    pheromone = np.ones((n_points, n_points))\n    # bestpath = none\n    best_path = None\n    # best length is infinity\n    best_path_length = np.inf\n    \n    # for each iteration (could be a while loop instead and update screen using pygame and location)\n    for iteration in range(n_iterations):\n    \t# paths = none\n        paths = []\n        # path lengths = none\n        path_lengths = []\n        \n        # for each ant in the number of ants\n        for ant in range(n_ants):\n        \t# none have been visited; starts as a list of False where index is node key\n            visited = [False]*n_points\n            # starting point is random (doesn't matter where; you're visiting them all anyway)\n            current_point = np.random.randint(n_points)\n            # visited current point is true (start here duh)\n            visited[current_point] = True\n            # path is set to current point (path list goes left to right beginning to end of path)\n            path = [current_point]\n            # path length = 0 (haven't gone anywhere)\n            path_length = 0\n            \n            # while any point is still unvisited, get going!\n            while False in visited:\n            \t# find where we haven't visited\n            \t# np.where (where it is)\n            \t# logical not, compute truth value of NOT x element wise, for each element compute not x\n            \t# returns the points that have not been visited (aka, inverts the visited list; np.where returns a tuple, so [0] takes the index array)\n                unvisited = np.where(np.logical_not(visited))[0]\n                # probabilities are set to 0 for each unvisited node\n                probabilities = np.zeros(len(unvisited))\n                \n                # for each unvisited node\n                for i, unvisited_point in enumerate(unvisited):\n                \t# the probability of that node being visited is equal to the pheromone\n                \t# for the path from current point to the unvisited candidate to the power of alpha (default 1) [how strictly it follows pheromones]\n                \t# divided by the distance between the 2 points to the power of beta (default 1) [desirability of closer points]\n                    probabilities[i] = pheromone[current_point, unvisited_point]**alpha / distance(points[current_point], points[unvisited_point])**beta\n                \n                # all probabilities are divided by the sum of all probs\n                probabilities /= np.sum(probabilities)\n                \n                # the next point is calculated at random weighted by probability\n                # numpy chooses from unvisited list and returns the index (uses index to get the probability for each point, e.g. 
index 0 of candidate list is index 0 of probs)\n                next_point = np.random.choice(unvisited, p=probabilities)\n\n                # add the new point to the path\n                path.append(next_point)\n\n                # add the distance between current and new to the path length\n                path_length += distance(points[current_point], points[next_point])\n\n                # set the new point to visited in the visited list\n                visited[next_point] = True\n\n                # new point is now current point\n                current_point = next_point\n\n            # append path to the paths list\n            paths.append(path)\n\n            # append path length to pathlengths\n            path_lengths.append(path_length)\n\n            # if the path length is less than the previous best then it is new best\n            if path_length < best_path_length:\n                best_path = path\n                best_path_length = path_length\n        \n        # evaporate the pheromones slightly\n        pheromone *= evaporation_rate\n        \n        # for each path and path length\n        for path, path_length in zip(paths, path_lengths):\n        \t# for each point - 1 (because python)\n            for i in range(n_points-1):\n            \t# the pheromone is increased by Q (default 1) divided by path_length (so that best is enforced but allows deviation)\n                pheromone[path[i], path[i+1]] += Q/path_length\n            # for the last point to the first point do the same\n            pheromone[path[-1], path[0]] += Q/path_length\n    \n    # display the figure\n    fig = plt.figure(figsize=(8, 6))\n    # display the axes\n    ax = fig.add_subplot(111, projection='3d')\n    # scatter the points according to their locations\n    ax.scatter(points[:,0], points[:,1], points[:,2], c='r', marker='o')\n\n    # for each point - 1 (because python)\n    for i in range(n_points-1):\n    \t# plot the best path\n        ax.plot([points[best_path[i],0], points[best_path[i+1],0]],\n                [points[best_path[i],1], points[best_path[i+1],1]],\n                [points[best_path[i],2], points[best_path[i+1],2]],\n                c='g', linestyle='-', linewidth=2, marker='o')\n    \n    # plot the best path for last to first\n    ax.plot([points[best_path[0],0], points[best_path[-1],0]],\n            [points[best_path[0],1], points[best_path[-1],1]],\n            [points[best_path[0],2], points[best_path[-1],2]],\n            c='g', linestyle='-', linewidth=2, marker='o')\n    \n    # set labels\n    ax.set_xlabel('X Label')\n    ax.set_ylabel('Y Label')\n    ax.set_zlabel('Z Label')\n\n    # show plot\n    plt.show()\n    \n# Example usage:\npoints = np.random.rand(50, 3) # Generate 50 random 3D points\nant_colony_optimization(points, n_ants=10, n_iterations=100, alpha=1, beta=1, evaporation_rate=0.5, Q=1)","repo_name":"Nidhogg-Wyrmborn/Ant-Colony-Optimization-Python3","sub_path":"Example.py","file_name":"Example.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19524234161","text":"import time\n\nprint('Welcome to the homeroom sorter!')\ntime.sleep(1.7)\nprint('We just need your last name, and we will sort you into a homeroom!')\ntime.sleep(2.5)\n\n# receive user input of last name, set it as a variable\nlast_name = input('What is your last name? ')\nprint('Thanks! We are sorting you into a homeroom now. Please wait...')\ntime.sleep(4)\n\n# extracts first letter of last name, and sets it as the first initial\nfirst_initial_of_last_name = last_name[0]\n\n# sorts into 3 homerooms based on the first initial of last name\nif first_initial_of_last_name.lower() in ('a', 'b', 'c', 'd', 'e', 'f', 'g'):\n    homeroom = 101\nelif first_initial_of_last_name.lower() in ('h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p'):\n    homeroom = 102\nelse:\n    homeroom = 103\n\n# prints the result\nprint (f'You are in homeroom: {homeroom}. 
Thanks for using the homeroom sorter!')\n","repo_name":"Shatterdest/sithsprojects","sub_path":"9thgrade/classprojects/homeroomsorter/program2.py","file_name":"program2.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13091268573","text":"tc = int(input())\r\n\r\nfor cases in range(tc):\r\n\r\n n = int(input())\r\n \r\n if n < 1:\r\n print(\"not ugly\")\r\n continue\r\n\r\n while(n%2==0):\r\n n = n//2\r\n\r\n while(n%3==0):\r\n n = n//3\r\n\r\n while(n%5==0):\r\n n = n//5\r\n\r\n if n > 1:\r\n print(\"not ugly\")\r\n else:\r\n print(\"ugly\")\r\n","repo_name":"prabhu30/coding","sub_path":"Infytq/Ugly Numbers/ugly_numbers.py","file_name":"ugly_numbers.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73973580034","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 29 09:29:42 2018\n\n@author: jingwenken\n\"\"\"\n\nimport cv2\nimport glob, os\n\n#filepath, RGB (1=as it is, 0=BW, -1=Colour)\nimg=cv2.imread(\"galaxy.jpg\",0)\n\nprint(img)\n\n#to show the img on screen\nresized_image=cv2.resize(img,(int(img.shape[1]/2),int(img.shape[0]/2)))\ncv2.imshow(\"Galaxy\",resized_image)\n#0 is to wait for button press, 2000 is to wait for 2sec to close window\ncv2.waitKey(0)\ncv2.imwrite(\"GalaxyResized.jpg\",resized_image)\ncv2.destroyAllWindows()\n\nos.chdir(\"./sample-images\")\nfor file in glob.glob(\"*\"):\n img=cv2.imread(file,-1)\n resized_img=cv2.resize(img,(100,100))\n cv2.imshow(file,resized_img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n cv2.imwrite(\"Resized_\"+file,resized_img)\n","repo_name":"ken333135/WorkingFiles","sub_path":"AppsDev/ComVision/ComVision.py","file_name":"ComVision.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39875004145","text":"import bluetooth\nimport time\nimport sys\nfrom open_close import unlock_door, lock_door, reset_relays\n\nimport bt_manager\nimport dbus\nimport gobject\nfrom dbus.mainloop.glib import DBusGMainLoop\nimport re\nimport syslog\n\nadapter = bt_manager.BTAdapter()\n\ntarget_address = \"\"\ndeviceInRange = True\nlastState = \"away\" #host down\ncurrentState = \"connected\"\n\n#pins\nlock = 3 #pin 9 on harness, IN2, GPIO3\nunlock_init = 17 #pin 1 on harness, IN4, GPIO17\nunlock = 4 #pin 2 on harness, IN3, GPIO4\nlight = 2 #pin 4 on harness, IN1, GPIO2\nsleepTime = 2\n\n#unlock seq - pin 1 ground, pin 2 ground\n#light - pin 4 ground\n#lock - pin 9 ground\n\n#Pairing is necessary for mac address spoofing\n#sudo service bluetooth restart\n#sudo hciconfig\n#sudo hciconfig hci0 piscan\n#sudo bluez-simple-agent\n\nwhile True:\n try:\n btsocket=bluetooth.BluetoothSocket(bluetooth.L2CAP)\n btsocket.connect((target_address, 3))\n if btsocket.send(\"init\") and lastState == \"away\":\n syslog.syslog(\"device found %s\" %(target_address))\n syslog.syslog(\"Unlocking car...\")\n unlock_door(lock,unlock_init, unlock, light)\n lastState = \"connected\"\n btsocket.close()\n time.sleep(sleepTime)\n except bluetooth.btcommon.BluetoothError as e:\n#terrible way to check error, but e.errno didn't work\n if lastState == \"connected\" and \"112\" in str(e):\n syslog.syslog(\"Locking...\")\n lock_door(lock, unlock_init, unlock, light)\n lastState = \"away\"\n time.sleep(sleepTime)\n except KeyboardInterrupt:\n btsocket.close()\n reset_relays(lock, 
unlock_init, unlock, light)\n        time.sleep(sleepTime)\n        sys.exit(1)\n    except:\n        #sys.exc_info()[0]\n        lock_door(lock, unlock_init, unlock, light)\n        btsocket.close()\n","repo_name":"susmit85/keyless","sub_path":"keyless.py","file_name":"keyless.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23604929071","text":"from django.contrib import admin\nfrom django.contrib.admin import ModelAdmin\nfrom .models import Project, Skill\n\n\nclass CustomModelAdmin(ModelAdmin):\n    actions = ['copy_record']\n\n    def copy_record(self, request, queryset):\n        for obj in queryset:\n            obj.pk = None\n            obj.save()\n\n    copy_record.short_description = \"Duplicate selected record\"\n\n\nadmin.site.register(Skill)\nadmin.site.register(Project)\nadmin.site.unregister(Project)\nadmin.site.register(Project, CustomModelAdmin)\n","repo_name":"robin-private/personal_portfolio-django","sub_path":"portfolio/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18568797213","text":"import argparse\n\n## Create ArgumentParser Object\nparser = argparse.ArgumentParser(description='Process some integers.')\n\n\n## \"integers\": list consisting of int values\nparser.add_argument('integers', metavar='N', type=int, nargs='+',\n                    help='an integer for the accumulator')\n\n## \"accumulate\": --sum -> sum built-in function is saved in namespace, default -> max built-in function \nparser.add_argument('--sum', dest='accumulate', action='store_const',\n                    const=sum, default=max,\n                    help='sum the integers (default: find the max)')\n\n## returns namespace args using \"parser.parse_args()\" method\nargs = parser.parse_args()\n\n## Calculate values in \"integers\" using the function saved in \"accumulate\" \nprint(args.accumulate(args.integers))\n","repo_name":"ico1036/Python_ML_study","sub_path":"N01_Some_useful_python/IO/N02_argparser.py","file_name":"N02_argparser.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32981031727","text":"# You are given a string, s, and a list of words, words, that are all of the same length. 
Find all starting indices of substring(s) in s that is a concatenation of each word in words exactly once and without any intervening characters.\n#\n# Example 1:\n#\n# Input:\n#   s = \"barfoothefoobarman\",\n#   words = [\"foo\",\"bar\"]\n# Output: [0,9]\n# Explanation: Substrings starting at index 0 and 9 are \"barfoo\" and \"foobar\" respectively.\n# The output order does not matter, returning [9,0] is fine too.\n# Example 2:\n#\n# Input:\n#   s = \"wordgoodgoodgoodbestword\",\n#   words = [\"word\",\"good\",\"best\",\"word\"]\n# Output: []\nimport collections\n\n\nclass Solution(object):\n    def findSubstring(self, s, words):\n        \"\"\"\n        :type s: str\n        :type words: List[str]\n        :rtype: List[int]\n        \"\"\"\n        # brute force\n        # if not s or not words:\n        #     return []\n        # l = len(words[0])\n        # counter = collections.Counter(words)\n        # res = []\n        # for i in range(len(s) - (l * len(words)) + 1):\n        #     j = i\n        #     total = 0\n        #     visited = collections.defaultdict(lambda: 0)\n        #     while j < len(s):\n        #         sub = s[j:j + l]\n        #         if sub in counter and visited[sub] < counter[sub]:\n        #             visited[sub] += 1\n        #             total += 1\n        #         else:\n        #             break\n        #         j += l\n        #     if total == len(words):\n        #         res.append(i)\n        # return res\n\n        if not s or not words or not words[0]:\n            return []\n        m = len(words[0])\n        n = len(words)\n        counter = collections.Counter(words)\n        res = []\n        ### key: use sliding window to gradually increase window\n        for starting in range(m):\n            sliding = collections.Counter()  ## count the words in the current sliding window\n            wordcount = 0  ## count how many words right now\n            for i in range(starting, len(s), m):  ## use m as interval\n                w = s[i:i + m]\n                if w in counter:\n                    sliding[w] += 1\n                    wordcount += 1\n                    ## check whether this word has been counted more times than it appears\n                    while sliding[w] > counter[w]:\n                        currstarting = i - m * (wordcount - 1)\n                        sliding[s[currstarting:currstarting + m]] -= 1\n                        wordcount -= 1\n                else:\n                    sliding.clear()\n                    wordcount = 0\n\n                if wordcount == n:\n                    res.append(i - m * (wordcount - 1))\n\n        return res\n\n\ns = Solution()\nprint(s.findSubstring(\"wordgoodgoodgoodbestword\",\n                      [\"word\", \"good\", \"best\", \"good\"]))\n","repo_name":"yshshadow/Leetcode","sub_path":"1-50/30.py","file_name":"30.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18134572093","text":"from os import system, name \nfrom custom_exceptions import *\n\n# colors\ncolors = {\n\t\"endc\": \"\\033[0m\",\n\t\"bold\": \"\\033[1m\",\n\t\"red\": \"\\033[0;31m\",\n\t\"blue\": \"\\033[0;34m\",\n\t\"grey\": \"\\033[0;37m\",\n\t\"green\": \"\\033[0;32m\"\n}\n\n \n# clear function \ndef clear(): \n\t# for windows \n\tif name == 'nt': \n\t\t_ = system('cls') \n \n\t# for mac and linux (posix)\n\telse: \n\t\t_ = system('clear') \n\n\n# get input function\ndef get_input(prompt, valid, max_length=250, invert=False):\n\t# get input\n\tuser_input = input(prompt)\n\n\t# if any input at all\n\tif user_input:\n\t\t# if invalid...\n\t\tif (user_input not in valid) is not invert:\n\t\t\traise InvalidInputError()\n\t\t# if too long...\n\t\telif len(user_input) > max_length:\n\t\t\traise ExceededMaxLengthError()\n\t\t# if alright...\n\t\telse:\n\t\t\treturn user_input\n\telse:\n\t\traise BlankInputError()\n\n\n# color function\ndef colorify(string, color, style=None):\n\treturn f\"{colors[color]}{colors[style] if style else 
''}{string}{colors['endc']}\"","repo_name":"Positron11/python-console-rpg","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26257056557","text":"#########################################################################\n# Node File\n# Creates and handles the Nodes on the Catan board\n# Written by David Hwang (dchwang) for 15-112 Fall 2019 Term Project\n#########################################################################\n\nimport copy\nfrom resources.game.utils import Utils\n\nclass Node(object):\n    def __init__(self, id, port=None):\n        self.id = id\n        self.port = port\n        self.pos = None\n        self.owner = None\n        self.nodeLevel = 0 # 1 and 2 for settlement and city\n        self.buildable = True\n\n    def __repr__(self):\n        port = f'with Port {self.port}' if self.port != None else 'without a port' \n        return f'<Node {self.id} {port}>'\n    \n    def __eq__(self, other):\n        return isinstance(other, Node) and other.id == self.id\n    \n    def __hash__(self):\n        return hash((self.id,))\n    \n    def checkAdjacencies(self, board):\n        for i in range(board.q):\n            row = copy.copy(board.hexBoard[i])\n            colCtr = 0\n            while None in row:\n                row.remove(None)\n            firstIndex = board.hexBoard[i].index(row[0])\n            rowLen = len(row)\n            for j in range(rowLen): \n                tile = board.hexBoard[i][j+firstIndex]\n                if self in tile.nodes:\n                    nodeIndex = tile.nodes.index(self)\n                    node1 = tile.nodes[nodeIndex-1]\n                    node2 = tile.nodes[(nodeIndex+1)%6]\n                    board.nodes[node1.id].buildable = False\n                    board.nodes[node2.id].buildable = False\n    \n    def getRoads(self, board):\n        seen = set()\n        for i in range(board.q):\n            row = copy.copy(board.hexBoard[i])\n            colCtr = 0\n            while None in row:\n                row.remove(None)\n            firstIndex = board.hexBoard[i].index(row[0])\n            rowLen = len(row)\n            for j in range(rowLen):\n                tile = board.hexBoard[i][j+firstIndex]\n                if self in tile.nodes:\n                    nodeIndex = tile.nodes.index(self)\n                    road1 = board.edges[tile.edges[nodeIndex].id]\n                    road2 = board.edges[tile.edges[(nodeIndex-1)%6].id]\n                    if (road1 not in seen):\n                        seen.add(road1.id)\n                    if (road2 not in seen):\n                        seen.add(road2.id)\n        return seen\n    \n    def setupCollect(self, player, board):\n        for i in range(board.q):\n            row = copy.copy(board.hexBoard[i])\n            colCtr = 0\n            while None in row:\n                row.remove(None)\n            firstIndex = board.hexBoard[i].index(row[0])\n            rowLen = len(row)\n            for j in range(rowLen): \n                tile = board.hexBoard[i][j+firstIndex]\n                if self in tile.nodes:\n                    resource = Utils.getResourceFromType(tile.type)\n                    if (resource != None):\n                        player.resources[resource] += self.nodeLevel\n    \n    def getNodeValue(self, board):\n        resList = []\n        res = 0\n        for i in range(board.q):\n            row = copy.copy(board.hexBoard[i])\n            colCtr = 0\n            while None in row:\n                row.remove(None)\n            firstIndex = board.hexBoard[i].index(row[0])\n            rowLen = len(row)\n            for j in range(rowLen): \n                tile = board.hexBoard[i][j+firstIndex]\n                if self in tile.nodes and tile.number != None:\n                    res += Utils.getProbabilityFromNumber(tile.number)\n                    resList.append((tile.number, Utils.getProbabilityFromNumber(tile.number)))\n        return res, resList\n    \n    def collectFromNumber(self, player, n, board):\n        for i in range(board.q):\n            row = copy.copy(board.hexBoard[i])\n            colCtr = 0\n            while None in row:\n                row.remove(None)\n            firstIndex = board.hexBoard[i].index(row[0])\n            rowLen = len(row)\n            for j in range(rowLen): \n                tile = board.hexBoard[i][j+firstIndex]\n                if self in tile.nodes:\n                    resource = Utils.getResourceFromType(tile.type)\n                    if (tile.number == n):\n                        
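# a settlement (nodeLevel 1) yields one resource card, a city (nodeLevel 2) yields two\n                        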
player.resources[resource] += self.nodeLevel\n \n def checkOwnedRoads(self, board, player):\n indexes = self.getRoads(board)\n roads = set()\n for index in indexes:\n roads.add(board.edges[index])\n for edge in roads:\n if (edge.road == player.bgColor):\n return True\n\n def getAdjacentNodes(self, board):\n seen = set()\n for i in range(board.q):\n row = copy.copy(board.hexBoard[i])\n colCtr = 0\n while None in row:\n row.remove(None)\n firstIndex = board.hexBoard[i].index(row[0])\n rowLen = len(row)\n for j in range(rowLen): \n tile = board.hexBoard[i][j+firstIndex]\n if self in tile.nodes:\n nodeIndex = tile.nodes.index(self)\n node1 = tile.nodes[nodeIndex-1]\n node2 = tile.nodes[(nodeIndex+1)%6]\n if (node1 not in seen):\n seen.add(node1)\n if (node2 not in seen):\n seen.add(node2)\n return seen\n \n def getRoadBetweenNodes(self, other, board):\n for i in range(board.q):\n row = copy.copy(board.hexBoard[i])\n colCtr = 0\n while None in row:\n row.remove(None)\n firstIndex = board.hexBoard[i].index(row[0])\n rowLen = len(row)\n for j in range(rowLen): \n tile = board.hexBoard[i][j+firstIndex]\n if self in tile.nodes and other in tile.nodes:\n if (tile.nodes.index(self) - tile.nodes.index(other) > 0 or\n tile.nodes.index(self) == 0 and tile.nodes.index(other) == 5):\n edgeIndex = tile.nodes.index(other)\n elif (tile.nodes.index(other) - tile.nodes.index(self) > 0 or\n tile.nodes.index(other) == 0 and tile.nodes.index(self) == 5):\n edgeIndex = tile.nodes.index(self)\n else:\n return None\n return board.edges[tile.edges[edgeIndex].id]","repo_name":"hdavidethan/catan-tp","sub_path":"resources/game/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":6477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41146981341","text":"from objects.modulebase import ModuleBase\nfrom objects.permissions import PermissionManageMessages\n\nfrom utils.funcs import find_user\n\n\nDEF_LIMIT = 100\nMAX_LIMIT = 500\n\nclass Module(ModuleBase):\n\n usage_doc = '{prefix}{aliases} [target] [limit]'\n short_doc = 'Delete messages in channel'\n long_doc = (\n f'Target of prune can be: user mention/id/name, bots, users.\\n'\n f'Default number of messages to check (limit) is {DEF_LIMIT}.\\n'\n f'Maximum value is {MAX_LIMIT}.'\n )\n\n name = 'purge'\n aliases = (name, 'clear')\n category = 'Moderation'\n bot_perms = (PermissionManageMessages(), )\n user_perms = (PermissionManageMessages(), )\n guild_only = True\n\n async def on_call(self, ctx, args, **flags):\n limit = None\n user_string = None\n\n if len(args) > 1:\n if args[-1].isdigit():\n limit = int(args[-1])\n if limit > MAX_LIMIT:\n limit = MAX_LIMIT\n if len(args) > 2:\n user_string = args[1:-1]\n\n if limit is None:\n limit = DEF_LIMIT\n user_string = args[1:]\n\n if len(args) == 1 or len(args) == 2 and user_string is None:\n check = lambda m: True\n elif user_string.lower() == 'bots':\n check = lambda m: m.author.bot\n elif user_string.lower() == 'users':\n check = lambda m: not m.author.bot\n else:\n user = await find_user(user_string, ctx.message)\n if user is None:\n return await ctx.error(f'User **{user_string}** not found!')\n check = lambda m: m.author.id == user.id\n\n deleted = await ctx.channel.purge(\n limit=limit, check=check,\n before=ctx.message.created_at\n )\n\n await ctx.send(\n f'Deleted {len(deleted)} messages from {\", \".join(set(\"**\" + str(m.author) + \"**\" for m in deleted))}' if deleted else 'No matched messages found',\n delete_after=7, register=False\n 
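# the confirmation message self-deletes after 7 seconds to keep the channel clean\n        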
)\n\n        await self.bot.delete_message(ctx.message)\n","repo_name":"Fogapod/KiwiBot","sub_path":"modules/moderation/module_purge.py","file_name":"module_purge.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"61"} +{"seq_id":"13742251259","text":"'''\nLesson on logical operators\nand\nor\nnot\nin\nnot in\n'''\n\n# (True and true = true) both expressions have to be true\n'''comparacao1 and comparacao2'''\n\n# (true or true = true) only one expression has to be true\n'''comp1 or comp2'''\n\n# not inverts the expression\n'''\na = 2\nb = 3\n\nif not b > a:\n    print('B é maior do que A')\nelse:\n    print('A é maior do que B')\n\n# example\n\nnome = 'Lucas'\nif 'u' in nome: # in is like 'to be in'\n    print('Existe a letra U no seu nome.')\n\n# example\nnome = 'Lucas'\nif 'asas' not in nome: # not in means \"if it is not in\"\n    print('Executei.')\nelse:\n    print('Existe o texto')\n'''\n# Login example\n# registered in the db\nusuario_bd = 'lucas'\nsenha_bd = '1234'\n\nusuario = input('Nome de usuário: ')\nsenha = input('Senha do usuário: ')\n\nif usuario_bd == usuario and senha_bd == senha:\n    print('Você está logado no sistema')\nelse:\n    print('Usuário ou senha inválida')\n\n\n\n","repo_name":"lucas-ioliveira/curso_python3","sub_path":"secao2_python_basico/aula10/aula10.py","file_name":"aula10.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34744305214","text":"import sklearn.metrics as sk, math, sys, matplotlib.pyplot as plot, numpy as np, Utils\n\ndef getAUC(csvFile, xName, yName):\n    line = 0\n    xIndex = 0\n    yIndex = 0\n    X = []\n    Y = []\n    for row in Utils.csvRowsGenerator(csvFile):\n        for index in range(0, len(row), 1):\n            if line == 0:\n                if str(row[index]).strip() == xName:\n                    xIndex = index\n                if str(row[index]).strip() == yName:\n                    yIndex = index\n            else:\n                if index == xIndex: X.append(float(str(row[index]).strip()))\n                if index == yIndex: Y.append(float(str(row[index]).strip()))\n        line += 1\n\n    return sk.auc(X, Y)\n\n\ndef getAUCLoc(csvFile, xName, yName, percent):\n    line = 0\n    xIndex = 0\n    yIndex = 0\n    X = []\n    Y = []\n    for row in Utils.csvRowsGenerator(csvFile):\n        for index in range(0, len(row), 1):\n            if line == 0:\n                if str(row[index]).strip() == xName:\n                    xIndex = index\n                if str(row[index]).strip() == yName:\n                    yIndex = index\n            else:\n                if float(str(row[xIndex]).strip()) < percent:\n                    if index == xIndex: X.append(float(str(row[index]).strip()))\n                    if index == yIndex: Y.append(float(str(row[index]).strip()))\n        line += 1\n    # print(X)\n    # print(Y)\n    return sk.auc(X, Y)\n\n\n\ndef getAUCOfAll():\n    dataset_list = ['abinit', 'lammps', 'libmesh', 'mda']\n    learner_list = ['cart', 'fft', 'rf', 'vfdt']\n    x_list = ['size', 'loc']\n    y_list = ['precision', 'recall', 'time']\n\n    file = open('auc-report.csv', 'w')\n    file.write('dataset, learner, x-axis, y-axis, auc-score\\n')\n    for d in dataset_list:\n        for x in x_list:\n            for y in y_list:\n                for l in learner_list:\n                    path = f'/home/rr/Workspace/NCSUFSS18/cp/report/{d}-dump-{l}.csv'\n                    auc = getAUC(path, x, y)\n                    file.write(f'{d}, {l}, {x}, {y}, {auc:.2f}\\n')\n\n    file.close()\n    return\n\n\ndef getPlot(learners, dataset, xName, yName):\n    csvFiles = []\n\n    for item in learners:\n        csvFiles.append(f'/home/rr/Workspace/NCSUFSS18/cp/report/{dataset}-dump-{item}.csv')\n\n    X = [None]*len(csvFiles)\n    Y = [None]*len(csvFiles)\n\n    for element in 
range(0, len(csvFiles), 1):\n line = 0\n xIndex = 0\n yIndex = 0\n X[element] = []\n Y[element] = []\n\n for row in Utils.csvRowsGenerator(csvFiles[element]):\n for index in range(0, len(row), 1):\n if line == 0:\n if str(row[index]).strip() == xName:\n xIndex = index\n if str(row[index]).strip() == yName:\n yIndex = index\n else:\n if index == xIndex: X[element].append(float(str(row[index]).strip()))\n if index == yIndex: Y[element].append(float(str(row[index]).strip()))\n line += 1\n\n plot.xlim(0, 100)\n plot.plot(X[0], Y[0], color='green', label='CART', marker='x')\n plot.plot(X[1], Y[1], color='red', label='FFT', marker='o')\n plot.plot(X[2], Y[2], color='blue', label='RF', marker='+')\n plot.plot(X[3], Y[3], color='black', label='VFDT', marker='s')\n plot.xlabel(f'{xName}')\n plot.ylabel(f'{yName}')\n plot.title(f'{dataset}: {xName} vs false alarm')\n plot.legend()\n # plot.show()\n plot.savefig(f'{dataset}-{xName}-{yName}.png')\n plot.clf()\n return\n\n# getPlot(['cart', 'fft', 'rf', 'vfdt'], 'libmesh', 'data-size', 'accuracy')\n\ndef getAllPlot():\n dataset_list = ['abinit', 'lammps', 'libmesh', 'mda']\n x_list = ['size']\n y_list = [ 'fa']\n\n for d in dataset_list:\n for x in x_list:\n for y in y_list:\n getPlot(['cart', 'fft', 'rf', 'vfdt'], d, x, y)\n\n# data = ['abinit', 'lammps', 'libmesh', 'mda']\n# learner = ['vfdt', 'cart', 'fft', 'rf']\n# for d in data:\n# for l in learner:\n# file = open(f'loc-auc-{d}-{l}.csv', 'w')\n# file.write('loc, auc\\n')\n# for i in range(1, 11):\n# x = getAUCLoc(f'/home/rr/Workspace/NCSUFSS18/cp/report/{d}-dump-{l}.csv', 'loc', 'recall', i*10)\n# file.write(f'{i*10}, {x:.2f}\\n')\n#\n# file.close()\n\ngetAllPlot()\n\n","repo_name":"rayhanur-rahman/VFDT-Defect-Prediction","sub_path":"cp/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38065395347","text":"import sys\ntree = dict()\ni = 0\nfor t in sys.stdin:\n i += 1\n t = t.rstrip()\n try:\n tree[t] += 1\n except:\n tree[t] = 1\n\ntmp = sorted(tree.items())\n\nfor t in tmp:\n print('%s %.4f' % (t[0], t[1]/i*100))\n","repo_name":"shg9411/algo","sub_path":"algo_py/boj/bj4358.py","file_name":"bj4358.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"41534696422","text":"import tkinter as tk\nfrom tkinter import ttk\n#from tkinter import*\nimport tkinter\nfrom tkinter import BOTH, LEFT, TOP,RIGHT,BOTTOM, Frame,messagebox,OptionMenu,StringVar\nimport numpy as np\nimport cv2\nimport PIL.Image, PIL.ImageTk\nimport time\nimport const as CONST\nimport DataMaster as DataMaster\n\nclass App:\n def __init__(self, window, window_title, video_source=0):\n self.window = window\n self.window.title(window_title)\n self.video_source = video_source\n\n fm = Frame(window)\n \n tkinter.Label( fm, text=\"Device Manage:\" ).pack(side = TOP, anchor=\"w\",pady=15)\n\n t = ToggledFrame(fm, text='Device 1:', relief=\"raised\", borderwidth=1)\n t.pack(fill=\"x\", pady=2, padx=2, side = TOP, anchor=\"nw\")\n self.TempurateAir1 = ttk.Label(t.sub_frame, text='Temperature Air (*C ) : ' + str(CONST.Data[0]) )\n self.TempurateAir1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n self.Humidity1 = ttk.Label(t.sub_frame, text='Humidity Air (%) : '+ str(CONST.Data[1]) )\n self.Humidity1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n self.TemperatureMax1 = ttk.Label(t.sub_frame, 
text='Temperature 2 (*C ) : '+ str(CONST.Data[2]))\n        self.TemperatureMax1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Peresure1 = ttk.Label(t.sub_frame, text='Pressure (bmp) : ' + str(CONST.Data[3]))\n        self.Peresure1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.CoGas1 = ttk.Label(t.sub_frame, text='CO Gas (...) : ' + str(CONST.Data[4]))\n        self.CoGas1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.SoundNoise1 = ttk.Label(t.sub_frame, text='Sound Noise (db) : ' + str(CONST.Data[5]) )\n        self.SoundNoise1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Status1 = ttk.Label(t.sub_frame, text='Status : ' + str(CONST.StrStatus1) )\n        self.Status1.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n\n        t2 = ToggledFrame(fm, text='Device 2:', relief=\"raised\", borderwidth=1)\n        t2.pack(fill=\"x\", pady=2, padx=2, side = TOP, anchor=\"w\")\n\n        self.TempurateAir2 = ttk.Label(t2.sub_frame, text='Temperature Air (*C ) : ' + str(CONST.Data2[0]) )\n        self.TempurateAir2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Humidity2 = ttk.Label(t2.sub_frame, text='Humidity Air (%) : '+ str(CONST.Data2[1]) )\n        self.Humidity2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.TemperatureMax2 = ttk.Label(t2.sub_frame, text='Temperature 2 (*C ) : '+ str(CONST.Data2[2]))\n        self.TemperatureMax2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Pressure2 = ttk.Label(t2.sub_frame, text='Pressure (bmp) : ' + str(CONST.Data2[3]))\n        self.Pressure2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.CoGas2 = ttk.Label(t2.sub_frame, text='CO Gas (...) : ' + str(CONST.Data2[4]))\n        self.CoGas2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.SoundNoise2 = ttk.Label(t2.sub_frame, text='Sound Noise (db) : ' + str(CONST.Data2[5]) )\n        self.SoundNoise2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Status2 = ttk.Label(t2.sub_frame, text='Status : ' + str(CONST.StrStatus2) )\n        self.Status2.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        \n        tkinter.Label( fm, text=\"Finger Manage:\" ).pack(side = TOP, anchor=\"w\", pady=15)\n        t3 = ToggledFrame(fm, text='Information Accept: No Accept', relief=\"raised\", borderwidth=1)\n        t3.pack(fill=\"x\", pady=2, padx=2, side = TOP, anchor=\"w\")\n\n        self.NameText = ttk.Label(t3.sub_frame, text='Full Name: not found ' )\n        self.NameText.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.IDText = ttk.Label(t3.sub_frame, text='ID : ')\n        self.IDText.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Position = ttk.Label(t3.sub_frame, text='Position : ')\n        self.Position.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.Parterment = ttk.Label(t3.sub_frame, text='Part :' )\n        self.Parterment.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n\n        \n        self.btn_snapshot=tkinter.Button(fm, text=\"Add Finger\", width=50, command=self.AndFinger)\n        self.btn_snapshot.pack(side = TOP, anchor=\"w\")\n\n        fm.pack(side=LEFT, fill=BOTH, expand=True)\n\n        # open video source (by default this will try to open the computer webcam)\n        self.vid = MyVideoCapture(self.video_source)\n        # Create a canvas that can fit the above video source size\n        fm2 = Frame(window)\n        tkinter.Label( fm2, text=\"Video Manage:\" ).pack(side = TOP, anchor=\"w\") \n        \n        self.var = StringVar(fm2)\n        self.lst = [\"Video Stream\", \"Face detection\", \"Face recognition\", \"None\"]\n        self.var.set(self.lst[0]) # initial value\n        option = OptionMenu(fm2, self.var,*self.lst )\n        option.pack(side = TOP, anchor=\"w\",padx= 2,pady= 2)\n        self.canvas = tkinter.Canvas( fm2, width = self.vid.width, 
height = self.vid.height)\n        self.canvas.pack( side = TOP,ipadx= 5,ipady=5,padx= 10,pady= 10, fill = \"x\" , expand=1,anchor=\"w\" )\n        \n        tkinter.Label( fm2, text=\"Time: \" ).pack(side = TOP, anchor=\"w\",)\n\n        t4 = ToggledFrame(fm2, text='Information In/Out:', relief=\"raised\", borderwidth=1)\n        t4.pack(fill=\"x\", pady=2, padx=2, side = TOP, anchor=\"w\")\n\n        self.NumberIn = ttk.Label(t4.sub_frame, text='Number In : ' + str(CONST.Data[0]) )\n        self.NumberIn.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.NumberOut = ttk.Label(t4.sub_frame, text='Number out : '+ str(CONST.Data[1]) )\n        self.NumberOut.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.TimeIn = ttk.Label(t4.sub_frame, text='Last Time In : '+ str(CONST.Data[1]) )\n        self.TimeIn.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n        self.TimeOut = ttk.Label(t4.sub_frame, text='Last Time Out : '+ str(CONST.Data[1]) )\n        self.TimeOut.pack(pady=2, padx=2, side = TOP, anchor=\"nw\")\n\n        tkinter.Button(fm2, text=\"Information\",command=self.advance).pack(side = TOP, anchor=\"w\")\n\n        ttk.Label(fm2, text='Design by: GF Team - email: tuyenathai@gmail.com ').pack(pady=2, padx=2, side = TOP, anchor=\"e\")\n\n        fm2.pack(side=LEFT, padx=10,pady= 10)\n        self.delay = 5\n        self.update()\n\n        self.window.mainloop() \n\n    def AndFinger(self):\n        #response = tkinter.messagebox.askquestion(\"Simple Question\", \"Do you add finger?\")\n        #if response == 1 :\n        #self.btn_snapshot.pack_forget()\n        self.btn_snapshot.config(state=\"disabled\")\n        self.Fingerwindow = tk.Toplevel(self.window)\n        self.Fingerwindow.title(\"FG-And Finger\")\n        display = tk.Label(self.Fingerwindow, text=\"Finger config, insert a new Information !\")\n        display.pack(side = TOP) \n        fmInformation = Frame (self.Fingerwindow)\n\n        L1 = tk.Label(fmInformation, text=\"User Name\")\n        L1.grid( row = 0)\n        self.V1 = StringVar()\n        self.E1 = tk.Entry(fmInformation, textvariable= self.V1, bd =5)\n        self.E1.grid(row = 0, column=1)\n        L2 = tk.Label(fmInformation, text=\"ID\")\n        L2.grid( row = 1)\n        self.V2 = StringVar()\n        self.E2 = tk.Entry(fmInformation, textvariable= self.V2,bd =5)\n        self.E2.grid(row = 1, column=1)\n        L3 = tk.Label(fmInformation, text=\"Position\")\n        L3.grid( row = 2)\n        self.E3 = tk.Entry(fmInformation, bd =5)\n        self.E3.grid(row = 2, column=1)\n        L4 = tk.Label(fmInformation, text=\"Part\")\n        L4.grid( row = 3)\n        self.V4 = StringVar()\n        self.E4 = tk.Entry(fmInformation,textvariable= self.V4, bd =5)\n        self.E4.grid(row = 3, column=1)\n        fmInformation.pack(side=TOP, padx=5,pady= 5,anchor = \"w\")\n        fmSaveButon = Frame (self.Fingerwindow)\n        tk.Button(fmSaveButon, text=\"Save\",command=self.SaveInfomation).pack(side = LEFT, anchor=\"w\",padx=2,pady= 2)\n        tk.Button(fmSaveButon, text=\"Exit\",command=self.ExitInfomation).pack(side = LEFT, anchor=\"w\",padx=2,pady= 2)\n        fmSaveButon.pack(side=TOP, padx=5,pady= 5,anchor=\"e\")\n        #self.Fingerwindow.mainloop()\n\n    def SaveInfomation(self):\n        self.Fingerwindow.destroy()\n        DataSave = [self.V1.get(),self.V2.get(),self.V4.get()]\n        DataMaster.WriteData(DataSave)\n        #self.btn_snapshot.pack()\n        self.btn_snapshot.config(state=\"normal\")\n\n    def ExitInfomation(self):\n    \tself.Fingerwindow.destroy()\n    \t#self.btn_snapshot.pack() \n    \tself.btn_snapshot.config(state=\"normal\") \t\n\n    def advance(self):\n        # Show program information in a message box\n        tkinter.messagebox.showinfo(\"Information\", \"System V1.0, using lora and checkin finger\")\n\n    def update(self):\n        # Get a frame from the video source\n        #print(self.lst.index(self.var.get()))\n        try:\n        \tret, frame = 
self.vid.get_frame(self.lst.index(self.var.get()))\n        \tif ret:\n        \t\tself.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(frame))\n        \t\tself.canvas.create_image(0, 0, image = self.photo, anchor = tkinter.NW)\n        except:\n        \ttkinter.messagebox.showerror(\"Warning\", \"System can't open camera\")\n\n        self.TempurateAir1.config(text = 'Temperature Air (*C ) : ' + str(CONST.Data[0]) )\n        self.TempurateAir2.config(text = 'Temperature Air (*C ) : ' + str(CONST.Data2[0]) )\n        self.Humidity1.config(text='Humidity Air (%) : '+ str(CONST.Data[1]))\n        self.Humidity2.config(text='Humidity Air (%) : '+ str(CONST.Data2[1]))\n        self.TemperatureMax1.config(text='Temperature 2 (*C ) : '+ str(CONST.Data[2]))\n        self.TemperatureMax2.config(text='Temperature 2 (*C ) : '+ str(CONST.Data2[2]))\n        self.Peresure1.config(text='Pressure (bmp) : ' + str(CONST.Data[3]))\n        self.Pressure2.config(text='Pressure (bmp) : ' + str(CONST.Data2[3]))\n        self.CoGas1.config(text='CO Gas (...) : ' + str(CONST.Data[4]))\n        self.CoGas2.config(text='CO Gas (...) : ' + str(CONST.Data2[4]))\n        self.SoundNoise1.config(text='Sound Noise (db) : ' + str(CONST.Data[5]))\n        self.SoundNoise2.config(text='Sound Noise (db) : ' + str(CONST.Data2[5]))\n        self.Status1.config(text='Status : ' + str(CONST.StrStatus1))\n        self.Status2.config(text='Status : ' + str(CONST.StrStatus2))\n        if CONST.PositionAccess == True:\n            DataMaster.ReadData(CONST.PositionNum)\n            self.NameText.config(text='Full Name: '+ CONST.FingerData[0])\n            self.IDText.config(text='ID : '+ CONST.FingerData[1])\n            self.Position.config(text='Position : '+str(CONST.PositionNum))\n            self.Parterment.config(text='Part :'+ CONST.FingerData[2])\n            CONST.PositionAccess = False\n        self.NumberIn.config(text='Number In : '+ str(CONST.DataCheckIn[0]))\n        self.NumberOut.config(text='Number out : '+ str(CONST.DataCheckIn[1]))\n        self.TimeIn.config(text='Last Time In : Null')\n        self.TimeOut.config(text='Last Time Out : Null')\n        \n        self.window.after(self.delay, self.update)\n\n    \n\n\nclass MyVideoCapture:\n    def __init__(self, video_source=0):\n        # Open the video source\n        self.vid = cv2.VideoCapture(video_source)\n        if not self.vid.isOpened():\n            raise ValueError(\"Unable to open video source\", video_source)\n        \n        # Get video source width and height\n        self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\n        self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\n        self.faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')\n        \n    def get_frame(self,DataMode):\n        if self.vid.isOpened():\n        \tret, img = self.vid.read()\n        \tif ret:\n        \t\tif DataMode == 0:\n        \t\t\treturn (ret, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n        \t\telif DataMode == 1:\n        \t\t\timg = cv2.flip(img, 1)\n        \t\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        \t\t\tfaces = self.faceCascade.detectMultiScale(gray,scaleFactor=1.2,minNeighbors=5,minSize=(20, 20))\n        \t\t\tfor (x,y,w,h) in faces:\n        \t\t\t\tcv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n        \t\t\t\troi_gray = gray[y:y+h, x:x+w]\n        \t\t\t\troi_color = img[y:y+h, x:x+w]\n        \t\t\treturn (ret, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n        \t\telse:\n        \t\t\treturn (ret, cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n        \telse:\n        \t\treturn (ret, None)\n        else:\n        \treturn (False, None)\n\n    # Release the video source when the object is destroyed\n    def __del__(self):\n        if self.vid.isOpened():\n            self.vid.release()\n\nclass ToggledFrame(tk.Frame):\n\n    def __init__(self, parent, text=\"\", *args, **options):\n        tk.Frame.__init__(self, parent, *args, **options)\n\n        self.show = tk.IntVar()\n        
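# 1 = expanded, 0 = collapsed; toggle() below flips the frame and the button label\n        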
self.show.set(1)\n\n        self.title_frame = ttk.Frame(self)\n        self.title_frame.pack(fill=\"x\", expand=1)\n\n        ttk.Label(self.title_frame, text=text).pack(side=\"left\", fill=\"x\", expand=1)\n\n        self.toggle_button = ttk.Checkbutton(self.title_frame, width=2, text='-', command=self.toggle,\n                                            variable=self.show, style='Toolbutton')\n        self.toggle_button.pack(side=\"left\")\n\n        self.sub_frame = tk.Frame(self, relief=\"sunken\", borderwidth=1)\n        self.sub_frame.pack(fill=\"x\", expand=1)\n\n    def toggle(self):\n        if bool(self.show.get()):\n            self.sub_frame.pack(fill=\"x\", expand=1)\n            self.toggle_button.configure(text='-')\n        else:\n            self.sub_frame.forget()\n            self.toggle_button.configure(text='+')\n\n\n\nApp(tkinter.Tk(), \"Factory Guard\")\n","repo_name":"ThaiTuyen/SmartCambusSystem","sub_path":"Source Code/rasp/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":13734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10759673477","text":"from contants import *\n\n\nclass Lexer():\n\n    def __init__(self , data):\n        self.data = data\n        self.tokens = []\n\n    def _parseBoolean(self, sIndex, data):\n        word = \"\"\n        maxSearchLen = 5 # because false is 5 letters long\n        \n        if data[sIndex] == \"t\":\n            maxSearchLen = 4\n        \n        maxSearchLen += sIndex\n        maxSearchLen = min(maxSearchLen , len(data))\n\n        for i in range(sIndex , maxSearchLen):\n            word += data[i]\n\n            if word in BOLEANS:\n                return i , word\n\n        raise Exception(\"Invalid Boolean Token `\" + word.strip() + \"`\")\n\n    def _parseNull(self, sIndex, data):\n        word = \"\"\n        maxSearchLen = 4 # because null is 4 letters long\n        \n        maxSearchLen += sIndex\n        maxSearchLen = min(maxSearchLen , len(data))\n\n        for i in range(sIndex , maxSearchLen):\n            word += data[i]\n\n            if word == \"null\":\n                return i , word\n\n        raise Exception(\"Invalid Null Token `\" + word.strip() + \"`\")\n\n    def _parseNumber(self, sIndex , data):\n        dataLenght = len(data)\n        totalDots = 0\n        number = \"\"\n        expecting = []\n        for i in range(sIndex , dataLenght): \n            letter = data[i]\n\n            if letter in DIGITS + [\".\" , \"-\" , \"e\" , \"+\"]:\n\n                if letter in expecting:\n                    number += letter\n                    expecting = []\n                elif letter == \"-\":\n                    if len(number) != 0 : raise Exception(\"Invalid Number token, near `\" + data[i] + \"` , \" + data[sIndex:sIndex+i])\n                    number += letter\n                \n                elif letter == \".\":\n                    if totalDots == 1: raise Exception(\"Invalid Number token, near `\" + data[i] + \"` , \" + data[sIndex:sIndex+i])\n                    totalDots += 1\n                    number += letter\n                elif letter == \"e\":\n                    number += letter\n                    expecting = [\"+\" , \"-\"]\n                else:\n                    number += letter\n            else:\n                if len(number) != 0:\n                    if number[0] == \"0\" and totalDots == 0:\n                        raise Exception(\"Invalid Number token, near `\" + data[i] + \"` , \" + data[sIndex:sIndex+i])\n                    return i - 1 , number\n                raise Exception(\"Invalid Token, empty number \" + data[sIndex:sIndex+20])\n\n        raise Exception(\"Invalid Token Number \" + data[sIndex:sIndex+20])\n\n\n    def _parseString(self, sIndex , data):\n\n        assert data[sIndex] == \"\\\"\" , \"String must start with a \\\"\"\n        \n        dataLength = len(data)\n        word = data[sIndex]\n        sIndex += 1\n        \n        for i in range(sIndex , dataLength):\n            letter = data[i]\n            word += data[i]\n            \n            if data[i-1:i+1] == \"\\\\\\\"\": \n                pass\n            elif letter == \"\\\"\":\n                return i , word \n\n        raise Exception(\"Invalid Token string \" + data[sIndex : sIndex + 10])\n\n    def sdf(self, word):\n        if word == \"true\" or word == \"false\":\n            return True\n        return False\n\n\n    def printTokens(self):\n        for token in 
self.tokens:\n print(token)\n\n def lexer(self):\n\n dataLength = len(self.data)\n sIndex = 0 # startIndex\n index = 0 \n commentMode = False\n\n while index < dataLength:\n # print(\"Index is \" , index , \" / \" , dataLength)\n letter = self.data[index]\n word = self.data[sIndex:index]\n\n # print(\"Letter is `\" , letter , \"`\" , commentMode , letter == \"\\n\")\n\n if letter in [\" \" , \"\\n\" , \"\\t\"]:\n pass\n # elif commentMode:\n # pass\n # comment end \n # elif letter == \"\\n\" and commentMode:\n # commentMode = False\n\n # elif letter == \"*\" and commentMode:\n # if index+1 < dataLength and self.data[index+1] == \"/\":\n # commentMode = False\n # else: raise Exception(\"Error: Invalid Token `\" + letter.strip() + \"`\")\n\n # comment start\n # elif letter == \"/\":\n # if index+1 < dataLength and (self.data[index+1] == \"/\" or self.data[index:index+2] == \"/*\"):\n # commentMode = True\n # else: raise Exception(\"Error: Invalid Token `\" + letter.strip() + \"`\")\n\n elif letter == \"{\":\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"lbrack\"] , index , index+1))\n elif letter == \"}\":\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"rbrack\"] , index , index+1))\n elif letter in \"[\":\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"lsqbrack\"] , index , index+1))\n elif letter in \"]\":\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"rsqbrack\"] , index , index+1))\n elif letter in [\":\"]:\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"colon\"] , index , index+1))\n elif letter in [\",\"]:\n self.tokens.append(Token(letter , DEFINED_TOKENS[\"comma\"] , index , index+1))\n\n elif letter in [\"t\" , \"f\"]:\n eIndex , word = self._parseBoolean(index, self.data)\n self.tokens.append(Token(word , DEFINED_TOKENS[\"boolean\"] , index , eIndex))\n index = eIndex\n elif letter == \"n\":\n eIndex , word = self._parseNull(index, self.data)\n self.tokens.append(Token(word , DEFINED_TOKENS[\"null\"] , index , eIndex))\n index = eIndex\n\n elif letter in DIGITS + [\"-\"]:\n eIndex , word = self._parseNumber(index, self.data)\n self.tokens.append(Token(word , DEFINED_TOKENS[\"number\"] , index , eIndex))\n index = eIndex\n \n elif letter == \"\\\"\":\n eIndex , word = self._parseString(index, self.data)\n self.tokens.append(Token(word , DEFINED_TOKENS[\"string\"] , index , eIndex))\n index = eIndex\n else:\n raise Exception(\"Invalid Token near `\" + self.data[index] + \"` \" + self.data[index:index+10])\n \n index += 1","repo_name":"ayushkatoch98/JSONParser","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":6320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17137173899","text":"from firebase_admin import credentials, initialize_app, db\nfrom warnings import warn\nimport api_testing as api\nfrom inspect import currentframe, getouterframes\nfrom datetime import datetime, timedelta\nimport math\n\n\ncred = credentials.Certificate(\"./bazaar-96e54-firebase-adminsdk-zac07-5bfa8beefa.json\")\ndefault_app = initialize_app(cred, {'databaseURL': 'https://bazaar-96e54-default-rtdb.firebaseio.com/'})\n\nhead = db.reference('/')\nstores = db.reference('/Stores/')\nusers = db.reference('/Users/')\nposts = db.reference('/Posts/')\nfull_bar = 1000\n\n# the fields that must be in every store\nstore_fields = {'Classification', 'Industry', 'Location', 'Name', 'Image', 'Link'}\npurchase_fields = {'description', 'orderId', 'price', 'productId', 'productName', 'productType', 
'quantity', 'retailer'}\n\n\ndef check_data(func):\n    def wrapper(*args, **kwargs):\n        ref = kwargs['ref'] if 'ref' in kwargs else args[1]\n        data = kwargs['data'] if 'data' in kwargs else args[0]\n\n        if getouterframes(currentframe(), 2)[1][3] == 'add_order':\n            check = purchase_fields\n        elif ref is stores:\n            check = store_fields\n        else:\n            check = []\n\n        for name, info in data.items():\n            if ref is stores:\n                data[name]['Name'] = name\n\n            for field in check:\n                if field not in info:\n                    warn(f'NO {field} WAS GIVEN -- FILLED IN NULL')\n                    data[name][field] = \"NULL\"\n        return func(*args, **kwargs)\n    return wrapper\n\n@check_data\ndef overwrite(data, ref=head):\n    '''\n    Overwrites database at given reference with given data\n    '''\n    ref.set(data)\n\ndef get(ref=head, sort_by=None):\n    '''\n    Returns the database's contents at the given reference\n    If key is not None, then orders contents by key\n    '''\n    if sort_by is None:\n        return ref.get()\n    else:\n        return ref.order_by_child(sort_by).get()\n\ndef getUserIds():\n    return set(get(ref=users).keys())\n\ndef getStores():\n    return set(get(ref=stores).keys())\n\n@check_data\ndef add(data, ref=head):\n    '''\n    Adds data to database at given reference\n    NOTE: using an existing key will overwrite that existing data\n    '''\n    for name,info in data.items():\n        ref.child(name).set(info)\n\ndef add_order(orderId):\n    customerId, products = api.parse_order(api_order(orderId, 'GET'))\n    if customerId in getUserIds():\n        for prodId,prodInfo in products.items():\n            add({prodId: prodInfo}, ref=db.reference(f'/Users/{customerId}/Purchases/'))\n    else:\n        warn(f'CUSTOMER \"{customerId}\" DOES NOT EXIST -- ORDER {orderId} NOT ADDED')\n\ndef update(key, mapping, ref=head):\n    '''\n    Updates values at key with the given mapping\n    '''\n    ref.child(key).update(mapping)\n\ndef remove(key, ref=head):\n    '''\n    Removes key at the given reference\n    '''\n    ref.child(key).set({})\n\ndef api_order(input, method, debug=False):\n    if method == 'POST':\n        # for each purchase:\n        ## modifierCode contains 'E' if environmentally friendly\n        ## extendedAmount is the score of the order (number of attributes it checks off)\n        if not debug:\n            start = datetime.now()\n            time = start\n\n            while time <= start:\n                code = get(db.reference('/Scanned/'))\n                time = datetime.strptime(code['Time'], '%Y/%m/%d %H:%M:%S')\n\n            input['customer'] = {'id': code['ID']}\n\n        allStores = get(stores)\n        points = 1\n        num_env, num_clothes = 0,0\n\n        attrs = (allStores[input['owner']]['Classification'] if input['owner'] in allStores else '')\n\n        for prod in input['orderLines']:\n            if 'E' in prod['modifierCode']: num_env += 1\n            if prod['itemType'] == 'Clothes': num_clothes += 1\n\n            prod_attrs = attrs + prod['modifierCode']\n            score = len(prod_attrs)\n            prod_points = prod['unitPrice']*math.log(score + 1)\n\n            points += prod_points\n            prod['extendedAmount'] = prod_points\n            prod['priceModifiers'] = [{}]\n            prod['priceModifiers'][0]['amount'] = score\n            prod['priceModifiers'][0]['description'] = prod_attrs\n\n        userId = input['customer']['id']\n        user = get(users)[userId]\n        if 'Points' in user: points += user['Points']\n\n\n        points += check_posted(userId)\n        update(userId, {'Points': points}, users)\n        update_badges(userId, num_env, num_clothes)\n        reorder_posts()\n\n    return api.api_order(input, method)\n\n\ndef update_badges(userId, num_env, num_clothes):\n    '''\n    Updates the user's progress towards each of the badges\n    '''\n    user = get(users)[userId]\n    points = user['Points']\n\n    if 'Badges' in user:\n        num_clothes += 
user['Badges']['clothes_collector']['num_clothes_purchases']\n num_env += user['Badges']['planet_saver']['num_env_purchases']\n\n update('Badges', {\n 'tree_hugger': {'achieved': num_env >= 1},\n 'stronger_together': {'achieved': points >= full_bar},\n 'planet_saver': {'achieved': num_env >= 5, 'num_env_purchases': num_env},\n 'clothes_collector': {'achieved': num_clothes >= 10, 'num_clothes_purchases': num_clothes},\n 'best_friends': {'achieved': len(user['Friends'] if 'Friends' in user else []) >= 5},\n 'going_bazaar': {'achieved': points >= full_bar*10}\n }, db.reference(f'/Users/{userId}/'))\n\ndef check_posted(userId):\n '''\n Returns bonus points for when users post about their purchases\n '''\n user = get(users)[userId]\n bonus = 0\n\n if 'Purchases' in user:\n for purchase_id,purchase in user['Purchases'].items():\n if purchase['posted'] and not purchase['doubled']:\n bonus += purchase['points']\n update(purchase_id, {'doubled': True}, db.reference(f'/Users/{userId}/Purchases/'))\n\n return bonus\n\ndef reorder_posts():\n '''\n Updates the order of the posts in the database\n '''\n allPosts = get(db.reference('/Posts/'))\n for postId,info in allPosts.items():\n update(postId,\n {'postRanking': 1 / (info['likes']/5 + \\\n max(10 - (datetime.now() - datetime.strptime(info['time'], '%Y/%m/%d %H:%M:%S'))/timedelta(days=1), 0) + \\\n info['score']*2.5)\n }, posts)\n\n\n\n\n# data = {\n# 'Beautiful Restaurant': {\n# 'Classification': '??',\n# 'Industry': 'Restaurant',\n# 'Location': 'Atlanta'\n# }\n# }\n\n\n# update('Solar Nails', {'Classification': 'Minority-'}, stores)\n# update('Stores/Solar Nails', {'Classification': 'Minority-owned'}, head)\n\n# add({}, stores)\n\n# print(get(stores))\n\n# print(get(stores, \"Name\"))\n\n# remove('Beautiful Restaurant', stores)\n\n\n\n\n# for k,v in orderDetails.items():\n# print(f'{k}: {v}')\n\n\n# print(api_order('12601857220537095966'))\n# add_order('12940712621800496162')\n# add_order('11669656148723416957')\n\n\ndebug = True\ntest_dict = {\n 'comments': 'APEX Museum: Bed sheets, Stuffed Animal',\n 'orderLines': [\n {\n 'description': 'Rainbow Bed sheets with Silk linen tops and Tao\\'s comforter mattress',\n 'itemType': 'House',\n 'productId': {'value': 'Rainbow bed sheets'},\n 'quantity': {'value': 2},\n 'unitPrice': 0,\n 'modifierCode': 'E'\n },\n {\n 'description': 'Bat Stuffed Animal',\n 'itemType': 'Kids',\n 'productId': {'value': 'Bat Stuffed Animal 5ft x 2ft'},\n 'quantity': {'value': 1},\n 'unitPrice': 0,\n 'modifierCode': ''\n }\n ],\n 'owner': 'APEX Museum',\n # 'payments': [{'amount': 43.50,}],\n}\n\nif debug: test_dict['customer'] = {'id': '0kKmboE80YhrRUS5YCb3J4OjD802'}\n\n\n# print(api_order(test_dict, 'POST'))\n# print(api_order('13139740844105295507', 'GET')['comments'])\n# add_order(api_order(test_dict, 'POST', debug=debug))\n# api.parse_order(api_order('12145144029130432348', 'GET'))\n# update_badges('0kKmboE80YhrRUS5YCb3J4OjD802', 1, 5)\n\n# print(type(get(users)['0kKmboE80YhrRUS5YCb3J4OjD802']['Purchases']['764395a1c1dc4b51855610dd615b32cd']['posted']))\n# print(check_posted('0kKmboE80YhrRUS5YCb3J4OjD802'))\n\n# reorder_posts()\n# print(get(posts, 'postRanking').keys())\n\n\n\n\n","repo_name":"markostep/Bazaar","sub_path":"backend/firebase_testing.py","file_name":"firebase_testing.py","file_ext":"py","file_size_in_byte":8357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41411392198","text":"inp1 = \"\"\"5 3\n1 2 100\n2 5 100\n3 4 100\n\"\"\"\n\ninp2 = 
open(\"test.txt\", 'r')\ninp2 = inp2.readlines()\nx = []\nfor i in inp2:\n\ti = i.strip()\n\ti = list(map(int,i.split()))\n\tx.append(i)\n\ninp2 = x\ninp1 = list(map(str, inp1.strip().split('\\n')))\n\ny =[]\ndef parse(inp1):\n\tfor i in inp1:\n\t\ti = list(map(int, i.split()))\n\t\ty.append(i)\n\n\tinp1 = y\n\treturn inp1\n\ninp1 = parse(inp1)\nprint(len(inp2[1:]))\n\ndef addValue(arr, val):\n\ttemp = []\n\tfor i in arr:\n\t\ti += val\n\t\ttemp.append(i)\n\treturn temp\n\ndef findMax(arr):\n\ttemp = -1\n\tfor ele in arr:\n\t\tif temp < ele:\n\t\t\ttemp = ele\n\treturn temp\n\ndef arrayManipulation(n, queries):\n\tarray = [0 for i in range(n+1)]\n\tfor query in queries:\n\t\tif(len(query) == 3):\n\t\t\tstart_from = query[0]\n\t\t\tgo_to = query[1]+1\n\t\t\tinsertion = query[2]\n\t\t\tarray[start_from:go_to] = [x+insertion for x in array[start_from:go_to]]\n\t\t\t# array[start_from:go_to] = addValue(array[start_from:go_to], insertion)\n\t\t\t#print(array[1:])\n\t\telif(len(query) == 2):\n\t\t\tindex = query[0]\n\t\t\tinsertion = query[1]\n\t\t\tarray[index] += insertion\n\t\t\tprint(len(query),index,insertion)\n\treturn findMax(array[1:n+1])\n\nprint(arrayManipulation(inp2[0][0],inp2[1:]))\n","repo_name":"Digit4/randoms","sub_path":"array_manipulation.py","file_name":"array_manipulation.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30618103514","text":"# -*- coding: utf-8 -*-\n\"\"\"\nContributors:\n - Auriane Blarre\n\"\"\"\nimport re\n\n\ndef write_line(time, phase):\n return \"\"\" \\n\"\"\".format(int(time), phase)\n\ndef modify_offset(theta_incoming, theta_outgoing, gi_incoming, gi_outgoing, C, trans_time=5):\n # Transition: before light turns red from green it is yellow for 3s\n text = \"\"\"\"\"\"\n t0 = 0\n\n # First Phase\n t1 = min(theta_incoming, theta_outgoing)\n phase = \"GGgsrrrGGgsrrr\"\n if t1 - t0 > 0:\n text += write_line(t1 - t0, phase)\n\n # Second Phase\n t2 = max(theta_incoming, theta_outgoing)\n if t2 == theta_outgoing:\n # incoming is green\n # down transitions yellow\n trans_phase = \"GGgsrrryygsrrr\"\n main_phase = \"GGgsrrrsrrGGGg\"\n if t2 == theta_incoming:\n # outgoing is green\n # up transitions yellow\n trans_phase = \"yygsrrrGGgsrrr\"\n main_phase = \"srrGGGgGGgsrrr\"\n\n if t2 - t1 - trans_time > 0:\n text += write_line(trans_time, trans_phase)\n text += write_line(t2 - t1 - trans_time, main_phase)\n\n # Third Phase\n # Both are green\n t3 = min(theta_incoming + gi_incoming, theta_outgoing + gi_outgoing)\n if t2 == theta_outgoing:\n # up transitions yellow\n trans_phase = \"yygsrrrsrrGGGg\"\n if t2 == theta_incoming:\n # down transitions yellow\n trans_phase = \"GGgsrrryygsrrr\"\n main_phase = \"srrGGGgsrrGGGg\"\n if t3 - t2 - trans_time > 0:\n text += write_line(trans_time, trans_phase)\n text += write_line(t3 - t2 - trans_time, main_phase)\n\n # Fourth Phase\n t4 = max(theta_incoming + gi_incoming, theta_outgoing + gi_outgoing)\n if t4 == theta_outgoing + gi_outgoing:\n # incoming is red\n trans_phase = \"srrGGGgGGgyyyg\"\n main_phase = \"srrGGGgGGgsrrr\"\n if t4 == theta_incoming + gi_incoming:\n # outgoing is red\n trans_phase = \"GGgyyygsrrGGGg\"\n main_phase = \"GGgsrrrsrrGGGg\"\n\n if t4 - t3 - trans_time > 0:\n text += write_line(trans_time, trans_phase)\n text += write_line(t4 - t3 - trans_time, main_phase)\n\n # Fifth Phase\n # Both are red\n t5 = C\n if t4 == theta_outgoing + gi_outgoing:\n trans_phase = \"GGgyyyyGGgsrrr\"\n if t2 == 
theta_incoming:\n        trans_phase = \"GGgsrrrsrryyyy\"\n    main_phase = \"GGgsrrrGGgsrrr\"\n\n    if t5 - t4 - trans_time > 0:\n        text += write_line(trans_time, trans_phase)\n        text += write_line(t5 - t4 - trans_time, main_phase)\n    return text\n\ndef modify_offsets(thetas_incoming, thetas_outgoing, gis_incoming, gis_outgoing, C, trans_time=3,\n                   network_path=\"quickstart.net.xml\"):\n    file = open(network_path, \"r\")\n    network = file.read()\n    file.close()\n\n    id = 0\n    for theta_incoming, theta_outgoing, gi_incoming, gi_outgoing in zip(thetas_incoming, thetas_outgoing, gis_incoming, gis_outgoing):\n        replacement = modify_offset(theta_incoming, theta_outgoing, gi_incoming, gi_outgoing, C, trans_time=trans_time)\n\n        # regex anchors delimiting this intersection's tlLogic element in the\n        # .net.xml (attribute values assumed: SUMO's static-program defaults)\n        start = \"\"\"<tlLogic id=\"{0}\" type=\"static\" programID=\"0\" offset=\"0\">\\n\"\"\".format(id)\n        end = \"</tlLogic>\"\n        match = re.match(r'(.+%s\\s*).+?(\\s*%s.+)' % (start, end), network, re.DOTALL)\n        network = match.group(1) + replacement + match.group(2)\n        id += 1\n\n    file_handle = open(network_path, \"w\")\n    file_handle.write(network)\n    file_handle.close()\n\n    return network\n\n\nif __name__ == '__main__':\n    thetas_incoming = [20 for i in range(8)]\n    thetas_outgoing = [20 for i in range(8)]\n    gis_incoming = [50 for i in range(8)]\n    gis_outgoing = [50 for i in range(8)]\n    C = 90\n\n    modify_offsets(thetas_incoming, thetas_outgoing, gis_incoming, gis_outgoing, C, trans_time=3,\n                   network_path=\"quickstart.net.xml\")","repo_name":"aurianeb/SMATS_Capstone","sub_path":"Artery_With_OD/run_sumo.py","file_name":"run_sumo.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"41614680948","text":"from tkinter import*\nfrom tkinter import filedialog\nfrom Tabla import*\nfrom Igralci import*\nimport ast\nimport argparse\nimport logging\nimport re\ntry:\n    import winsound\nexcept:\n    pass\n\n\n# Zacetne vrednosti.\nBELI = \"Beli\"\nCRNI = \"Crni\"\nVELIKOST = 6\nMINIMAX_GLOBINA = 2\nALFABETA_GLOBINA = 2\n\n\n# Razred, ki definira graficni vmesnik.\nclass Crnobelo():\n    # Ustvarimo tag, da se bomo lahko kasneje sklicevali.\n    TAG_KROG = 'krog'\n    TAG_POTEZA = 'poteza'\n    TAG_NAMIG = 'namig'\n    TAG_ZP = 'zadnja' #poteza\n\n\n    def __init__(self, master, velikost=VELIKOST):\n        self.BELI = None\n        self.CRNI = None\n        self.igra = None\n        self.zvocnik = True\n        self.NAMIG = False\n\n        # Ustvarimo napis, ki nas obvesca o dogajanju. Sporoča, kaj se dogaja z igralcem racunalnik.\n        self.napis = StringVar()\n        Label(master, textvariable=self.napis).grid(row=0, column=0)\n\n        # Ustvarimo napis, ki nas obvesca o dogajanju. 
Sporoča, kdo je na vrsti.\n self.napis2 = StringVar()\n Label(master, textvariable=self.napis2).grid(row=1, column=0)\n \n \n # Nastavi velikost.\n self.velikost = velikost\n\n # Ustvari canvas.\n self.canvas = Canvas(master, width=100*(self.velikost+1), height=100*(self.velikost +1))\n\n self.canvas.grid(row=2, column=0, columnspan=2)\n\n # Na canvas narise zacetno polje.\n self.narisi()\n\n # Povezemo klik z dogodkom.\n self.canvas.bind(\"\", self.plosca_klik)\n \n # Gumb za namig.\n Button(master, text= \"Namig\", command = lambda: self.pobarvaj_namig()).grid(row = 0, column = 1, rowspan = 2)\n \n # Glavni menu.\n menu = Menu(master)\n master.config(menu=menu)\n \n # Velikosti okna ne moremo spreminjati.\n master.resizable(width=False, height=False) \n\n # Dodamo moznosti v menu.\n file_menu = Menu(menu)\n menu.add_cascade(label=\"Datoteka\", menu=file_menu)\n file_menu.add_command(label=\"Nova igra\", command=self.nova_igra)\n file_menu.add_command(label=\"Shrani\", command=self.shrani)\n file_menu.add_command(label=\"Odpri\", command=self.odpri)\n file_menu.add_separator()\n file_menu.add_command(label=\"Izhod\", command=master.destroy)\n\n settings_menu = Menu(menu)\n menu.add_cascade(label=\"Velikost\", menu=settings_menu)\n settings_menu.add_command(label=\"5x5\", command= lambda: self.nova_igra(None, None, 5))\n settings_menu.add_command(label=\"6x6\", command= lambda: self.nova_igra(None, None, 6))\n settings_menu.add_command(label=\"7x7\", command= lambda: self.nova_igra(None, None, 7))\n settings_menu.add_command(label=\"8x8\", command= lambda: self.nova_igra(None, None, 8))\n settings_menu.add_command(label=\"9x9\", command= lambda: self.nova_igra(None, None, 9))\n\n settings_menu = Menu(menu)\n menu.add_cascade(label=\"Igralci\", menu=settings_menu)\n submenu = Menu(menu)\n settings_menu.add_command(label=\"Clovek - Clovek\", command= lambda: self.nova_igra(Clovek(self), Clovek(self), None))\n settings_menu.add_cascade(label='Racunalnik', menu=submenu, underline = 0)\n\n \n submenu.add_command(label=\"Clovek - Random\", command= lambda: self.nova_igra(Clovek(self), Racunalnik(self, Nakljucje()), None))\n submenu.add_command(label=\"Clovek - Racunalnik Minimax\", command= lambda: self.nova_igra(Clovek(self), Racunalnik(self, Minimax(MINIMAX_GLOBINA )), None))\n submenu.add_command(label=\"Clovek - Racunalnik Alfa-beta\", command= lambda: self.nova_igra(Clovek(self), Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)), None))\n submenu.add_command(label=\"Racunalnik Minimax - Racunalnik Alfa-beta\", command= lambda: self.nova_igra(Racunalnik(self, Minimax(MINIMAX_GLOBINA)), Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)), None))\n submenu.add_command(label=\"Racunalnik Alfa-beta - Racunalnik Alfa-beta\", command= lambda: self.nova_igra(Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)), Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)), None))\n\n settings_menu = Menu(menu)\n menu.add_cascade(label=\"Zvok\", menu=settings_menu)\n settings_menu.add_command(label=\"Vklopi zvok\", command = lambda: self.zvok(True))\n settings_menu.add_command(label=\"Izklopi zvok\", command = lambda: self.zvok(False))\n\n settings_menu = Menu(menu)\n menu.add_cascade(label=\"Barva ozadja\", menu=settings_menu)\n settings_menu.add_command(label=\"Siva\", command = lambda: self.canvas.configure(background='light slate gray'))\n settings_menu.add_command(label=\"Modra\", command = lambda: self.canvas.configure(background='light sky blue'))\n settings_menu.add_command(label=\"Zelena\", command = lambda: 
self.canvas.configure(background='pale green'))\n settings_menu.add_command(label=\"Rumena\", command = lambda: self.canvas.configure(background='light goldenrod'))\n settings_menu.add_command(label=\"Brez barve\", command = lambda: self.canvas.configure(background='gray94'))\n\n menu.add_command(label=\"Pomoc\", command = lambda: pomoc())\n\n # Funkcija, ki odpre novo okno. Vsebina je pomoč.\n def pomoc():\n window = Toplevel(root)\n label = Label(window, text = \"\"\"Navodila:\nCilj igre:\nIgro igrata dva igralca na kvadratni sahovnici, katere velikost se da nastaviti v kaskadi velikost.\nZacne igralec, ki polaga bele kroge, nato igralca izmenicno igrata dokler enemu od njih ne zmanjka moznih potez. Takrat je igre konec, \nzmagal je igralec, ki je zadnji opravil potezo. Barva zmagovalca se izpise nad igralno plosco.\n\nPravila igre:\nIgralec lahko takrat ko je na potezi igra svoj krog na tista polja, za katera je izpolnjen naslednji pogoj: Na nobenem od sosednjih polj\nni nasprotnikovega kroga. Pri tem se za sosednja polja stejejo polja levo, desno, nad in pod poljem (ce so seveda znotraj sahovnice). Če\nigralec odigra napačno potezo, ga uporabniski vmesnik na to opozori z napisom \"Neveljavna poteza!\" nad sahovnico. Vsakic ko je na potezi\ncloveski igralec, se mozne poteze obarvajo s sivo.\n\nIzbira igralcev in Namig:\nUporabnik lahko izbira med igralci v kaskadi igralci. Moznih je vec izbir, uporabnik lahko izbere katerakoli dva izmed treh razlicnih\nracunalniskih igralcev in enim cloveskim. Ko je na vrsti racunalniski igralec, se nad sahovnico izpise \"Razmisljam.\". Cloveski igralec ima\nmoznost, da uporabi namig racunalnika s klikom na gumb \"Namig\". Po kliku zacne racunalnik razmisljati, ko izracuna potezo, jo na sahovnici\noznaci z rdeco.\n\nNova igra:\nV kaskadi \"Datoteka\" lahko igralec zacne novo igro. Pri tem se zamenja vrstni red igranja. Na primer: ce je v prejsnji igri igralec 1 ena\nbil beli (in s tem zacel), je sedaj beli njegov nasprotnik (torej zacne on).\n\nZvok:\nVsakic ko se opravi poteza, se zaslisi ton nizke frekvence. Ko je igre konec pa ton visje frekvence. Uporabnik lahko v kaskadi \"Zvok\"\nizklopi oziroma znova vklopi zvocne efekte.\nZvok deluje samo v operacijskem sistemu Windows.\n\nBarva ozadja:\nV kaskadi \"Barva ozadja\" lahko uporabnik izbira barvo ozadja. Izbira lahko med sivo, modro, zeleno in rumeno, lahko pa tudi povrne barvo\nna prvotno.\n\nShrani in odpri:\nV kaskadi \"Datoteka\" ima uporabnik moznost, da igro s klikom na \"Shrani\" shrani v tekstovno datoteko, ki jo sam poimenuje. 
Shranjeno igro\nlahko kadarkoli zopet nadaljuje s klikom na \"Odpri\" in ustrezno izbiro datoteke.\n\nIzhod:\nS klikom na \"Izhod\" v kaskadi \"Datoteka\" uporabnik zapusti igro.\"\"\")\n \n label.pack(side = \"top\", fill = \"both\")\n\n logging.debug(\"Velikost: {0}.\".format(self.velikost))\n \n self.zacni_igro()\n\n # Funkcija za risanje sahovnice.\n def narisi(self):\n for i in range(self.velikost+1):\n self.canvas.create_line((50+i*100*6/(self.velikost)),50,(50+i*100*6/(self.velikost)),650)\n self.canvas.create_line(50,(50+i*100*6/(self.velikost)),650,(50+i*100*6/(self.velikost)))\n\n # Funkcija, ki zacne igro.\n def zacni_igro(self, beli=None, crni=None):\n if not beli:\n beli = Clovek(self)\n if not crni:\n crni = Clovek(self)\n\n logging.debug(\"Beli:{0}, Crni:{1}\".format(beli,crni))\n \n self.igra = Tabla(self.velikost)\n self.nova_igra(beli, crni)\n\n # Funkcija, ki ustvari novo igro.\n def nova_igra(self, beli=None, crni=None, velikost=None):\n self.canvas.delete(Crnobelo.TAG_NAMIG)\n self.canvas.delete(Crnobelo.TAG_ZP)\n self.canvas.delete(Crnobelo.TAG_POTEZA)\n self.NAMIG = False\n\n logging.debug(\"Nova igra\")\n self.prekini_igralce()\n\n if velikost:\n self.velikost = velikost\n self.canvas.delete(\"all\")\n self.narisi()\n else:\n self.canvas.delete(Crnobelo.TAG_KROG)\n\n if beli and crni:\n self.BELI = beli\n self.CRNI = crni\n\n else:\n self.BELI, self.CRNI = self.CRNI, self.BELI\n\n logging.debug(\"Velikost: {0}.\".format(self.velikost))\n\n #Ustvarimo matriko z zacetnimi vrednostmi\n self.igra.matrika = [[[True, True, None] for _ in range(self.velikost)] for _ in range(self.velikost)]\n\n \n self.napis.set(\"\")\n\n self.igra.na_vrsti = BELI\n self.napis2.set(\"Na vrsti je beli.\")\n \n logging.debug(\"Na vrsti:{0}\".format(self.igra.na_vrsti))\n logging.debug(\"Beli: {0}, Crni: {1}\".format(self.BELI, self.CRNI))\n \n #Zacnemo igro\n self.BELI.igraj()\n\n # Funkcija, ki preda dogodek na plosci razredu igralca, ki je storil to potezo.\n def plosca_klik(self, event):\n # Če kliknemo medtem, ko je vklopljen namig, se ne zgodi nič.\n if self.NAMIG:\n pass\n # Predamo informacijo naprej.\n else:\n if self.igra.na_vrsti == BELI:\n self.BELI.klik(event)\n elif self.igra.na_vrsti == CRNI:\n self.CRNI.klik(event)\n else:\n pass\n\n # Funkcija, ki glede na igralca na vrsti in na njegovo dejanje naredi potezo ali pobarva namig, če je vklopljen.\n def izberi(self, xy):\n x = xy[0]\n y = xy[1]\n \n logging.debug(\"Preverim, ce je konec igre.\")\n \n # Preveri, ce je konec igre. V primeru, da je konec, nocemo vec dogajanja na plosci.\n\n if not self.igra.je_konec():\n \n # Pobarvamo namig.\n if self.NAMIG:\n self.canvas.create_rectangle((x * 100* 6/(self.velikost)+ 50), (y *100* 6/(self.velikost)+ 50), (x * 100* 6/(self.velikost)+ 50+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50+100*6/(self.velikost)), fill=\"indian red\", tag=Crnobelo.TAG_NAMIG)\n self.NAMIG = False\n\n # Naredimo potezo.\n else:\n self.napis.set(\"\")\n poteza = self.igra.povleci_potezo(xy)\n \n # Poteza je neveljavna. 
Poskusimo ponovno\n if poteza is None:\n self.napis.set(\"Neveljavna poteza!\")\n if self.igra.na_vrsti == BELI:\n self.BELI.igraj()\n elif self.igra.na_vrsti == CRNI:\n self.CRNI.igraj()\n else:\n assert False\n\n # Poteza je veljavna.\n else:\n if self.igra.na_vrsti == CRNI:\n self.canvas.delete(Crnobelo.TAG_ZP)\n self.canvas.create_oval((x * 100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (y *100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (x * 100* 6/(self.velikost)+ 50-10* 6/(self.velikost)+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50-10* 6/(self.velikost)+100*6/(self.velikost)), fill = \"white\", tag=Crnobelo.TAG_KROG)\n self.canvas.create_oval((x * 100* 6/(self.velikost)+ 50+45* 6/(self.velikost)), (y *100* 6/(self.velikost)+ 50+45* 6/(self.velikost)), (x * 100* 6/(self.velikost)+ 50-45* 6/(self.velikost)+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50-45* 6/(self.velikost)+100*6/(self.velikost)), fill = \"blue\", tag=Crnobelo.TAG_ZP)\n self.napis2.set(\"Na vrsti je crni.\")\n \n else:\n self.canvas.delete(Crnobelo.TAG_ZP)\n self.canvas.create_oval((x * 100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (y *100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (x * 100* 6/(self.velikost)+ 50-10* 6/(self.velikost)+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50-10* 6/(self.velikost)+100*6/(self.velikost)), fill = \"black\", tag=Crnobelo.TAG_KROG)\n self.canvas.create_oval((x * 100* 6/(self.velikost)+ 50+45* 6/(self.velikost)), (y *100* 6/(self.velikost)+ 50+45* 6/(self.velikost)), (x * 100* 6/(self.velikost)+ 50-45* 6/(self.velikost)+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50-45* 6/(self.velikost)+100*6/(self.velikost)), fill = \"blue\", tag=Crnobelo.TAG_ZP)\n self.napis2.set(\"Na vrsti je beli.\")\n\n # Ob odigrani potezi: beep!\n if self.zvocnik:\n try: winsound.Beep(150, 75)\n except: pass\n \n # Ce je igre konec.\n if self.igra.je_konec():\n self.igra.na_vrsti = nasprotnik(self.igra.na_vrsti)\n self.napis2.set(\"\")\n self.napis.set(\"Konec igre! Zmagal je {0}!\".format(self.igra.na_vrsti))\n if self.zvocnik:\n try: winsound.Beep(500, 150)\n except: pass\n\n # Igre ni konec, nadaljujemo.\n else:\n\n if self.igra.na_vrsti == BELI:\n self.BELI.igraj()\n elif self.igra.na_vrsti == CRNI:\n self.CRNI.igraj()\n else:\n assert False\n\n logging.debug(\"{0}\".format(self.igra.veljavne_poteze()))\n\n # Poklice funkcijo izberi z alfabeta in pobarva namig.\n def pobarvaj_namig(self):\n \n self.NAMIG = True\n if self.igra.na_vrsti == BELI and ('Clovek' in (re.findall(r'\\.(.+?)\\s', str(self.BELI)))):\n Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)).igraj()\n self.BELI.igraj()\n \n elif self.igra.na_vrsti == CRNI and ('Clovek' in (re.findall(r'\\.(.+?)\\s', str(self.CRNI)))):\n Racunalnik(self, Alfabeta(ALFABETA_GLOBINA)).igraj()\n self.CRNI.igraj()\n\n # Namig deluje, če ga poklice človek\n else:\n self.NAMIG = False\n pass\n \n # Na canvasu pobarva veljavne poteze. 
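    # --- Illustrative sketch, not part of the original file: the create_oval /
    # create_rectangle calls in this class all repeat the same affine map from a
    # board cell to canvas pixels. A helper such as this hypothetical
    # `cell_to_canvas` would state the mapping once; `inset` nudges a coordinate
    # towards the centre of the square, as the hard-coded 10/45 offsets do.
    def cell_to_canvas(self, cell, inset=0):
        # one board square is 100*6/velikost pixels wide; the grid starts at 50
        step = 100 * 6 / self.velikost
        return cell * step + 50 + inset * 6 / self.velikost
    # e.g. create_oval(self.cell_to_canvas(x, 10), self.cell_to_canvas(y, 10),
    #                  self.cell_to_canvas(x + 1, -10), self.cell_to_canvas(y + 1, -10), ...)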
\n    def pobarvaj_poteze(self):\n        poteze = self.igra.veljavne_poteze()\n        for i in poteze:\n            x, y = i\n            self.canvas.create_rectangle((x * 100* 6/(self.velikost)+ 50), (y *100* 6/(self.velikost)+ 50), (x * 100* 6/(self.velikost)+ 50+100*6/(self.velikost)), (y *100* 6/(self.velikost) + 50+100*6/(self.velikost)), fill='light grey', tag=Crnobelo.TAG_POTEZA)\n\n    # Pobrise veljavne poteze.\n    def pobrisi_poteze(self):\n        self.canvas.delete(Crnobelo.TAG_POTEZA)\n\n    # Funkcija, ki shrani igro v datoteko.\n    def shrani(self):\n        self.prekini_igralce()\n\n        beli = (re.findall(r'\\.(.+?)\\s', str(self.BELI))[0]).lower()\n        crni = (re.findall(r'\\.(.+?)\\s', str(self.CRNI))[0]).lower()\n\n        ime = filedialog.asksaveasfilename(filetypes =((\"Text File\", \"*.txt\"),(\"All Files\",\"*.*\")), defaultextension=\".txt\")\n        if ime == \"\":\n            return\n        with open(ime, \"wt\", encoding=\"utf8\") as f:\n            print(self.igra.matrika, file=f)\n            print(self.igra.na_vrsti, file=f)\n            print(beli, file=f)\n            print(crni, file=f)\n            print(str(self.igra.st_potez), file=f)\n\n    # Funkcija, ki nalozi igro iz datoteke.\n    def odpri(self):\n        ime = filedialog.askopenfilename(filetypes =((\"Text File\", \"*.txt\"),(\"All Files\",\"*.*\")))\n        s = open(ime, encoding=\"utf8\")\n        sez = s.readlines()\n        s.close()\n\n\n        KDO = sez[1].strip()\n        beli = sez[2].strip()\n        crni = sez[3].strip()\n        velikost = len(ast.literal_eval(sez[0].strip()))\n        stevilo = int(sez[4].strip())\n\n\n\n        if str(beli) == \"clovek\":\n            beli = Clovek(self)\n        else:\n            beli = Racunalnik(self, Alfabeta(ALFABETA_GLOBINA))\n\n\n        if str(crni) == \"clovek\":\n            crni = Clovek(self)\n        else:\n            crni = Racunalnik(self, Alfabeta(ALFABETA_GLOBINA))\n\n        self.nova_igra(beli, crni, velikost)\n        self.prekini_igralce()\n        self.napis.set(\"\")\n        self.igra.matrika = ast.literal_eval(sez[0].strip())\n        self.igra.st_potez = stevilo\n\n        for i in range(self.velikost):\n            for j in range(self.velikost):\n                if self.igra.matrika[j][i][2] == \"Beli\":\n                    self.canvas.create_oval((i * 100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (j *100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (i * 100* 6/(self.velikost)+ 50-10* 6/(self.velikost)+100*6/(self.velikost)), (j *100* 6/(self.velikost) + 50-10* 6/(self.velikost)+100*6/(self.velikost)), fill = \"white\", tag=Crnobelo.TAG_KROG)\n                if self.igra.matrika[j][i][2] == \"Crni\":\n                    self.canvas.create_oval((i * 100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (j *100* 6/(self.velikost)+ 50+10* 6/(self.velikost)), (i * 100* 6/(self.velikost)+ 50-10* 6/(self.velikost)+100*6/(self.velikost)), (j *100* 6/(self.velikost) + 50-10* 6/(self.velikost)+100*6/(self.velikost)), fill = \"black\", tag=Crnobelo.TAG_KROG)\n\n\n        if KDO == \"Beli\":\n            self.igra.na_vrsti = BELI\n            self.BELI.igraj()\n            self.napis2.set(\"Na potezi je beli.\")\n        else:\n            self.igra.na_vrsti = CRNI\n            self.CRNI.igraj()\n            self.napis2.set(\"Na potezi je crni.\")\n\n    # Funkcija vklopi ali izklopi zvok.\n    def zvok(self, bool):\n        self.zvocnik = bool\n\n\n    # Funkcija, ki prekine igralca.\n    def prekini_igralce(self):\n        if self.BELI:\n            self.BELI.prekini()\n        if self.CRNI:\n            self.CRNI.prekini()\n\n######################################################################\n## Glavni program\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Igrica Crnobelo\")\n\n    # Opisemo argumente, ki jih sprejmemo iz ukazne vrstice.\n    parser.add_argument('--debug',\n                        action='store_true',\n                        help='vklopi sporocila o dogajanju')\n    \n    parser.add_argument('--globinaM',\n                        default=MINIMAX_GLOBINA,\n                        type=int,\n                        help='globina 
iskanja za minimax algoritem')\n    \n    parser.add_argument('--globinaAB',\n                        default=ALFABETA_GLOBINA,\n                        type=int,\n                        help='globina iskanja za alfabeta algoritem')\n    \n\n    \n    args = parser.parse_args()\n\n    if args.debug:\n        logging.basicConfig(level=logging.DEBUG)\n\n\n\n    # Naredimo glavno okno in nastavimo ime.\n    root = Tk()\n    root.title(\"Crnobelo\")\n    # Naredimo objekt razreda Gui in ga spravimo v spremenljivko,\n    aplikacija = Crnobelo(root)\n    # Kontrolo prepustimo glavnemu oknu. Funkcija mainloop neha\n    # delovati, ko okno zapremo.\n    root.mainloop()\n","repo_name":"julijatominc/programiranje2_crnobelo","sub_path":"Crnobelo.py","file_name":"Crnobelo.py","file_ext":"py","file_size_in_byte":19655,"program_lang":"python","lang":"sl","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"23554211931","text":"# Solve:\ndef solve(input):\n    # initial\n    number = list(input)\n    order = tidy(number)\n\n    # iteration\n    while order != -1:\n        number[order] = str(int(number[order])-1)\n        for i in range(order+1,len(number)):\n            number[i] = '9'\n        order = tidy(number)\n\n    while number[0] == '0':\n        number.pop(0)\n    return ''.join(number)\n\n\n\n    \ndef tidy(noList):\n    for i in range(len(noList)-1):\n        if noList[i+1] < noList[i]:\n            return i\n    return -1\n        # ... which contains the\n        # bill title inside a <font> element.\n        bill_title = short_summary_table.td.font.contents[0]\n        self.debug(\"Found Bill Title: %s\" % bill_title)\n        return bill_title\n\n    def extract_senate_bill_version_link(self, soup):\n        \"\"\"Extract the link which points to the version information for a\n        given bill.\n        \"\"\"\n        # The \"Bill Text\" link for the Senate points you to a page of the\n        # current draft of the bill.  At the top of this page there is a table\n        # containing links for \"Authors and Status\", \"List Versions\", and a\n        # PDF file for the bill. 
This method retrieves the 'href' attribute\n        # for the \"List Versions\" link.\n        table = soup.find('table', attrs={\"summary\" : \"\"})\n        rows = table.findAll(\"td\")\n        version_link = rows[2]\n        bill_version_link = version_link.a.attrs[0][1]\n        self.debug(\"Found Bill Version Link: %s\" % bill_version_link)\n        return bill_version_link\n\n    def extract_bill_versions(self, soup):\n        \"\"\"Extract all versions of a given bill.\n\n        Returns a list of dicts with 'name' and 'url' keys for each version\n        found.\n        \"\"\"\n        bill_versions = list()\n        # A table of all versions of a bill exists in a table\n        # which has a 'summary' attribute with a value of ''.\n        versions_table = soup.find('table', attrs={'summary' : ''})\n        table_rows = versions_table.findAll('tr')\n        for row in table_rows:\n            cols = row.findAll('td')\n            # if the row has more than one column of info, then there's a bill version\n            # in there.\n            if len(cols) > 1:\n                # The version_name and version_url we are looking for are in the\n                # first column of the table.\n                bill_version = dict()\n                bill_version_column = cols[0]\n                bill_version['name'] = self.cleanup_text(bill_version_column.a.contents[0])\n                bill_version['url'] = bill_version_column.a.attrs[0][1]\n                bill_versions.append(bill_version)\n                del bill_version\n        self.debug(\"Found Bill Versions: %d\" % len(bill_versions))\n        return bill_versions\n\n    def extract_bill_sponsors(self, soup):\n        \"\"\"Extract the primary and cosponsors for a given bill.\"\"\"\n        bill_sponsors = list()\n        sponsors_table = soup.find('table', attrs={'summary' : 'Show Authors'})\n        # Sponsors' names are links within the sponsors_table table.\n        sponsors_links = sponsors_table.findAll('a')\n        for link in sponsors_links:\n            sponsor_name = link.contents[0]\n            bill_sponsors.append(sponsor_name)\n        self.debug(\"Sponsors Found for this bill: %d\" % len(bill_sponsors))\n        return bill_sponsors\n\n    def extract_bill_actions(self, soup, current_chamber):\n        \"\"\"Extract the actions taken on a bill.\n        A bill can have actions taken from either chamber.  The current\n        chamber's actions will be the first table of actions.  The other\n        chamber's actions will be in the second table.\n\n        Returns a list of bill actions. 
Each bill action is a dict with keys:\n action_chamber = 'upper|lower'\n action = string\n date = MM/DD/YYYY\n \"\"\"\n\n bill_actions = list()\n action_tables = soup.findAll('table', attrs={'summary' : 'Actions'})\n # First, process the actions taken by the current chamber.\n current_chamber_action_table = action_tables[0]\n current_chamber_action_rows = current_chamber_action_table.findAll('tr')\n for row in current_chamber_action_rows[1:]:\n bill_action = dict()\n cols = row.findAll('td')\n action_date = self.cleanup_text(cols[0].contents[0])\n action_text = self.cleanup_text(cols[1].contents[0])\n bill_action['action_date'] = action_date\n bill_action['action_text'] = action_text\n bill_action['action_chamber'] = current_chamber\n bill_actions.append(bill_action)\n\n # if there are more than one action_table, then the other chamber has\n # taken action on the bill.\n # Toggle the current chamber\n if current_chamber == 'upper':\n current_chamber = 'lower'\n else:\n current_chamber = 'upper'\n if len(action_tables) > 1:\n current_chamber_action_table = action_tables[1]\n current_chamber_action_rows = current_chamber_action_table.findAll('tr')\n for row in current_chamber_action_rows[1:]:\n bill_action = dict()\n cols = row.findAll('td')\n action_date = self.cleanup_text(cols[0].contents[0])\n action_text = self.cleanup_text(cols[1].contents[0])\n bill_action['action_date'] = action_date\n bill_action['action_text'] = action_text\n bill_action['action_chamber'] = current_chamber\n bill_actions.append(bill_action)\n self.debug(\"Actions Found for this bill: %d\" % len(bill_actions))\n return bill_actions\n\n def get_bill_info(self, chamber, session, bill_detail_url, version_list_url):\n \"\"\"Extracts all the requested info for a given bill.\n\n Calls the parent's methods to enter the results into JSON files.\n \"\"\"\n if chamber == \"House\":\n chamber = 'lower'\n else:\n chamber = 'upper'\n\n with self.urlopen(bill_detail_url) as bill_html:\n bill_soup = BeautifulSoup(bill_html)\n\n bill_id = self.extract_bill_id(bill_soup)\n bill_title = self.extract_bill_title(bill_soup)\n bill = Bill(session, chamber, bill_id, bill_title)\n\n # Get all versions of the bill.\n # Versions of a bill are on a separate page, linked to from the column\n # labeled, \"Bill Text\", on the search results page.\n\n with self.urlopen(version_list_url) as version_html:\n version_soup = BeautifulSoup(version_html)\n\n # MN bills can have multiple versions. 
Get them all, and loop over\n # the results, adding each one.\n self.debug(\"Extracting bill versions from: \" + version_list_url)\n bill_versions = self.extract_bill_versions(version_soup)\n for version in bill_versions:\n version_name = version['name']\n version_url = urlparse.urljoin(VERSION_URL_BASE, version['url'])\n bill.add_version(version_name, version_url)\n\n # grab primary and cosponsors\n # MN uses \"Primary Author\" to name a bill's primary sponsor.\n # Everyone else listed will be added as a 'cosponsor'.\n sponsors = self.extract_bill_sponsors(bill_soup)\n primary_sponsor = sponsors[0]\n cosponsors = sponsors[1:]\n bill.add_sponsor('primary', primary_sponsor)\n for leg in cosponsors:\n bill.add_sponsor('cosponsor', leg)\n\n # Add Actions performed on the bill.\n bill_actions = self.extract_bill_actions(bill_soup, chamber)\n for action in bill_actions:\n action_chamber = action['action_chamber']\n action_date = action['action_date']\n action_text = action['action_text']\n bill.add_action(action_chamber, action_text, action_date)\n\n self.save_bill(bill)\n\n def scrape_session(self, chamber, session, session_year, session_number, legislative_session):\n \"\"\"Scrape all bills for a given chamber and a given session.\n\n This method uses the legislature's search page to collect all the bills\n for a given chamber and session.\n \"\"\"\n\n # MN bill search page returns a maximum of 999 search results.\n # To get around that, make multiple search requests and combine the results.\n # when setting the search_range, remember that 'range()' omits the last value.\n search_range = range(0,10000, 900)\n min = search_range[0]\n total_rows = list() # used to concatenate search results\n for max in search_range[1:]:\n # The search form accepts number ranges for bill numbers.\n # Range Format: start-end\n # Query Param: 'bill='\n url = BILL_SEARCH_URL % (chamber, session, min, max-1)\n self.debug(\"Getting bill data from: %s\" % url)\n with self.urlopen(url) as html:\n soup = BeautifulSoup(html)\n # Index into the table containing the bills .\n rows = soup.findAll('table')[6].findAll('tr')[1:]\n self.debug(\"Rows to process: %s\" % str(len(rows)))\n # If there are no more results, then we've reached the\n # total number of bills available for this session.\n if len(rows) == 0:\n self.debug(\"Total Bills Found: %d\" % len(total_rows))\n break\n else:\n total_rows.extend(rows)\n # increment min for next loop so we don't get duplicates.\n min = max\n\n # Now that we have all the bills for a given session, process each row\n # of search results to harvest the details and versions of each bill.\n for row in total_rows:\n # The second column of the row contains a link pointing to\n # the status page for the bill.\n # The fourth column of the row contains a link labeled, \"Bill Text\",\n # pointing to a list of versions of the bill.\n cols = row.findAll('td')\n bill_details_column = cols[1]\n bill_versions_column = cols[3]\n try:\n # Extract the 'href' attribute value.\n bill_details_url = bill_details_column.a.attrs[0][1]\n bill_details_url = urlparse.urljoin(BILL_DETAIL_URL_BASE, bill_details_url)\n except:\n self.warning('Bad bill_details_column: %s' % bill_details_column)\n continue\n try:\n # Extract the 'href' attribute value.\n bill_version_list_url = bill_versions_column.a.attrs[0][1]\n except:\n self.warning('Bad bill_versions_column: %s' % bill_versions_column)\n continue\n # Alas, the House and the Senate do not link to the same place for\n # a given bill's \"Bill Text\". 
Getting a URL to the list of bill\n # versions from the Senate requires an extra step here.\n if chamber == 'Senate':\n senate_bill_text_url = urlparse.urljoin(VERSION_URL_BASE, bill_version_list_url)\n with self.urlopen(senate_bill_text_url) as senate_html:\n senate_soup = BeautifulSoup(senate_html)\n bill_version_list_url = self.extract_senate_bill_version_link(senate_soup)\n bill_version_list_url = urlparse.urljoin(VERSION_URL_BASE, bill_version_list_url)\n self.get_bill_info(chamber, session, bill_details_url, bill_version_list_url)\n\n def scrape(self, chamber, year):\n \"\"\"Initiates the scraping of all bills for a given chamber and year.\"\"\"\n\n # Minnesota legislative session value formula\n # 2009 = '0862009'\n # Bit Value\n # --- -----\n # 1 Session Number (session_number used in query params)\n # 2-4 Legislative Session (i.e. 86th session)\n # 4-8 YYYY four-digit year of the legislative session.\n year_mapping = {\n '1995': ('1791995',),\n '1996': ('0791995',),\n '1997': ('1801997', '2801997', '3801997'),\n '1998': ('1801998', '0801997'),\n '1999': ('0811999',),\n '2000': ('0811999',),\n '2001': ('0822001', '1822001'),\n '2002': ('0822001', '1822002'),\n '2003': ('0832003', '1832003'),\n '2004': ('0832003',),\n '2005': ('0842005',),\n '2006': ('0842005',),\n '2007': ('0852007', '1852007'),\n '2008': ('0852007',),\n '2009': ('0862009',),\n }\n available_chambers = {'lower':'House', 'upper':'Senate'}\n chamber = available_chambers[chamber]\n\n if year not in year_mapping:\n raise NoDataForPeriod(year)\n\n for session in year_mapping[year]:\n session_year = year\n # Unpacking MN session formula described above.\n session_number = session[0]\n legislative_session = session[1:3]\n legislative_session_year = session[-4:]\n self.debug(\"Scraping data for MN - Session: %s, Chamber: %s, Year: %s\" % (session, chamber, year))\n self.scrape_session(chamber, session, session_year, session_number, legislative_session)\n\n","repo_name":"runderwood/fiftystates","sub_path":"fiftystates/scrape/mn/bills.py","file_name":"bills.py","file_ext":"py","file_size_in_byte":15438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"41765371927","text":"for tc in range(1, int(input()) + 1):\n N = int(input()) # 문제 수\n scores = list(map(int, input().split())) # 정해진 배점을 따른다\n res = set()\n arr = [1, 2, 3]\n\n res.add(0)\n for score in scores:\n for item in set(res):\n res.add(score + item)\n\n print(\"#{} {}\".format(tc, len(res)))\n","repo_name":"Fly-Eugene/algo_study","sub_path":"준영/problem_3752.py","file_name":"problem_3752.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13329753274","text":"import sys\nimport datetime\nimport time\n\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtWidgets import (\n QApplication,\n QMainWindow,\n QDialog,\n QSplashScreen,\n QToolButton,\n QToolTip,\n QWidget,\n QMessageBox,\n QAction,\n QFileDialog,\n)\nfrom PyQt5.QtWidgets import (\n QTableWidget,\n QProgressBar,\n QLineEdit,\n QComboBox,\n QFrame,\n QTableWidgetItem,\n QStatusBar,\n)\nfrom PyQt5.QtGui import (\n QIntValidator,\n QDoubleValidator,\n QPixmap,\n QRegExpValidator,\n QColor,\n QBrush,\n QIcon,\n)\nfrom PyQt5.QtCore import QRegExp, QThread, QSize\nfrom PyQt5.QtCore import QDate, QDateTime, QTime, QDir\n\nfrom win32com import client as wc\nimport docx\nimport Ui_open_word as open_word\n\n\nclass call_open_word(QWidget, 
open_word.Ui_Open_word):\n def __init__(self, parent=None):\n super().__init__()\n self.child = open_word.Ui_Open_word()\n self.child.setupUi(self)\n\n def open_word(self):\n word = wc.Dispatch('Word.Application')\n word.visible = 0\n\n my_file_path = QFileDialog.getOpenFileName(self, u'打开文件', '/')\n print(my_file_path)\n if my_file_path[0][-4:] == '.doc' or my_file_path[0][-5:] == '.docx':\n my_worddoc = word.Documents.Open(my_file_path[0].replace('/', '\\\\'))\n my_count = my_worddoc.Paragraphs.Count\n for i in range(my_count):\n my_pr = my_worddoc.Paragraphs[i].Range\n # print(my_pr)\n self.child.word_content_te.append(my_pr.text)\n my_worddoc.Close()\n elif my_file_path[0][-4:] == '.txt':\n f = open(my_file_path[0])\n my_data = f.read()\n f.close()\n self.child.word_content_te.append(my_data)\n else:\n QMessageBox.information(self, u'提示', '不支持的文件格式,只支持 doc、docx、txt')\n\n def open_word_no_os(self):\n my_file_path = QFileDialog.getOpenFileName(self, u'打开文件', '/')\n print(my_file_path)\n if my_file_path[0][-4:] == '.doc' or my_file_path[0][-5:] == '.docx':\n doc = docx.Document(my_file_path[0].replace(u'/', u'\\\\'))\n for my_paragraph in doc.paragraphs:\n my_pr = my_paragraph.text\n # print(my_pr)\n self.child.word_content_te.append(my_pr)\n elif my_file_path[0][-4:] == '.txt':\n f = open(my_file_path[0])\n my_data = f.read()\n f.close()\n self.child.word_content_te.append(my_data)\n else:\n QMessageBox.information(self, u'提示', '不支持的文件格式,只支持 doc、docx、txt')\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n myWin = call_open_word()\n myWin.show()\n\n sys.exit(app.exec_())","repo_name":"asi1117/pythonProject","sub_path":"test4/call_open_word.py","file_name":"call_open_word.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40513588621","text":"n = int(input())\r\nnumbers = [int(input()) for i in range(n)]\r\n\r\ns = sum(numbers)\r\nw = (s + 1) // 2\r\n\r\nf = [0] * (w + 1)\r\nfor i in numbers:\r\n for j in range(w, i - 1, -1):\r\n f[j] = max(f[j], f[j - i] + i)\r\n\r\nprint(abs(f[w] * 2 - s))\r\n","repo_name":"GuuJiang/51nod","sub_path":"1007.py","file_name":"1007.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3708257986","text":"import types\nimport bpy\nimport os\nimport numpy as np\nimport random\nimport copy\n\nfrom .camera_module import BlendCamera\n\n\nclass BlendObject:\n\n def __init__(self, object_info: dict, index: int, collection):\n \n #Collect input data\n self.filepath = object_info['filepath']\n self.scale = object_info['scale']\n self.mass = object_info['mass']\n self.collision_shape = object_info['collision_shape']\n self.index = index\n\n #Set unique pbject name and add to collection\n self.name = 'DataPipe_object.{:04d}'.format(self.index) #Set object name\n self.objects_collection = collection\n\n print(\"### OBJECT {} CREATED\".format(self.name))\n\n #Import object to blender\n filename, blend_ob, blend_mat, blend_mesh = self.import_ob(filepath=self.filepath, index=self.index, scale=self.scale)\n self.filename = filename\n self.blend_ob = blend_ob\n self.blend_mat = blend_mat\n self.blend_mesh = blend_mesh\n\n self.dimensions = self.get_object_dimensions()\n\n \n def import_ob(self, filepath: str, index: int, scale: float):\n \n head, tail = os.path.split(filepath)\n filename = tail.replace('.obj', '') #Extract filename\n\n 
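        # Illustrative aside, not part of the original file: tail.replace('.obj', '')
        # only strips a lowercase '.obj'; os.path.splitext splits off whatever
        # extension is present, regardless of case, e.g.
        #   stem, ext = os.path.splitext('part.OBJ')   # -> ('part', '.OBJ')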
bpy.ops.object.select_all(action='DESELECT')\n bpy.ops.import_scene.obj(filepath=str(filepath))\n\n obj_in_file = len(bpy.context.selected_objects)\n if obj_in_file != 1: #Can only contain one object.\n raise Exception(\".obj file can not contain more than one object, there are {} objects in file:\\n{}\".format(obj_in_file, filepath))\n \n if bpy.context.selected_objects[0].name != filename:\n filename = bpy.context.selected_objects[0].name\n \n obj = bpy.data.objects[filename]\n mat = obj.active_material\n mesh = bpy.data.meshes[filename]\n\n #Set names to current object name\n obj.name = self.name\n mat.name = self.name\n mesh.name = self.name\n\n #Remove from default collection and add to datapipe object collection\n obj.users_collection[0].objects.unlink(obj)\n self.objects_collection.objects.link(obj)\n\n #Apply object scaling\n obj.scale = (scale, scale, scale) #Set scale from user input\n bpy.ops.object.transform_apply(location=False, scale=True, rotation=False) #Apply scale to object\n \n #Set object physics properties\n bpy.context.view_layer.objects.active = obj\n bpy.ops.rigidbody.object_add(type='ACTIVE')\n bpy.context.object.rigid_body.collision_shape = self.collision_shape \n obj.rigid_body.mass = self.mass\n obj.rigid_body.collision_margin = 0.001\n\n obj.pass_index = index #Set pass index for masked image\n\n return filename, obj, mat, mesh\n \n def get_object_dimensions(self):\n\n return self.blend_ob.dimensions\n\n def delete_ob(self):\n print(\"Blender object {} deleted\".format(self.name))\n\n bpy.data.objects.remove(self.blend_ob, do_unlink=True)\n bpy.data.materials.remove(self.blend_mat, do_unlink=True)\n bpy.data.meshes.remove(self.blend_mesh, do_unlink=True)\n\n def place_ob(self, x, y, z):\n \n self.blend_ob.location = x, y, z\n self.blend_ob.rotation_mode = 'XYZ'\n self.blend_ob.rotation_euler = (random.random()*2*np.pi, random.random()*2*np.pi, random.random()*2*np.pi)\n \n\n\nclass ObjectManager:\n\n def __init__(self, config: dict):\n \n self.objects_config = config['objects'] #Collect input dict\n\n self.objects_info_list = self.objects_config['objects_list'] #Object info list\n self.objects_in_scene = []\n\n self.objects_collection = self.create_objects_collection() #Create collection to store pipeline objects\n\n\n def create_objects_collection(self):\n\n objects_collection = bpy.data.collections.new('DataPipe_objects')\n bpy.context.scene.collection.children.link(objects_collection)\n\n return objects_collection\n\n def import_objects(self):\n\n self.delete_all_objects()\n\n index = 1\n print(\"\\n### Importing objects ###\\n\")\n for object_input in self.objects_info_list:\n\n max_instances = object_input['max']\n min_instances = object_input['min']\n\n instances_in_scene = random.randint(a=min_instances, b=max_instances)\n\n for instance in range(instances_in_scene):\n\n obj = BlendObject(object_info=object_input, index=index, collection=self.objects_collection)\n\n self.objects_in_scene.append(obj)\n\n index += 1\n random.shuffle(self.objects_in_scene) #Randomizing the order of the objects\n\n def delete_all_objects(self):\n if len(self.objects_in_scene) != 0:\n \n for obj in self.objects_in_scene:\n obj.delete_ob()\n\n del obj\n\n self.objects_in_scene = []\n\n def create_initial_positions(self, scene):\n\n drop_zone_loc = scene.drop_zone_location\n drop_zone_dim = scene.drop_zone_dimensions\n\n z = drop_zone_loc[2] #Set initial z-coordinate to be at the midpoint of the dropzone height\n delta_z = 0\n\n max_x_coord = drop_zone_loc[0] + 
drop_zone_dim[0]/2 #Max x-value to place objects\n min_x_coord = drop_zone_loc[0] - drop_zone_dim[0]/2 #Min x-value to place objects\n\n max_y_coord = drop_zone_loc[1] + drop_zone_dim[1]/2 #Max y-value to place objects\n min_y_coord = drop_zone_loc[1] - drop_zone_dim[1]/2 #Min y-value to place objects\n\n for obj in self.objects_in_scene: #place objects random\n \n max_dim = max(obj.dimensions) #The object's maximal dimension (either x, y, or z direction)\n\n max_x_obj = max_x_coord - max_dim/2\n min_x_obj = min_x_coord + max_dim/2\n\n max_y_obj = max_y_coord - max_dim/2\n min_y_obj = min_y_coord + max_dim/2\n\n x = random.random()*(max_x_obj-min_x_obj) + min_x_obj\n y = random.random()*(max_y_obj-min_y_obj) + min_y_obj\n\n z += delta_z + max_dim/2\n\n obj.place_ob(x=x, y=y, z=z)\n\n delta_z = max_dim/2\n \n def get_objects_information_dict(self, camera: BlendCamera):\n\n obj_output_list = []\n\n for obj in self.objects_in_scene:\n\n dict = {}\n\n wrld2cam_transform = np.asarray(camera.blend_cam_obj.matrix_world)\n\n wrld2obj_transform = np.asarray(obj.blend_ob.matrix_world)\n \n cam2obj_pose = np.matmul(np.linalg.inv(wrld2cam_transform), wrld2obj_transform)\n\n dict = {'name': obj.name,\n 'filename': obj.filename,\n 'mask_index': obj.index,\n 'cam2obj_pose': cam2obj_pose}\n\n obj_output_list.append(dict)\n\n\n return obj_output_list","repo_name":"wileik15/DataPipe","sub_path":"src/objects_module.py","file_name":"objects_module.py","file_ext":"py","file_size_in_byte":6887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32138658235","text":"from functools import reduce\n\nlista = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n\n\ndef isPar(x):\n if x % 2 == 0:\n return True\n return False\n\n\ndef suma(a, b):\n return a + b\n\n\npares = list(filter(isPar, lista))\nprint(pares)\nresultadoSuma = reduce(suma, pares)\nprint(resultadoSuma)\n","repo_name":"webdevelopersbierzo/ob-python","sub_path":"ejercicio9/ejercicio9-2.py","file_name":"ejercicio9-2.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28236156936","text":"from flask import Flask, render_template,request\nimport nltk, json,pickle\nimport numpy as np\nimport random\nfrom intents_reference import start_intents\nfrom model_builder import start_model\nfrom nltk.stem import SnowballStemmer\nfrom tensorflow.keras.models import load_model\nimport time\nstemmer = SnowballStemmer('spanish')\n\n\nmodel=load_model(\"chatbot_model.h5\")#Cargamos el modelo\nintents= json.loads(open(\"intents.json\").read())#Cargamos el json | \"base de datos\"\nwords=pickle.load(open(\"words.pkl\",\"rb\"))#cargamos la biblioteca de las palabras\nclasses=pickle.load(open(\"classes.pkl\",\"rb\"))#cargamos la biblioteca de las clases\nglobal date_time\nglobal time_time\ntime_time=(time.strftime(\"%I:%M:%S\"))#hora\ndate_time=(time.strftime(\"%d/%m/%y\"))#fecha\n\ndef clean_up_sentence(sentence):\n \n sentence_words=nltk.word_tokenize(sentence) #tokenizamos las palabras\n sentence_words=[stemmer.stem(word.lower()) for word in sentence_words] #lematizamos las palabras\n return sentence_words\n\n\ndef bow (sentence,words,show_details=True): \n sentence_words=clean_up_sentence(sentence)\n \n bag=[0]*len(words)\n \n for i in sentence_words:\n for j,w in enumerate(words):\n if w==i: #Asigna 1 si la palabra esta en la posicion de las palabras\n bag[j]=1\n if show_details:\n print(\"encontrado en la bolsa: \",w)\n return 
(np.array(bag))\n\n\n\ndef predict_class(sentence,model):#Para predecir que tipo o clase de palabra es\n \n p = bow(sentence,words,show_details=False)\n \n res = model.predict(np.array([p]))[0] #retornamos lo eficiente del modelo\n \n \n ERROR_THRESHOLD=0.25 #Umbral de error\n \n \n \n results= [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD] \n #si el umbral de error es mayor a r entonces lo toma\n \n \n \n \n \n results.sort(key=lambda x: x[1], reverse=True) #Ordena el resultado de menor a mayor el resultado\n # de las clases.\n global return_list #return list la hacemos global\n return_list = [] \n for r in results: \n return_list.append({\"intent\": classes[r[0]], \"probability\": str(r[1])}) \n print(\"print de return list: \", return_list) #Imprime los datos del mensaje diciendome la clase y la probabilidad\n return return_list \n\n\ndef get_response(ints,intents_json): #revisa la clase del \"tag\" y obtiene una respuesta aleatoria\n tag= ints[0][\"intent\"] \n list_of_intents=intents_json[\"intents\"] #sacamos el json las referencias para generar las respuestas\n for i in list_of_intents: #i toma el valor de las referencias del json\n if (i[\"tag\"]==tag): #Si i en la posicion de las clases es igual a la clase que ingreso el usuario\n result= random.choice(i[\"responses\"]) #toma una respuesta aleatoria dentro de la propia clase de \"i\"\n break\n return result\n\n\napp= Flask(__name__, instance_relative_config=True)# declaramos la app de flask\napp.debug=False# para que se guarden los cambios al segundo | en este caso lo tenemos en False\n@app.route('/chatbot',methods=['POST','GET'])#la ruta que se encarga de recibir el mensaje de la pagina web\ndef chatbot_response():\n try:\n message=request.json[\"message\"]#obtiene el mensaje del formulario en la pagina web\n print(\"Este es el mensaje\"+message)#impresion del mensaje del usuario\n ints=predict_class(message,model)#toma el mensaje y el modelo y predice en que clase está\n response=get_response(ints,intents)#va a la funcion get_response para obtener la respuesta\n print(response)\n txt = open ('log.txt','a',encoding='utf-8')#abrimos el txt\n txt.write(\"\\nFecha:{}, Hora:{}\".format(date_time,time_time))\n txt.write(\"\\nmensaje del usuario: {}\\n\".format(message))\n txt.write(\"presicion: {}\\n\".format(return_list))\n txt.write(\"Respuesta del bot: {}\\n\".format(response))\n txt.close()#cerramos el txt\n return response# retorna la respuesta a la pagina web para que sea impreso\n except Exception as e:\n print(\"Error: \", e)\n txt = open ('log.txt','a',encoding='utf-8')#abrimos el txt\n txt.write(\"\\nFecha:{}, Hora:{}\".format(date_time,time_time))\n txt.write(\"\\nERROR\\n\".format(message))\n txt.write(\"mensaje del usuario: {}\\n\".format(message))\n txt.write(\"presicion: {}\\n\".format(return_list))\n txt.write(\"Lo siento, no pude entender tu mensaje.\\n\")\n txt.close()#cerramos el txt\n return \"Lo siento, no pude entender tu mensaje.\"\n\n\n\n@app.route('/resp',methods=['POST','GET'])\ndef chatbot_mensaje():\n message=request.json[\"message\"]\n print(\"Este es el mensaje\"+message)\n return message\n\n\n@app.route('/bot')#ruta de la pagina web donde se encuentra el chatbot\ndef index():# esta funcion toma la pagina para hacer la conexion http\n return render_template('index.html')#retornamos la pagina web para que este en la ruta /bot\n\n@app.route('/')#ruta principal de la pagina web donde se muestra la bienvenida.\ndef Welcome():\n return render_template('Welcome.html')#Retornamos la pagina web para que 
este en la ruta principal\n\n\n\n\n \n \nif __name__=='__main__':\n #start_intents()\n #start_model()\n \n app.run()\n","repo_name":"Profe-Jose-Burgos/C.E.D.A.A.C","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"19365572454","text":"from exceptions.acceleration_ratio_exception import AccelerationRatioException\nfrom exceptions.braking_speed_exception import BrakingSpeedException\nfrom exceptions.max_acceleration_ratio_exception import MaxAccelerationRatioException\nfrom exceptions.max_speed_exception import MaxSpeedException\nfrom exceptions.min_acceleration_ratio_exception import MinAccelerationRatioException\nfrom utility.config import config\n\nfrom abstractions.logger import AbstractLogger\nfrom models.engine import Engine\n\n\nclass DrivingProcessor:\n def __init__(self,\n engine: Engine,\n logger: AbstractLogger,\n acceleration_ratio=config.get('acceleration_ratio'),\n max_acceleration_ratio=config.get('max_acceleraton_ratio'),\n min_acceleration_ratio=config.get('min_acceleration_ratio'),\n max_speed=config.get('max_speed'),\n braking_speed=config.get('braking_speed'),\n ):\n\n if max_speed < config.get('min_speed') or max_speed > config.get('max_speed'):\n raise MaxSpeedException(max_speed)\n\n if braking_speed < config.get('min_braking_speed') or braking_speed > config.get('max_braking_speed'):\n raise BrakingSpeedException(braking_speed)\n\n if max_acceleration_ratio < config.get('min_max_acceleration_ratio') or max_acceleration_ratio > config.get('max_acceleraton_ratio'):\n raise MaxAccelerationRatioException(max_acceleration_ratio)\n\n if min_acceleration_ratio < 0 or min_acceleration_ratio > config.get('min_acceleration_ratio'):\n raise MinAccelerationRatioException(min_acceleration_ratio)\n\n if acceleration_ratio < 0 or acceleration_ratio > config.get('acceleration_ratio'):\n raise AccelerationRatioException(acceleration_ratio)\n\n if acceleration_ratio < min_acceleration_ratio:\n acceleration_ratio = min_acceleration_ratio\n \n if acceleration_ratio > max_acceleration_ratio:\n acceleration_ratio = max_acceleration_ratio\n\n self.__max_speed: float = max_speed\n self.__braking_speed: float = braking_speed\n self.__actual_speed: float = 0\n\n # consumption on init is 0\n self.__last_consumption: float = 0\n\n self.__engine: Engine = engine\n self.__acceleration_ratio: float = acceleration_ratio\n self.__max_acceleration_ratio: float = max_acceleration_ratio\n self.__min_acceleration_ratio: float = min_acceleration_ratio\n self.__logger = logger\n\n @property\n def actual_speed(self) -> float:\n self.__logger.log(\"Access actual car speed in driving processor.\")\n return self.__actual_speed\n\n @property\n def last_consumption(self) -> float:\n self.__logger.log(\"Access last consumption in driving proccessor.\")\n return self.__last_consumption\n\n def calculate_consumption_rate(self, is_accelerating: bool = False, is_braking: bool = False) -> float:\n current_speed = self.actual_speed\n consumption: float = 0\n\n self.__logger.log(\"Calculating consumption rate in driving proccesor.\")\n\n if current_speed > 0:\n if current_speed < self.__get_car_maxspeed__ * 0.25:\n consumption = config.get('running_quater_consumption_rate')\n elif current_speed < self.__get_car_maxspeed__ * 0.5:\n consumption = config.get('running_half_consumption_rate')\n elif current_speed < self.__get_car_maxspeed__ * 0.75:\n consumption = 
config.get('running_upper_half_consumption_rate')\n elif current_speed < self.__get_car_maxspeed__:\n consumption = config.get('running_before_max_consumption_rate')\n elif current_speed == config.default_max_speed():\n consumption = config.get('running_max_speed_consumption_rate')\n else:\n consumption = 0\n\n if is_accelerating:\n consumption *= config.get('acceleration_coefficient')\n elif is_braking:\n consumption *= config.get('braking_coefficient')\n\n self.__last_consumption = consumption * config.get('car_coefficient')\n return self.__last_consumption\n\n def increase_speed_to(self, speed: float) -> None:\n self.__logger.log(f\"Increasing speed by {speed} in driving proccesor.\")\n if not self.__get_car_engine__.is_running:\n return\n\n if speed < self.__actual_speed:\n self.__actual_speed -= 1\n\n while self.__actual_speed < speed:\n self.__actual_speed = min(speed, self.__actual_speed + self.__get_car_acceleration_ratio__)\n \n if self.__actual_speed > self.__get_car_maxspeed__:\n self.__actual_speed = self.__get_car_maxspeed__\n\n self.__get_car_engine__.consume(self.calculate_consumption_rate(True))\n\n def reduce_speed_by(self, reduceBy: float) -> None:\n self.__logger.log(f\"Reducing speed by {reduceBy} km/h in driving pro.\")\n if not self.__get_car_engine__.is_running:\n return\n\n self.__actual_speed -= min(reduceBy, self.__get_car_braking_speed__)\n\n if self.__actual_speed < 0:\n self.__actual_speed = 0\n\n self.__get_car_engine__.consume(self.calculate_consumption_rate(False, True))\n\n @property\n def __get_car_engine__(self) -> Engine:\n return self.__engine\n\n @property\n def __get_car_maxspeed__(self) -> float:\n return self.__max_speed\n\n @property\n def __get_car_braking_speed__(self) -> float:\n return self.__braking_speed\n\n @property\n def __get_car_acceleration_ratio__(self) -> float:\n return self.__acceleration_ratio\n\n @property\n def __get_car_max_acceleration_ratio__(self) -> float:\n return self.__max_acceleration_ratio\n\n @property\n def __get_car_min_acceleration_ratio__(self) -> float:\n return self.__min_acceleration_ratio\n","repo_name":"Eramaloby/ppvis-4","sub_path":"models/driving_processor.py","file_name":"driving_processor.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14504909235","text":"from taipy.gui import Gui as tpGui\nfrom taipy.gui import notify as tpNotify\n\nimport pandas as pd\n\ntext = \"Original text\"\ncol1 = \"first col\"\ncol2 = \"second col\"\ncol3 = \"third col\"\n\nballon_img = \"./img/Ballon_15_20.png\"\n\nsection_1 = \"\"\"\n

Getting started with Taipy GUI\n\n<|layout|columns=1 2 2|\n<|\nMy text: <|{text}|>\n<|{text}|input|>\n|>\n\n<|\n\n<|Press Me|button|on_action=on_button_action|> \n**Ein Button:** <|{col1}|>\n\n|>\n\n<|\n\n<|{ballon_img}|image|height=30%|width=30%|label=This is one balloon|>\n
\n|>\n\n|>\n\"\"\"\n\nsection_2 = '''\n\n##Darstellung Gas-Verbrauch\n<|{dataset}|chart|mode=line|x=Datum|y[1]=Verbrauch|y[2]=Betriebsstunden|yaxis[2]=y2|layout={layout}|color[1]=green|color[2]=blue|>\n\n'''\nlayout = {\n \"xaxis\": {\n # Force the title of the x axis\n \"title\": \"Time-Range\"\n },\n \"yaxis\": {\n # Force the title of the first y axis\n \"title\": \"Verbrauch\",\n # Place the first axis on the left\n \"side\": \"left\"\n },\n \"yaxis2\": {\n # Second axis overlays with the first y axis\n \"overlaying\": \"y\",\n # Place the second axis on the right\n \"side\": \"right\",\n # and give it a title\n \"title\": \"Betriebsstunden\"\n },\n \"legend\": {\n # Place the legend above chart\n \"yanchor\": \"middle\"\n }\n}\n\ndef on_button_action(state):\n tpNotify(state, 'info', f'The text is: {state.text}')\n state.text = \"Button Pressed\"\n\ndef on_change(state, var_name, var_value):\n if var_name == \"text\" and var_value == \"Reset\":\n state.text = \"\"\n return\n \ndef get_data(path: str):\n dataset = pd.read_csv(path)\n dataset[\"Datum\"] = pd.to_datetime(dataset[\"Datum\"], dayfirst=True).dt.date\n return dataset\n\ngui = tpGui(page=section_1 + section_2)\ndataset = get_data(\"./dataset.csv\")\n\nif __name__ == '__main__':\n # Execute by the _Python_ interpretor, for debug only.\n tpGui.run(gui, title=\"Taipy Demo\", use_reloader=True, dark_mode=True, port=5001, flask_log=False)\nelse:\n # Execute by _Gunicorn_, for production environment.\n app = tpGui.run(gui, title=\"Taipy Demo\", run_server=False)\n","repo_name":"ustrahlendorf/TaipyPlayGround","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8314708263","text":"from typarse import BaseParser\nfrom matplotlib import animation\nfrom matplotlib.animation import Animation\n\nimport ray\nfrom ray.tune import register_env\n\nfrom cleaner.cleaner_env import *\n\n\nclass ArgParser(BaseParser):\n name: str = \"cleaner_run\"\n config: str = \"simple_2\"\n policy: str = \"ppo\"\n training_iters: int = 5\n seed: int = 1\n homogeneous: bool = False\n random_start: bool = False\n no_record: bool = False\n checkpoint_freq: int = 25\n eval_freq: int = 25\n\n _help = {\n \"config\": \"Path to the config of the experiment\",\n \"name\": \"Name of subdirectory containing results for this experiment\",\n \"policy\": \"RL algorithm\",\n \"training_iters\": \"Number of training iterations\",\n \"seed\": \"Random seed for Ray workers\",\n \"homogeneous\": \"Centrally train one policy for all agents\",\n \"random_start\": \"Randomly initialize the starting positions\",\n \"no_record\": \"Don't save video in evaluation\",\n \"checkpoint_freq\": \"How many training iterations between trainer checkpoints\",\n \"eval_freq\": \"How many training iterations between evaluations\",\n }\n\n\ndef evaluate(\n agents: Dict[str, Agent],\n eval_config: Dict[str, Any],\n eval_run_name: str,\n heterogeneous: bool = True,\n num_episodes: int = 1,\n video_filename: Optional[str] = None,\n record=True,\n) -> Tuple[List[float], Optional[Animation]]:\n \"\"\"\n Simulate rounds of play for a group of agents\n :param agents: The agents to evaluate, in order of instantiation\n :param eval_config: Config for the evaluation environment\n :param eval_run_name: Name of results directory\n :param heterogeneous: Whether to use decentralized training\n :param num_episodes: How many episodes to simulate\n :param video_filename: 
Optional filename for a video of the last episode\n :param record: Whether to record the last episode\n :return: a tuple of (list of rewards, video object)\n \"\"\"\n fig, ax = plt.subplots()\n images = []\n ep_rewards = []\n agent_names = [agent.name for agent in agents.values()]\n\n for ep in range(num_episodes):\n env = CleanerEnv(\n eval_config[\"env_config\"], run_name=eval_run_name, agent_names=agent_names\n )\n ep_reward = 0\n actions = {}\n done = {\"__all__\": False}\n\n # simulate one episode\n while not done[\"__all__\"]:\n if ep == num_episodes - 1 and record:\n im = env.game.render(fig, ax)\n images.append([im])\n obs = env.game.get_agent_obs()\n for agent_name, agent in agents.items():\n policy_id = agent_name if heterogeneous else \"agent_policy\"\n actions[agent_name] = agent.trainer.compute_action(\n observation=obs[agent_name],\n policy_id=policy_id,\n )\n _, reward, done, _ = env.step(actions)\n ep_reward += sum(list(reward.values()))\n ep_rewards.append(ep_reward)\n\n # create video from last episode\n if record:\n results_dir = f\"{RAY_DIR}/{eval_run_name}\"\n if not os.path.exists(results_dir):\n os.mkdir(results_dir)\n if not video_filename:\n video_filename = f\"{results_dir}/video.mp4\"\n ani = animation.ArtistAnimation(\n fig, images, interval=100, blit=True, repeat_delay=1000\n )\n ani.save(video_filename)\n print(f\"saved video at {video_filename}\")\n else:\n ani = None\n\n print(f\"episode rewards: {ep_rewards} (mean = {sum(ep_rewards) / len(ep_rewards)})\")\n return ep_rewards, ani\n\n\ndef train(\n run_name: str,\n config: Dict[str, Any],\n policy_name: str,\n training_iters: int,\n seed: int = 1,\n heterogeneous: bool = True,\n record: bool = True,\n checkpoint_freq: int = 0,\n eval_freq: int = 0,\n num_eval_episodes: int = 5,\n verbose: bool = True,\n):\n \"\"\"\n Run one experiment\n :param run_name: Name of results directory\n :param config: Config for the evaluation environment\n :param policy_name: \"ppo\" or \"dqn\"\n :param training_iters: How many iterations for ray\n :param seed: Random seed\n :param heterogeneous: Whether or not to use decentralized training\n :param record: Whether to save video during evaluation\n :param checkpoint_freq: How often to save trainer\n :param eval_freq: How often to evaluate trainer\n :param num_eval_episodes: How many episodes to evaluate\n :param verbose: Print out evaluation results\n :return: None\n \"\"\"\n # initialize agents and trainer\n agents = {}\n for agent_num in range(config[\"env_config\"][\"num_agents\"]):\n agent = Agent(policy_name, run_name, agent_num, config, seed, heterogeneous)\n agents[agent.name] = agent\n results_dir = list(agents.values())[0].results_dir\n trainer = create_trainer(\n policy_name, agents, config, results_dir, seed=seed, heterogeneous=heterogeneous\n )\n # run training\n for i in range(training_iters):\n if verbose:\n print(f\"starting training iteration {i}\")\n trainer.train()\n if checkpoint_freq != 0 and i % checkpoint_freq == 0:\n save_trainer(trainer, path=results_dir, verbose=verbose)\n if eval_freq != 0 and i % eval_freq == 0:\n video_filename = f\"{results_dir}/checkpoint_{str(i+1).zfill(6)}/video.mp4\"\n for agent in agents.values():\n agent.trainer = trainer\n evaluate(\n agents=agents,\n eval_config=config,\n eval_run_name=run_name,\n heterogeneous=heterogeneous,\n video_filename=video_filename,\n num_episodes=num_eval_episodes,\n record=record,\n )\n save_trainer(trainer, path=results_dir, verbose=verbose)\n video_filename = (\n 
f\"{results_dir}/checkpoint_{str(training_iters).zfill(6)}/video.mp4\"\n )\n evaluate(\n agents=agents,\n eval_config=config,\n eval_run_name=run_name,\n heterogeneous=heterogeneous,\n video_filename=video_filename,\n num_episodes=num_eval_episodes,\n record=record,\n )\n\n\ndef main():\n args = ArgParser()\n config = load_config(args.config)\n config[\"env_config\"][\"random_start\"] = args.random_start # hacky\n\n # initialize ray\n ray.shutdown()\n ray.init()\n register_env(\n \"ZSC-Cleaner\", lambda _: CleanerEnv(config[\"env_config\"], run_name=args.name)\n )\n\n # train model\n train(\n run_name=args.name,\n config=config,\n policy_name=args.policy,\n training_iters=args.training_iters,\n seed=args.seed,\n heterogeneous=not args.homogeneous,\n record=not args.no_record,\n checkpoint_freq=args.checkpoint_freq,\n eval_freq=args.eval_freq,\n num_eval_episodes=5,\n verbose=config[\"run_config\"][\"verbose\"],\n )\n ray.shutdown()\n print(f\"finished training {args.name}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"bengreenberg5/zsc-cleaner","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36824436544","text":"import numpy as np\nfrom numpy.testing import assert_allclose as numpy_allclose\n\nfrom brian2 import prefs\nfrom brian2.units.fundamentalunits import have_same_dimensions\n\n\ndef assert_allclose(actual, desired, rtol=4.5e8, atol=0, **kwds):\n \"\"\"\n Thin wrapper around numpy's `~numpy.testing.utils.assert_allclose` function. The tolerance depends on the floating\n point precision as defined by the `core.default_float_dtype` preference.\n\n Parameters\n ----------\n actual : `numpy.ndarray`\n The results to check.\n desired : `numpy.ndarray`\n The expected results.\n rtol : float, optional\n The relative tolerance which will be multiplied with the machine epsilon of the type set as\n `core.default_float_type`.\n atol : float, optional\n The absolute tolerance\n \"\"\"\n assert have_same_dimensions(actual, desired)\n eps = np.finfo(prefs[\"core.default_float_dtype\"]).eps\n rtol = eps * rtol\n numpy_allclose(\n np.asarray(actual), np.asarray(desired), rtol=rtol, atol=atol, **kwds\n )\n\n\ndef exc_isinstance(exc_info, expected_exception, raise_not_implemented=False):\n \"\"\"\n Simple helper function as an alternative to calling\n `~.pytest.ExceptionInfo.errisinstance` which will take into account all\n the \"causing\" exceptions in an exception chain.\n\n Parameters\n ----------\n exc_info : `pytest.ExceptionInfo` or `Exception`\n The exception info as returned by `pytest.raises`.\n expected_exception : `type`\n The expected exception class\n raise_not_implemented : bool, optional\n Whether to re-raise a `NotImplementedError` – necessary for tests that\n should be skipped with ``@skip_if_not_implemented``. 
Defaults to\n ``False``.\n\n Returns\n -------\n correct_exception : bool\n Whether the exception itself or one of the causing exceptions is of the\n expected type.\n \"\"\"\n if exc_info is None:\n return False\n if hasattr(exc_info, \"value\"):\n exc_info = exc_info.value\n\n if isinstance(exc_info, expected_exception):\n return True\n elif raise_not_implemented and isinstance(exc_info, NotImplementedError):\n raise exc_info\n\n return exc_isinstance(\n exc_info.__cause__,\n expected_exception,\n raise_not_implemented=raise_not_implemented,\n )\n","repo_name":"brian-team/brian2","sub_path":"brian2/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":823,"dataset":"github-code","pt":"61"} +{"seq_id":"32621478324","text":"from operator import contains\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as mcolors\nimport pandas as pd\nimport matplotlib.patches as mpatches\n\n\nbg_color = \"#FFFFFF\"\n\nlight_color = \"#00B6F2\"\nmedium_color = \"#007CA6\"\n\n# Lighter to darker (state, prompting, validation)\nexp_color = \"#F79C9C\"\nexp_prompt = \"#DC143C\"\nexp_validate = \"#8B0000\"\n\nexp_patch = mpatches.Patch(color=exp_color, label='Exploration')\nexp_prompt_patch = mpatches.Patch(color=exp_prompt, label='Exploration Prompt')\nexp_validate_patch = mpatches.Patch(color=exp_validate, label='Exploration Validation')\n\nacc_color = \"#83CAF7\"\nacc_prompt = \"#2396DE\"\nacc_validate = \"#0F405E\"\n\nacc_patch = mpatches.Patch(color=acc_color, label='Acceleration')\nacc_prompt_patch = mpatches.Patch(color=acc_prompt, label='Acceleration Prompt')\nacc_validate_patch = mpatches.Patch(color=acc_validate, label='Acceleration Validation')\n\nREJECT_COLOR = \"#D427E0\"\nACCEPT_COLOR = \"#0A9428\"\nREPAIR_COLOR = \"#949028\"\nrepair_patch = mpatches.Patch(color=REPAIR_COLOR, label='Repair')\nreject_patch = mpatches.Patch(color=REJECT_COLOR, label='Reject')\naccept_patch = mpatches.Patch(color=ACCEPT_COLOR, label='Accept')\n\n\nids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n\ndef sort_by_first(tuples):\n return sorted(tuples, key=lambda x: x[0])\n\ndef process_input(pid):\n df = pd.DataFrame(pd.read_excel(f\"exports/p{pid}-export.xlsx\".format(pid=pid)))\n start_codes = list(map(lambda x: float(x[2:].split(',')[0].replace(':', '.')), df['Beginning'].tolist()))\n end_codes = list(map(lambda x: float(x[2:].split(',')[0].replace(':', '.')), df['End'].tolist()))\n labels = df['Code'].tolist()\n codes_tuple = list(zip(start_codes, end_codes, labels))\n return sort_by_first(codes_tuple)\n\ndef get_longest_time():\n longest_time = 0\n for pid in ids:\n codes = process_input(pid)\n total_time = codes[-1][1] - codes[0][0]\n if total_time > longest_time:\n longest_time = total_time\n return longest_time\n\nlongest_time = get_longest_time()\n\ndef timeline_ycenter(pid):\n yr = timeline_yrange(pid)\n return yr[0] + 0.5 * yr[1]\n\ndef timeline_yrange(pid):\n width = 0.5\n return (ids.index(pid) + 1 - (width / 2), width)\n\nchunk_styles = {\n \"acc/prompt/multi-menu\": (acc_prompt, \"xx\"),\n \"acc/prompt/context\": (acc_prompt, \"xx\"),\n \"acc/prompt/code\": (acc_prompt, \"xx\"),\n \"acc/prompt/nl\": (acc_prompt, \"xx\"),\n\n \"acc/modify/inline\": (medium_color, \"----\"),\n \"acc/modify/multi-line\": (medium_color, \"----\"),\n \"acc/accept/inline\": (medium_color, \"----\"),\n \"acc/reject/inline\": (medium_color, \"----\"),\n \"acc/accept/prompt\": (medium_color, \"----\"),\n 
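    # NOTE: each value is a (color, matplotlib hatch-pattern) pair -- "xx"
    # for prompting, "----" for accept/reject/modify, ".." for validation --
    # but the plotting code below currently reads only the color at index
    # [0] of each pair; the hatch strings are unused.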
\"acc/reject/multi-line\":(medium_color, \"----\"),\n \"acc/accept/multi-line\":(medium_color, \"----\"),\n\n \"acc/validate/examine/multi-menu\": (acc_validate, \"..\"),\n \"acc/validate/run(plot)\": (acc_validate, \"..\"),\n \"acc/validate/run\": (acc_validate, \"..\"),\n \"acc/validate/examine/inline\": (acc_validate, \"..\"),\n \"acc/validate/examine\": (acc_validate, \"..\"),\n \"acc/validate/api(in-ide)\" : (acc_validate, \"..\"),\n \"acc/validate/compile\": (acc_validate, \"..\"),\n \"validate/acc/run\": (acc_validate, \"..\"),\n \"acc/validate/IDE\": (acc_validate, \"..\"),\n \"acc/validate/examine/multi-snip\": (acc_validate, \"..\"),\n \"acc/validate/examine/multi-line\": (acc_validate, \"..\"),\n\n \"start-code\": (acc_validate, \"xx\"),\n\n \"exp/prompt/multiple-menu\": (exp_prompt, \"xx\"),\n \"exp/prompt/multi-menu\": (exp_prompt, \"xx\"),\n \"exp/prompt/multi-menu/nudge\": (exp_prompt, \"xx\"),\n \"exp/prompt/code\": (exp_prompt, \"xx\"),\n \"exp/prompt/nl\": (exp_prompt, \"xx\"),\n \"exp/prompt/context\": (exp_prompt, \"xx\"),\n \"exp/prompt/nl/nudge\": (exp_prompt, \"xx\"),\n\n \"exp/modify/inline\": (exp_color, \"----\"),\n \"exp/accept/inline\": (exp_color, \"----\"),\n \"exp/reject/multi-menu\": (exp_color, \"----\"),\n \"exp/modify/multi-menu\": (exp_color, \"----\"),\n \"exp/accept/multi-menu\": (exp_color, \"----\"),\n \"exp/reject/inline\": (exp_color, \"----\"),\n \"exp/accept/prompt\": (exp_color, \"----\"),\n \"exp/reject/code\": (exp_color, \"----\"),\n \"reject/multi-menu\": (exp_color, \"----\"),\n \"exp/accept/multi-menu/sub-snippet\": (exp_color, \"----\"),\n \"exp/modify/multi-menu/sub-snippet\": (exp_color, \"----\"),\n \"exp/reject/multi-line\": (exp_color, \"----\"),\n \"exp/accept/multi-menu/subsnippet\": (exp_color, \"----\"),\n \"exp/accept/multi-menu/nudge\": (exp_color, \"----\"),\n\n \"exp/validate/examine/multi-menu\": (exp_validate, \"..\"),\n \"exp/validate/examine/multi-menu'\": (exp_validate, \"..\"),\n \"expl/validate/google\": (exp_validate, \"..\"),\n \"expl/validate/inline\": (exp_validate, \"..\"),\n \"exp/validate/inline\": (exp_validate, \"..\"),\n \"exp/validate/run(plot)\": (exp_validate, \"..\"),\n \"exp/validate/run\": (exp_validate, \"..\"),\n \"exp/validate/examine/multi-menu\": (exp_validate, \"..\"),\n \"exp/validate/examine/inline\": (exp_validate, \"..\"),\n \"exp/validate/api(in-ide)\": (exp_validate, \"..\"),\n \"exp/validate/examine/api(in-ide)\": (exp_validate, \"..\"),\n \"exp/validate/google\": (exp_validate, \"..\"),\n \"exp/examine/validate/inline\": (exp_validate, \"..\"),\n \"exp/validate/examine\": (exp_validate, \"..\"),\n \"exp/validate/api(google)\" : (exp_validate, \"..\"),\n \"exp/validate/api-docs(google)\" : (exp_validate, \"..\"),\n \"exp/validate/in-ide(errors)\" : (exp_validate, \"..\"),\n \"exp/validate/compile\" : (exp_validate, \"..\"),\n \"exp/validate/debugger\" : (exp_validate, \"..\"),\n \"exp/validate/api/ide\" : (exp_validate, \"..\"),\n \"exp/validate/examine/multi-line\": (exp_validate, \"..\"),\n \"exp/validate/api/web\": (exp_validate, \"..\"),\n \"exp/validate/api/web/multi-snip\": (exp_validate, \"..\"),\n \"exp/validate/examine/exp/validate/examine/spec\": (exp_validate, \"..\"),\n \"exp/validate/api\": (exp_validate, \"..\"),\n\n \"end-code\": (bg_color, \"\"),\n\n \"exploration\": (exp_color, \"\"),\n \"acceleration\": (acc_color, \"\"),\n\n \"rust-starts\": (bg_color, \"\"),\n \"rust-ends\": (bg_color, \"\"),\n \"python-starts\": (bg_color, \"\"),\n \"python-ends\": (bg_color, 
\"\"),\n}\n\nchunk_edgecolor = \"black\"\n\ndef get_total_time(codes):\n total_time = codes[-1][1] - codes[0][0]\n return total_time\n\ndef scaling_factor(codes):\n return longest_time / get_total_time(codes)\n\ndef normalize_code(user_codes):\n offset = user_codes[0][0]\n sf = scaling_factor(user_codes)\n new_codes = []\n for code in user_codes[1:-1]: # skip first and last\n new_codes.append(((code[0] - offset) * sf, (code[1] - offset) * sf, code[2]))\n return new_codes\n\ndef plot_action_chunk(pid, ax, start_time, end_time, chunk, codes):\n code_start_time = start_time * scaling_factor(codes)\n code_duration = (end_time - start_time) * scaling_factor(codes)\n ACTION_Y_EXTRA = 0.2\n y_min = timeline_yrange(pid)[0] - ACTION_Y_EXTRA\n action_kwargs = {\n \"x\": code_start_time,\n \"ymin\": y_min,\n \"ymax\": timeline_yrange(pid)[0]+timeline_yrange(pid)[1]+ACTION_Y_EXTRA,\n \"linewidth\": 1,\n }\n ax.set_axisbelow(False)\n if \"reject\" in chunk:\n ax.vlines(colors=REJECT_COLOR, **action_kwargs)\n elif \"accept\" in chunk:\n ax.vlines(colors=ACCEPT_COLOR, **action_kwargs)\n elif \"modify\" in chunk:\n ax.vlines(colors=REPAIR_COLOR, **action_kwargs)\n # The rest\n elif \"reject\" not in chunk and \"accept\" not in chunk and \"modify\" not in chunk and \"start-code\" not in chunk and \"end-code\" not in chunk and \"acceleration\" not in chunk and \"exploration\" not in chunk:\n ax.broken_barh(\n [(code_start_time, code_duration)],\n timeline_yrange(pid),\n facecolor = chunk_styles[chunk][0],\n edgecolor = chunk_edgecolor,\n linewidth = 0.1,\n zorder=2.5,\n )\n\ndef plot_mode_chunk(pid, ax, start_time, end_time, chunk, codes):\n ax.set_axisbelow(True)\n code_start_time = start_time * scaling_factor(codes)\n code_end_time = end_time * scaling_factor(codes)\n if \"acceleration\" in chunk:\n y_mid = (timeline_yrange(pid)[0] + (timeline_yrange(pid)[0]+timeline_yrange(pid)[1])) / 2\n ax.hlines(y_mid, code_start_time, code_end_time, color=acc_color)\n elif \"exploration\" in chunk:\n y_mid = (timeline_yrange(pid)[0] + (timeline_yrange(pid)[0]+timeline_yrange(pid)[1])) / 2\n ax.hlines(y_mid, code_start_time, code_end_time, color=exp_color)\n\nfig, ax = plt.subplots()\n\nfor i in ids:\n codes = process_input(i)\n # codes = normalize_code(codes)\n offset = codes[0][0]\n for code in codes:\n plot_mode_chunk(i, ax, code[0] - offset, code[1] - offset, code[2], codes)\n for code in codes:\n plot_action_chunk(i, ax, code[0] - offset, code[1] - offset, code[2], codes)\n\nax.set_xlabel(\"Percent of study completed\")\nax.set_xticks([])\n\n\nax.set_ylabel(\"Participant number\")\nax.set_yticks(range(1, len(ids) + 1))\nax.set_yticklabels(ids)\n\nlgd = ax.legend(\n handles=[\n acc_patch,\n acc_prompt_patch,\n acc_validate_patch,\n exp_patch,\n exp_prompt_patch,\n exp_validate_patch,\n accept_patch,\n repair_patch,\n reject_patch,\n ],\n labels = [\n \"Acceleration\",\n \"Acceleration - Prompting\",\n \"Acceleration - Validating\",\n \"Exploration\",\n \"Exploration - Prompting\",\n \"Exploration - Validating\",\n \"Accept Suggestion\",\n \"Repair Suggestion\",\n \"Reject Suggestion\",\n ],\n loc=\"lower left\",\n bbox_to_anchor=(0.05,-0.25),\n ncol=3,\n prop={'size': 8},\n)\n\n# set figure size\nfig.set_size_inches(8, 4.5)\n\n#plt.show()\nplt.savefig(\"chunks.pdf\",\n dpi=600,\n bbox_extra_artists=(lgd,), 
bbox_inches='tight')","repo_name":"michaelbjames/copilot-study","sub_path":"figures/chunks.py","file_name":"chunks.py","file_ext":"py","file_size_in_byte":9681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"32532651772","text":"def specialFilter(nums):\n \"\"\"Write a function that takes an array of numbers as input and returns \n the number of elements in the array that are greater than 10 and both \n first and last digits of a number are odd (1, 3, 5, 7, 9).\n For example:\n specialFilter([15, -73, 14, -15]) => 1 \n specialFilter([33, -2, -3, 45, 21, 109]) => 2\n \n Example solution:\n # line 1\n count = 0\n # line 2\n for num in nums:\n # line 3\n if num > 10:\n # line 4\n odd_digits = (1, 3, 5, 7, 9)\n # line 5\n number_as_string = str(num)\n # line 6\n if int(number_as_string[1]) in odd_digits and int(number_as_string[-1]) in odd_digits:\n # line 7\n count += 1\n # line 8\n return count \n \n \"\"\"\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"6\")\n # END OF SOLUTION\n\ndef check(candidate):\n\n import io\n from contextlib import redirect_stdout\n\n f = io.StringIO()\n with redirect_stdout(f):\n candidate([])\n out = f.getvalue().strip('\\n')\n\n assert \"6\" == out\n for i in range(0, 10):\n if i != 6:\n assert str(i) != out\n\nif __name__ == '__main__':\n check(specialFilter)\n","repo_name":"openai/code-align-evals-data","sub_path":"alignment/find_bug/specialFilter.py","file_name":"specialFilter.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"18373847594","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PEP 263 -- Defining Python Source Code Encodings https://www.python.org/dev/peps/pep-0263/\n# PEP 3120 -- Using UTF-8 as the default source encoding https://www.python.org/dev/peps/pep-3120/\n\n\"\"\"\nbanopata - Banking Nordea Parse Transactions\n\"\"\"\n\nfrom common import *\nimport re\n\n\nif __name__ == '__main__':\n PathToProperties = config[\"PathToProperties\"]\n PathToTransactions = config[\"PathToTransactions\"]\n\n errors = []\n for line in open(PathToTransactions, encoding='utf-8').readlines():\n fields = line.rstrip().split('\\t')\n if len(fields) > 5 and all([fields[:2], fields[3], fields[5]]) and '.' 
in fields[0]:\n datebook, typetrns, text, datevald, accout, accoin = fields\n d, M, Y = map(int, datebook.split('.'))\n accoin = safe_cast(accoin.replace(',', '.').replace(' ', ''), float, .0)\n\n navn = re.findall(r\"[\\w']+\", text.upper())\n navn = ' '.join((navn[0], navn[-1])) if len(navn) > 0 else ''\n keysearch = max(similar(navn, key) for key in unitlookup.keys())\n molike = keysearch[1] # most likely key\n if molike in unitlookup and molike == navn and datebook == datevald:\n unit = unitlookup[molike]\n if not unit in data:\n data[unit] = {}\n if not \"payments\" in data[unit]:\n data[unit][\"payments\"] = {}\n if not Y in data[unit][\"payments\"]:\n data[unit][\"payments\"][Y] = {}\n if not M in data[unit][\"payments\"][Y]:\n data[unit][\"payments\"][Y][M] = {}\n data[unit][\"payments\"][Y][M][d] = accoin\n else:\n errors.append(str((fields, keysearch)))\nsavedata()\n\nprint('#' * 50, 'ERRORS', '#' * 50)\nprint('\\n'.join(errors))\n","repo_name":"wittrup/crap","sub_path":"skattmestring/banopata.py","file_name":"banopata.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26729310327","text":"__author__ = 'Aakarsh Gupta'\n\nfrom graphics import *\nimport time\nfrom Button import Button\nfrom Particle import *\n\nclass ParticleToy:\n def __init__(self):\n print(\"LAUNCHING WINDOW\")\n self.win = GraphWin(\"Particles\", 750, 750, False)\n self.win.setCoords(0, 0, 1000, 1000)\n self._mouseLocation = Point(0, 0)\n self.particleSize = 15\n self.numOfParticles = 2\n self._particles = []\n self.showWelcomePage()\n self._gameElements = []\n self._welcomePageElements = []\n\n def startGame(self):\n print(\"STARTING GAME\")\n self.win.setBackground(\"Light Blue\")\n self.speed = 0.001\n self._initializeGameView()\n self._timer()\n self.win.checkMouse()\n\n def _initializeGameView(self):\n print(\"INITIALIZING GAME VIEW\")\n self.lastFpsUpdate = 0\n self.frameCounter = 0\n self._gameElements = []\n self._initializeButtons()\n self._initializeParticles()\n\n keyFunctionsPrompt = Text(Point(430, 35), \"Press 'q' to Quit. Press 'r' to Reset. 
\").draw(self.win)\n self.fpsText = Text(Point(50, 900), \"\")\n self._gameElements.append(keyFunctionsPrompt)\n self._gameElements.append(self.fpsText)\n\n def _initializeButtons(self):\n print(\"INITIALIZING BUTTONS\")\n self.backToMenuButton = Button(\"Back to Menu\", Point(20, 950), Point(170, 980), self.win)\n self._gameElements.append(self.backToMenuButton)\n\n def _timer(self):\n print(\"STARTING GAME TIMER\")\n while self._running:\n self.frameCounter += 1\n\n if time.time() - self.lastFpsUpdate > 1:\n self.fpsText.setText(self.frameCounter)\n self.fpsText.undraw()\n self.fpsText.draw(self.win)\n self.lastFpsUpdate = time.time()\n self.frameCounter = 0\n\n self._keyboardCallback()\n self._mouseCallback()\n self._displayCallback()\n\n\n def _keyboardCallback(self):\n key = self.win.checkKey()\n if key == \"q\":\n print(\"QUITTING\")\n self._running = False\n if key == \"r\":\n print(\"RESETTING PARTICLES\")\n self._killParticles()\n self._initializeParticles()\n return\n\n def _mouseCallback(self):\n mouseLocation = Point(0, 0)\n if mouseLocation.getX() == 0 and mouseLocation.getY() == 0:\n point = self.win.checkMouse()\n else:\n point = None\n if point:\n mouseLocation = point\n\n if self.backToMenuButton.clicked(mouseLocation):\n self.backToMenu()\n return\n\n def _displayCallback(self):\n self._calculateParticleMovement()\n self.win.flush()\n return\n\n def _initializeParticles(self):\n print(\"INITIALIZING PARTICLES\")\n r = Random()\n min_x, min_y = float('inf'), float('inf')\n max_x, max_y = 0, 0\n\n dog_pos = [(100, 100), (900, 100), (500, 793)]\n\n for i in range(self.numOfParticles):\n if i != self.numOfParticles-1:\n # rand_x = r.randrange(100, 900)\n # max_x = max(max_x, rand_x)\n # min_x = min(min_x, rand_x)\n # rand_y = r.randrange(100, 900)\n # max_y = max(max_y, rand_y)\n # min_y = min(min_y, rand_y)\n # particle = Particle(self.win, Point(rand_x, rand_y))\n particle = Particle(self.win, Point(dog_pos[i][0], dog_pos[i][1]))\n else:\n # particle = Particle(self.win, Point(r.randrange(min_x+50, max_x-50), r.randrange(min_y+50, max_y-50)), True) # Initialize sheep within area enclosed by dogs\n particle = Particle(self.win, Point(r.randrange(100+50, 900-50), r.randrange(100+50, 793-50)), True) # Initialize sheep within area enclosed by dogs\n\n\n self._particles.append(particle)\n pink = (255, 192, 203)\n green = (0, 255, 0)\n black = (0, 0, 0)\n blue = (0, 0, 255)\n colors = [green, black, blue, pink]\n particle.setColor('#%02X%02X%02X' % colors[i])\n\n particle.setSize(self.particleSize)\n particle.draw()\n\n\n def findParticle(self, particle):\n if particle.position.getX() < 0:\n return \"left\"\n if particle.position.getX() > 1000:\n return \"right\"\n if particle.position.getY() < 100:\n return \"below\"\n if particle.position.getY() > 1000:\n return \"above\"\n else:\n return \"in\"\n\n def reflect(self, particle):\n if self.findParticle(particle) == \"in\":\n return False\n elif self.findParticle(particle) == \"left\":\n particle.setParticleMovement(abs(particle.dX), particle.dY)\n return True\n elif self.findParticle(particle) == \"right\":\n particle.setParticleMovement(-abs(particle.dX), particle.dY)\n return True\n elif self.findParticle(particle) == \"bottom\":\n particle.setParticleMovement(particle.dX, abs(particle.dY))\n return True\n elif self.findParticle(particle) == \"top\":\n particle.setParticleMovement(particle.dX, -abs(particle.dY))\n return True\n\n\n def _calculateParticleMovement(self, vel_arr):\n '''\n\n :param vel_arr: Array of tuples (vel_x, 
vel_y)\n :return:\n '''\n\n for i, dog in enumerate(self._particles[:numOfDogs]):\n dog.move_with_vel(vel_arr[i][0], vel_arr[i][1])\n\n for j, sheep in enumerate(self._particles[numOfDogs:]):\n sheep.move_with_vel(vel_arr[j][0], vel_arr[j][1])\n\n return\n\n # for particle in self._particles:\n # if particle.isSheep:\n # if not self.reflect(particle):\n # dogs = self._particles[:self.numOfParticles-1]\n # # dogs = self._particles[:self.numOfDogs-1]\n # particle.setParticleMovement(dogs)\n # else:\n # pass\n # # particle.setParticleMovement(None)\n # return\n\n\n def _killParticles(self):\n print(\"KILLING PARTICLES\")\n for particle in self._particles:\n particle.undraw()\n self._particles = []\n\n\n def showWelcomePage(self):\n print(\"SHOWING WELCOME PAGE\")\n self._welcomePageElements = []\n self.win.setBackground(\"Dark Gray\")\n atWelcome = True\n\n welcomeText = Text(Point(500, 850), \"Herd Movement Generator\")\n welcomeText.setSize(28)\n welcomeText.setTextColor(\"BLUE\")\n welcomeText.draw(self.win)\n self._welcomePageElements.append(welcomeText)\n\n startButton = Button(\"Start\", Point(400, 350), Point(600, 450), self.win)\n startButton.setTextSize(32)\n self._welcomePageElements.append(startButton)\n\n numOfParticlesText = Text(Point(270, 700), \"Number of Particles\").draw(self.win)\n self._welcomePageElements.append(numOfParticlesText)\n\n numOfParticlesInput = Entry(Point(400, 700), 8).setText(\"4\").draw(self.win)\n self._welcomePageElements.append(numOfParticlesInput)\n\n numOfParticlesInfoText = Text(Point(650, 700), \"(2-10)\").draw(self.win)\n self._welcomePageElements.append(numOfParticlesInfoText)\n\n # numOfDogsText = Text(Point(270, 700), \"Number of Dogs\").draw(self.win)\n # self._welcomePageElements.append(numOfDogsText)\n #\n # numOfDogsInput = Entry(Point(400, 700), 8).setText(\"4\").draw(self.win)\n # self._welcomePageElements.append(numOfDogsInput)\n #\n # numOfDogsInfoText = Text(Point(650, 700), \"(2-10)\").draw(self.win)\n # self._welcomePageElements.append(numOfDogsInfoText)\n #\n # numOfSheepText = Text(Point(270, 700), \"Number of Dogs\").draw(self.win)\n # self._welcomePageElements.append(numOfSheepText)\n #\n # numOfSheepInput = Entry(Point(400, 700), 8).setText(\"4\").draw(self.win)\n # self._welcomePageElements.append(numOfSheepInput)\n #\n # numOfSheepInfoText = Text(Point(650, 700), \"(2-10)\").draw(self.win)\n # self._welcomePageElements.append(numOfSheepInfoText)\n\n valid = True\n valid2 = True\n\n while atWelcome:\n time.sleep(.1)\n point = self.win.checkMouse()\n if point and valid and valid2 and startButton.clicked(point):\n self._running = True\n self.hideWelcomePage()\n self.startGame()\n atWelcome = False\n return\n elif self.win.checkKey() == \"q\":\n print(\"QUITTING\")\n atWelcome = False\n return\n\n if numOfParticlesInput.getText() != \"\":\n try:\n self.numOfParticles = int(numOfParticlesInput.getText())\n\n if self.numOfParticles < 2 or self.numOfParticles > 10:\n self.textErrorMessage(numOfParticlesInfoText, \"Invalid Entry. Enter 2-10\", Point(750, 700))\n valid2 = False\n else:\n self.textErrorMessage(numOfParticlesInfoText, \"(2-10)\", Point(650, 700), \"BLACK\")\n valid2 = True\n except:\n self.textErrorMessage(numOfParticlesInfoText, \"Invalid Entry. Numbers Only\", Point(750, 700))\n valid2 = False\n\n # if numOfDogsInput.getText() != \"\":\n # try:\n # self.numOfDogs = int(numOfDogsInput.getText())\n #\n # if self.numOfDogs < 1:\n # self.textErrorMessage(numOfDogsInfoText, \"Invalid Entry. 
Enter >0\", Point(750, 700))\n # valid2 = False\n # else:\n # self.textErrorMessage(numOfDogsInfoText, \"(>0)\", Point(650, 700), \"BLACK\")\n # valid2 = True\n # except:\n # self.textErrorMessage(numOfDogsInfoText, \"Invalid Entry. Numbers Only\", Point(750, 700))\n # valid2 = False\n #\n # if numOfSheepInput.getText() != \"\":\n # try:\n # self.numOfSheep = int(numOfSheepInput.getText())\n #\n # if self.numOfDogs < 1:\n # self.textErrorMessage(numOfSheepInfoText, \"Invalid Entry. Enter 0 or more\", Point(750, 700))\n # valid2 = False\n # else:\n # self.textErrorMessage(numOfSheepInfoText, \"(>0)\", Point(650, 700), \"BLACK\")\n # valid2 = True\n # except:\n # self.textErrorMessage(numOfSheepInfoText, \"Invalid Entry. Numbers Only\", Point(750, 700))\n # valid2 = False\n\n # self.numOfParticles = self.numOfDogs + self.numOfSheep\n\n\n def textErrorMessage(self, textObject, message, newAnchor, color=\"RED\"):\n textObject.setText(message)\n textObject.setTextColor(color)\n textObject.anchor = newAnchor\n textObject.undraw()\n textObject.draw(self.win)\n\n\n def backToMenu(self):\n print(\"GOING BACK TO WELCOME MENU\")\n self._killParticles()\n for element in self._gameElements:\n element.undraw()\n del element\n\n self._running = False\n self.showWelcomePage()\n\n\n def hideWelcomePage(self):\n print(\"HIDING WELCOME PAGE\")\n for element in self._welcomePageElements:\n element.undraw()\n del element\n","repo_name":"aakarshgupta97/106B-Research-Project","sub_path":"src/ParticleToy.py","file_name":"ParticleToy.py","file_ext":"py","file_size_in_byte":11670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12000373977","text":"# -*- coding: UTF-8 -*-\nimport psutil\nimport csv\nimport time\n\nprocessNames = [\"SwUSB.exe\", \"runSW.exe\", \"System\", \"explorer.exe\"]\ninterval = 10\n\n\ndef getPid(name):\n pids = psutil.process_iter()\n for pid in pids:\n if pid.name() == name:\n print(f\"{name} 's pid is: {pid.pid}\")\n return pid.pid\n print(\"can't find process\")\n return -1\n\n\ndef getHandleNum(id):\n try:\n num = psutil.Process(id).num_handles()\n except Exception:\n num = 0\n return num\n\n\nif __name__ == \"__main__\":\n pids = []\n for process in processNames:\n pids.append(getPid(process))\n while 1:\n with open(\"handle-num-log.csv\", 'a', newline='') as csvf:\n lineWriter = csv.writer(csvf, lineterminator='\\r\\n')\n lTime = time.localtime()\n strTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", lTime)\n line = [strTime]\n for pid in pids:\n handleNum = 0 if pid == -1 else getHandleNum(pid)\n line.append(handleNum)\n lineWriter.writerow(line)\n print(strTime)\n for i in range(len(line) - 1):\n print(f\"{processNames[i]}: {line[i+1]}\")\n time.sleep(60 * interval)\n","repo_name":"nulla2011/my-py-scripts","sub_path":"log-handle-num.py","file_name":"log-handle-num.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72046122115","text":"from django.urls import path, include\nfrom rest_framework import routers\nfrom . 
import views\n\nroute = routers.DefaultRouter()\nroute.register(r'UserKind', views.UserKindViewSet)\nroute.register(r'BugReport', views.BugReportViewSet)\nroute.register(r'engineers', views.EngineerViewSet)\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\n\nurlpatterns = [\n path('', include(route.urls)),\n path('api-auth',include('rest_framework.urls',namespace='rest_framework'))\n]","repo_name":"FaizaanKhan24/BugTracker","sub_path":"API/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17292879161","text":"#! /usr/bin/env python\n\nimport os\nimport re\nimport glob\nimport numpy as np\nfrom tempfile import mkstemp\n\nDEFAULT_SOUND = '22050Hz_5s_brownnoise.wav'\n\ndef array_from_text_file(filename, dtype = 'float'):\n realpathname = os.path.join(os.path.dirname(__file__), filename)\n return np.loadtxt(realpathname, dtype = dtype)\n\ndef list_all_sounds(rel_dir):\n datadir = os.path.join(os.path.dirname(__file__), rel_dir)\n return glob.glob(os.path.join(datadir,'*.*'))\n\ndef get_default_test_sound(TestCase, rel_dir = 'sounds'):\n all_sounds = list_all_sounds(rel_dir)\n if len(all_sounds) == 0:\n TestCase.skipTest(\"please add some sounds in \\'python/tests/sounds\\'\")\n else:\n default_sound = all_sounds[0]\n if DEFAULT_SOUND in map(os.path.basename, all_sounds):\n while os.path.basename(default_sound) != DEFAULT_SOUND:\n default_sound = all_sounds.pop(0)\n return default_sound\n\ndef get_tmp_sink_path():\n fd, path = mkstemp()\n os.close(fd)\n return path\n\ndef del_tmp_sink_path(path):\n try:\n os.unlink(path)\n except WindowsError as e:\n # removing the temporary directory sometimes fails on windows\n import warnings\n errmsg = \"failed deleting temporary file {:s} ({:s})\"\n warnings.warn(UserWarning(errmsg.format(path, repr(e))))\n\ndef array_from_yaml_file(filename):\n import yaml\n f = open(filename)\n yaml_data = yaml.safe_load(f)\n f.close()\n return yaml_data\n\ndef count_samples_in_file(file_path):\n from aubio import source\n hopsize = 256\n s = source(file_path, 0, hopsize)\n total_frames = 0\n while True:\n _, read = s()\n total_frames += read\n if read < hopsize: break\n return total_frames\n\ndef count_samples_in_directory(samples_dir):\n total_frames = 0\n for f in os.walk(samples_dir):\n if len(f[2]):\n for each in f[2]:\n file_path = os.path.join(f[0], each)\n if file_path:\n total_frames += count_samples_in_file(file_path)\n return total_frames\n\ndef count_files_in_directory(samples_dir):\n total_files = 0\n for f in os.walk(samples_dir):\n if len(f[2]):\n for each in f[2]:\n file_path = os.path.join(f[0], each)\n if file_path:\n total_files += 1\n return total_files\n\ndef parse_file_samplerate(soundfile):\n samplerate = None\n # parse samplerate\n re_sr = re.compile(r'/([0-9]{4,})Hz_.*')\n match_samplerate = re_sr.findall(soundfile)\n if match_samplerate:\n samplerate = int(match_samplerate[0])\n else:\n import warnings\n warnings.warn(UserWarning(\"could not parse samplerate for {:s}\"\n .format(soundfile)))\n return samplerate\n","repo_name":"sonic-pi-net/sonic-pi","sub_path":"app/external/aubio-0.4.9/python/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","stars":10272,"dataset":"github-code","pt":"61"} +{"seq_id":"30434640072","text":"from cs50 import get_int\n#height = 
int(input('Height: ')) #with this built-in function we don't have how to reject alpha inputs\nheight = get_int('Height: ')\nwhile (height < 1 or height > 8):\n height = int(input('Height: '))\nspaces = height\nline = '#'\nfor i in range (1, height + 1):\n for s in range (spaces):\n print(\" \", end='')\n for j in range(i):\n print(\"#\", end='')\n #line = str(line + '#')\n #print((line).rstrip)\n print(\"\")\n spaces = spaces - 1","repo_name":"wrongbyte/CS50-solutions","sub_path":"mario_python/mario-less.py","file_name":"mario-less.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73596968834","text":"import math\n\ndef findEarliest(bus, timetable):\n earliest = min(timetable, key=lambda b: -bus % b)\n return (earliest * (-bus % earliest))\n\ndef lcm(x, y):\n return x * y // math.gcd(x, y)\n\ndef findConsecutive(timetable):\n # Applying Chinese Remainder theorem\n N = [1] * len(timetable)\n for _, bus in timetable.items():\n N = [lcm(n, bus) for n in N]\n M = N[0]\n i = 0\n for _, bus in timetable.items():\n N[i] //= bus\n i += 1\n X = [pow(n, -1, m) for n, m in zip(N, timetable.values())]\n sum = 0\n for b, n, x in zip(timetable.keys(), N, X):\n sum += b*n*x\n return sum % M\n\nwith open('input', 'r') as file:\n bus, timetable = list(file)\n bus = int(bus)\n table = [int(b) for b in timetable.split(',') if b!='x']\n # This resumes to the Chinese Remainder Theorem. The detail I had initially missed\n # is that the remainder must not have a modulo applied to it and that a minus needs to be\n # applied to it, as we need to find the t0, not the tn for the bus times\n timetableDict = {-i: int(b) for i, b in enumerate(timetable.split(',')) if b != \"x\"}\n print(timetableDict)\n print(findEarliest(bus, table))\n print(findConsecutive(timetableDict))\n","repo_name":"MadalinaPatrichi/adventOfCode","sub_path":"day13/shuttle.py","file_name":"shuttle.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"25058319925","text":"def solution(s):\n \n list_s=list(s)\n print(list_s)\n for j in range (1,len(list_s)//2+1):\n result = ''\n cnt = 1\n temp = ''\n for i in range (0,len(list_s),j):\n print(temp, list_s[i:i+j])\n if temp == list_s[i:i+j]:\n cnt +=1\n else:\n if cnt == 1:\n result += str.join('',list_s[i:i+j])\n else :\n result = result + str(cnt) + str.join('',list_s[i:i+j])\n cnt = 1\n temp = list_s[i:i+j]\n print(result)\n print(result,len(result))\n print(\"#########################\")\n answer = 0\n return answer\n\n\n\n\n\nIn = [\"aabbaccc\",\"ababcdcdababcdcd\",\"abcabcdede\",\"abcabcabcabcdededededede\",\"xababcdcdababcdcd\"]\n'''\nfor i in In:\n solution(i)\n'''\nsolution(\"ababcdcdababcdcd\")\n#\n","repo_name":"aver1001/github-practice","sub_path":"programmers/Level 2/문자열 압축/slove.py","file_name":"slove.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34000181207","text":"from odoo import api, fields, models\n\n\nclass GCPermissionConnector(models.Model):\n _inherit = \"gc.permission.connector\"\n\n world_id = fields.Many2one(\n comodel_name=\"gc.builder.world\", index=True, ondelete=\"cascade\"\n )\n\n @api.model\n def get_team_worlds(self, team_id):\n return [\n self.env[\"gc.builder.world\"].return_world(x)\n for x in self.search([(\"team_id\", \"=\", 
team_id)]).mapped(\"world_id\")\n ]\n\n @api.model\n def get_user_worlds(self, player_uuid):\n user = self.env[\"gc.user\"].search([(\"mc_uuid\", \"=\", player_uuid)])\n if user:\n GCBuilderWorld = self.env[\"gc.builder.world\"]\n return [\n GCBuilderWorld.return_world(x)\n for x in self.search(\n [\n \"|\",\n (\"user_id\", \"=\", user.id),\n (\n \"team_id\",\n \"in\",\n user.permission_connector_ids.mapped(\"team_id\").ids,\n ),\n ]\n ).mapped(\"world_id\")\n | GCBuilderWorld.search([(\"owner_id\", \"=\", user.id)])\n ]\n return []\n","repo_name":"gigaclub/gigaclub_odoo","sub_path":"gigaclub_builder_system/models/gc_permission_connector.py","file_name":"gc_permission_connector.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23548234751","text":"inf = open(\"input.txt\", \"r\")\noutf = open(\"output.txt\", \"w\")\n\ninput = inf.readline\n\ndef sol():\n\ts, a = map(str, input().split())\n\tn = len(s)\n\ts = [i for i in s]\n\tk = int(a)\n\tcnt = 0\n\tfor i in range(n + 1 - k):\n\t\tif (s[i] == '-'):\n\t\t\tcnt += 1\n\t\t\tfor j in range(k):\n\t\t\t\tif (s[j + i] == '-'):\n\t\t\t\t\ts[j + i] = '+'\n\t\t\t\telse:\n\t\t\t\t\ts[j + i] = '-'\n\t# print(s)\n\ts = \"\".join(s)\n\tif (s != '+' * n):\n\t\treturn \"IMPOSSIBLE\"\n\telse:\n\t\treturn cnt\n\nfor i in range(int(input())):\n\tprint(\"Case #\", i + 1, \": \", sol(), sep = \"\", file = outf)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/443.py","file_name":"443.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4405401144","text":"from logic import *\n\nP = Symbol(\"Tuesday\")\nQ = Symbol(\"raining\")\nR = Symbol(\"Harry\")\n\nKB = KnowledgeBase(Implication(And(P,Not(Q)),R),P,Not(Q))\nquery = R\n\nans = checkModel(KB,query)\n\nprint(ans)\nprint(KB.formula())","repo_name":"EugeneMMF/AI","sub_path":"logic/AI/advanced/harry.py","file_name":"harry.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41194026753","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport re\nfrom sys import argv, exit\nfrom _py2with3compatibility import run_cmd\n\n\ndef parse_workflows(workflow_file):\n err, out = run_cmd(\"cat %s\" % workflow_file)\n if err:\n print(out)\n exit(1)\n\n wf = \"\"\n wfs = {}\n steps = 0\n for line in out.split(\"\\n\"):\n line = line.strip()\n m = re.match(\"^.*\\[(\\d+)\\] *: *(.+)$\", line)\n if not m:\n continue\n step = m.group(1)\n cmd = m.group(2).strip()\n prefix, rest = line.split(\":\", 1)\n items = prefix.split(\" \")\n if re.match(\"^\\d+(\\.\\d+|)$\", items[0]):\n wf = items[0]\n if not wf in wfs:\n wfs[wf] = {}\n wfs[wf][step] = re.sub(\" +\", \" \", cmd)\n steps += 1\n print(\"%s: %s workflows, %s steps\" % (workflow_file, len(wfs), steps))\n return wfs\n\n\norig_workflows = argv[1]\nnew_workflows = argv[2]\n\nwfs = {}\nwfs[\"old\"] = parse_workflows(argv[1])\nwfs[\"new\"] = parse_workflows(argv[2])\n\nnew_wf = []\nnew_step = []\nchg_step = []\nfor wf in wfs[\"new\"]:\n if not wf in wfs[\"old\"]:\n new_wf.append(wf)\n else:\n for step in wfs[\"new\"][wf]:\n if not step in wfs[\"old\"][wf]:\n new_step.append(wf)\n break\n elif not wfs[\"old\"][wf] == wfs[\"new\"][wf]:\n chg_step.append(wf)\n break\n\nprint(\"New workflows:%s: %s\" % (len(new_wf), 
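    # NOTE: each summary line prints "<label>:<count>: <comma-joined ids>";
    # the final "WORKFLOWS TO RUN" line aggregates all three categories.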
\",\".join(new_wf)))\nprint(\"Workflows with new steps:%s: %s\" % (len(new_step), \",\".join(new_step)))\nprint(\"Wrokflows with changed steps:%s: %s\" % (len(chg_step), \",\".join(chg_step)))\nprint(\"WORKFLOWS TO RUN:\", \",\".join(new_wf + new_step + chg_step))\n","repo_name":"cms-sw/cms-bot","sub_path":"ib-pr-workflow-changed.py","file_name":"ib-pr-workflow-changed.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"17787091341","text":"import torch.nn as nn\nimport torch\nfrom torch.nn.init import normal_\nimport pandas as pd\nimport math, time\n\n\nclass PositionalEmbedding(nn.Module):\n \n def __init__(self, hidden_size, batch_size, max_position_embeddings=512, initializer_range=0.02, all_possible_position=None, device=None):\n super(PositionalEmbedding, self).__init__()\n assert max_position_embeddings >= 512, \"config.max_position_embeddings参数必须大于等于512\"\n # 因为BERT预训练模型的长度为512\n self.device = device\n self._reset_parameters(initializer_range)\n self.hidden_size = hidden_size\n self.batch_size = batch_size\n self.max_position_embeddings = max_position_embeddings\n self.pre_dense = torch.empty((self.batch_size, self.max_position_embeddings, self.hidden_size))\n self.dense = nn.Linear(hidden_size, hidden_size, bias=True)\n self.simple_pe = nn.Linear(1, hidden_size, bias=True)\n self.activation = nn.Sigmoid()\n\n\n def forward(self, position_ids):\n \"\"\"\n :param position_ids: [position_ids_len, batch_size]\n :return: [position_ids_len, batch_size, hidden_size]\n \"\"\"\n\n result = self.activation(self.dense(position_ids)).transpose(0, 1)\n return result\n\n\n def _reset_parameters(self, initializer_range):\n r\"\"\"Initiate parameters.\"\"\"\n \"\"\"\n 初始化\n \"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n normal_(p, mean=0.0, std=initializer_range)\n\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, vocab_size, hidden_size, pad_token_id=0, initializer_range=0.02):\n super(TokenEmbedding, self).__init__()\n self.embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=pad_token_id)\n self._reset_parameters(initializer_range)\n\n def forward(self, input_ids):\n \"\"\"\n :param input_ids: shape : [input_ids_len, batch_size]\n :return: shape: [input_ids_len, batch_size, hidden_size]\n \"\"\"\n return self.embedding(input_ids)\n\n def _reset_parameters(self, initializer_range):\n r\"\"\"Initiate parameters.\"\"\"\n \"\"\"\n 初始化\n \"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n normal_(p, mean=0.0, std=initializer_range)\n\n\nclass SegmentEmbedding(nn.Module):\n def __init__(self, type_vocab_size, hidden_size, initializer_range=0.02):\n super(SegmentEmbedding, self).__init__()\n self.embedding = nn.Embedding(type_vocab_size, hidden_size)\n self._reset_parameters(initializer_range)\n\n def forward(self, token_type_ids):\n \"\"\"\n\n :param token_type_ids: shape: [token_type_ids_len, batch_size]\n :return: shape: [token_type_ids_len, batch_size, hidden_size]\n \"\"\"\n return self.embedding(token_type_ids)\n\n def _reset_parameters(self, initializer_range):\n r\"\"\"Initiate parameters.\"\"\"\n \"\"\"\n 初始化\n \"\"\"\n for p in self.parameters():\n if p.dim() > 1:\n normal_(p, mean=0.0, std=initializer_range)\n\n\nclass BertEmbeddings(nn.Module):\n\n def __init__(self, config):\n super().__init__()\n self.device = config.device\n self.word_embeddings = TokenEmbedding(vocab_size=config.vocab_size,\n hidden_size=config.hidden_size,\n 
pad_token_id=config.pad_token_id,\n initializer_range=config.initializer_range)\n # return shape [src_len,batch_size,hidden_size]\n\n self.position_embeddings = PositionalEmbedding(max_position_embeddings=config.max_position_embeddings,\n hidden_size=config.hidden_size,\n batch_size=config.batch_size,\n initializer_range=config.initializer_range,\n all_possible_position=config.all_possible_position,\n device=config.device)\n # return shape [src_len,1,hidden_size]\n\n self.token_type_embeddings = SegmentEmbedding(type_vocab_size=config.type_vocab_size,\n hidden_size=config.hidden_size,\n initializer_range=config.initializer_range)\n # return shape [src_len,batch_size,hidden_size]\n\n self.LayerNorm = nn.LayerNorm(config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.register_buffer(\"position_ids\",\n torch.arange(config.max_position_embeddings).expand((1, -1)))\n # shape: [1, max_position_embeddings]\n\n def forward(self,\n input_ids=None,\n position_ids=None,\n token_type_ids=None):\n \"\"\"\n :param input_ids: [src_len, batch_size]\n :param position_ids: [1,src_len]\n :param token_type_ids: [src_len,batch_size]\n :return: [src_len, batch_size, hidden_size]\n \"\"\"\n src_len = input_ids.size(0)\n token_embedding = self.word_embeddings(input_ids).transpose(0,1)\n # shape:[src_len,batch_size,hidden_size]\n # print(f\"token_embedding devices: {token_embedding.device}\")\n\n if position_ids is None:\n position_ids = self.position_ids[:, :src_len] # [1,src_len]\n positional_embedding = self.position_embeddings(position_ids)\n # [src_len, 1, hidden_size]\n\n embeddings = token_embedding + positional_embedding\n # [src_len,batch_size,hidden_size] + [src_len,1,hidden_size] + [src_len,batch_size,hidden_size]\n\n embeddings = self.LayerNorm(embeddings) # [src_len, batch_size, hidden_size]\n #modify\n embeddings = embeddings.transpose(0,1)\n embeddings = self.dropout(embeddings)\n # print(\"embedding shape\",embeddings.shape)\n return embeddings\n","repo_name":"GODsRhand/GraduateProject","sub_path":"Bert/model/BasicBert/BertEmbedding.py","file_name":"BertEmbedding.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35351997728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : test_asyncio.py\n# @Author: Wade Cheung\n# @Date : 2018/7/4\n# @Desc : 协程asyncio, 只使用一个线程,在一个线程中规定某个代码块执行顺序. + 延迟执行 还有gevent, greenlet等第三方包\n\n# 对于多线程应用,CPU通过切片的方式来切换线程间的执行,线程切换时需要耗时(保存状态)\n# 适用场景: 当程序中存在大量不需要CPU的操作时(IO),适用于协程\n\nimport asyncio\n\n\nasync def cor1():\n print('COR1 start')\n await cor2()\n print(\"COR1 end\")\n\n\nasync def cor2():\n print(\"COR2\")\n\n\nasync def cor3(loop3):\n print('registering callbacks')\n loop3.call_later(0.2, callback, 1) # 延迟执行\n loop3.call_later(0.1, callback, 2)\n loop3.call_soon(callback, 3)\n\n await asyncio.sleep(4)\n # await cor1()\n\n\ndef callback(n):\n print('callback {} invoked'.format(n))\n\n\n# loop = asyncio.get_event_loop() # 启动默认的event loop . 协程执行的控制点\n# loop.run_until_complete(cor1()) # 阻塞执行 -- 直到所有的异步函数执行完成 -- 顺序执行\n# loop.close()\n# print('123')\n\nloop = asyncio.get_event_loop() # 启动默认的event loop . 
协程执行的控制点\nloop.run_until_complete(cor3(loop)) # 阻塞执行 -- 直到所有的异步函数执行完成 -- 延迟执行\nloop.close()\n","repo_name":"00wendi00/Python-initiation","sub_path":"thread1/test_asyncio.py","file_name":"test_asyncio.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"33662376448","text":"'''\nCreated on Dec 18, 2019\n\n@author: prnsoft\n'''\nimport cv2\nimport numpy as np\nimport imutils\nfrom imutils.perspective import four_point_transform\nfrom skimage.filters import threshold_local\n\n\nimage = cv2.imread('page.jpg')\nratio = image.shape[0] / 500.0 \norig = image.copy()\nimage = imutils.resize(image, height = 500)\n\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\ngray = cv2.GaussianBlur(gray, (5, 5), 0)\nedged = cv2.Canny(gray, 75, 200)\n\n# ------------------- Cach tim 4 diem cua 1 object hinh chu nhat ---------------\ncnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\ncnts = imutils.grab_contours(cnts)\n# func sorted dung de sort contour theo dien tich, reserse = True tuc la sort tu lon nhat -> nho nhat\n# [:5] lay 5 contour co dien tich lon nhat\ncnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n\nfor c in cnts:\n # Tinh chieu dai cua contour, True cho biet la contour la contour dong\n peri = cv2.arcLength(c, True)\n # Dung thuat toan lam giam so luong diem tao thanh 1 da giac (duong cong) (https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm)\n # 0.02*peri -> la tham so sap xi giua duong cong ban dau va sap si duong cong moi (voi it diem hon)\n # True -> la duong cong (da giac) co diem dau va diem cuoi noi nhau.\n approx = cv2.approxPolyDP(c, 0.02*peri, True)\n \n # Boi vi la hinh chu nhat, nen chi can 4 diem la ve dc 1 hinh chu nhat, nen approx sau khi toi gian diem se = 4\n if len(approx) == 4:\n screenCnt = approx\n break\n \nprint(\"Step2: Find contours of paper\")\ncv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)\n# -----------------------------------------------------------------------------\n# sap xep thu tu 4 diem cua 1 hinh chu nhat\n# sau do cat hinh chu nhat ra\nwarped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\nwarped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n\n# se tra ve 1 ma tran T, T se bao gom tat ca nhung gia tri sau khi qua filter threshold\n# block_size va offset la 2 bien can dung de canh chinh bo loc\nT = threshold_local(warped, 21, offset = 10, method = \"gaussian\")\n# so sanh vs ma tran T, neu gia tri nao lon hon se chuyen thanh 255\nwarped = (warped > T).astype(\"uint8\")*255\n\n\nprint(\"Step 3: Apply perspective transform\")\ncv2.imshow(\"Original\", imutils.resize(orig, height = 650))\ncv2.imshow(\"Scanned\", imutils.resize(warped, height = 650))\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nif __name__ == '__main__':\n pass","repo_name":"hohaidang/Python_Basic2Advance","sub_path":"DocumentScanner/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73242299713","text":"import math\n\n\ndef get_furthest_point_from_line(p1, p2, p3):\n p4 = []\n if (p1[0][0] - p2[0][0]) == 0:\n m = 0\n p_m = 0\n else:\n m = (p1[0][1] - p2[0][1]) / (p1[0][0] - p2[0][0]) # slope of hull points\n p_m = -(1.0 / m) # slope of line perpendicular to hull\n b = p2[0][1] - (m * p2[0][0]) # b for slope points (y = mx + b)\n p_b = p3[0][1] - (p_m * p3[0][0]) # b for 
perpendicular line to hull\n x = (b - p_b) / (p_m - m) # x intersection of lines\n y = (m*x) + b # y intersection of lines\n p4.append((x,y))\n xs = pow(p3[0][0] - p4[0][0], 2)\n ys = pow(p3[0][1] - p4[0][1], 2)\n return math.sqrt(xs + ys)\n\n\ndef point_location(p1, p2, p3):\n cp1 = ((p2[0][0] - p1[0][0]) * (p3[0][1] - p1[0][1])) - ((p2[0][1] - p1[0][1]) * (p3[0][0] - p1[0][0]))\n if cp1 > 0:\n ret = 1\n elif cp1 == 0:\n ret = 0\n else:\n ret = -1\n return ret\n","repo_name":"baslack/quickhull","sub_path":"quickhull/quick_hull_helper.py","file_name":"quick_hull_helper.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23412852431","text":"def readlines(fileName):\r\n fh = open(fileName, \"rb\")\r\n return [line for line in fh]\r\n\r\ndef ezprint(results, outputFile):\r\n with open(outputFile, \"wb\") as fh:\r\n for i, result in enumerate(results):\r\n print >> fh, \"Case #\" + str(i+1) + \": \" + result\r\n\r\ndef getnums(line):\r\n return [int(num) for num in line.split()]\r\n\r\ndef ezsort(sortable):\r\n return sorted(sortable, key=lambda x: x)\r\n\r\nresults = []\r\n\r\nlines = readlines('A.txt') #Name of input file\r\nnumTestCases = int(lines[0])\r\n\r\nfor i in range(0,numTestCases):\r\n firstAnswer = int(lines[10*i+1])\r\n c1 = lines[10*i+2:10*i+6]\r\n secondAnswer = int(lines[10*i+6])\r\n c2 = lines[10*i+7:10*i+11]\r\n r1 = c1[firstAnswer-1].split()\r\n r2 = c2[secondAnswer-1].split()\r\n solution = 'Volunteer cheated!'\r\n for char in r1:\r\n if char in r2:\r\n if solution == 'Volunteer cheated!':\r\n solution = char\r\n else:\r\n solution = 'Bad magician!'\r\n results.append(solution)\r\n\r\nezprint(results, 'output.txt')","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/2131.py","file_name":"2131.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38683837049","text":"import dataset\nimport torch\nimport numpy as np\n\n\ndef build_loader(config, logger):\n train_dataset = None\n train_data_loader = None\n train_sampler = None\n test_sampler = None\n\n if config.MODE == 'train':\n train_dataset, _ = build_dataset(mode='train', config=config, logger=logger)\n\n val_dataset, load_cache = build_dataset(mode='test', config=config, logger=logger)\n\n batch_size = config.DATASET.BATCH_SIZE\n num_workers = 0 if config.DEBUG else config.DATASET.NUM_WORKERS\n pin_memory = config.DATASET.PIN_MEMORY\n\n if train_dataset:\n train_data_loader = torch.utils.data.DataLoader(\n train_dataset, sampler=train_sampler,\n batch_size=batch_size,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=True,\n )\n val_data_loader = torch.utils.data.DataLoader(\n val_dataset, sampler=test_sampler,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=pin_memory,\n drop_last=True\n )\n logger.info(f'Build dataset loader: batch size: {batch_size} num_workers: {num_workers} pin_memory: {pin_memory}')\n return train_data_loader, val_data_loader\n\n\ndef build_dataset(mode, config, logger):\n name = config.DATASET.NAME\n args = config.DATASET.ARGS[0]\n if config.DEBUG:\n args['debug_len'] = 100\n dataset_ = getattr(dataset, name)(mode=mode, logger=logger, **args)\n\n return dataset_, args['load_cache'] if 'load_cache' in args else 
False\n","repo_name":"zhigangjiang/deepv2d_pytorch","sub_path":"dataset/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"17766768333","text":"from flet import icons, colors, Page\nimport myButtons\nfrom ctypes import *\n\n\nclass RightButton(myButtons.MyButton):\n def __init__(\n self,\n icon: icons,\n ipucu: str,\n color: colors,\n selected_icon: icons,\n ):\n super().__init__(\n icon=icon,\n ipucu=ipucu,\n color=color,\n selected_icon=selected_icon,\n )\n\n def click(self, e,):\n super().click(e=e)\n print(\"RightButton cilck fonksyonu çalışıyor\")\n\n # c_fun=CDLL(f\"mylibl.so\")\n # c_fun=CDLL(f\"c_lib/mylibl.so\")\n c_fun=CDLL(\"./mylibl.so\")\n c_fun.swap(int(1))\n self.iconButton.selected = True\n\n def build(self):\n return super().build()\n","repo_name":"erkanhurnali/MyTools","sub_path":"myMouseButtons.py","file_name":"myMouseButtons.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"42534394570","text":"# Exercício 7.8 - Leitura de arquivos\n\nimport random # importando a biblioteca que gera números aleatórios\n\n\ndef jogar():\n print('*********************************')\n print('***Bem vindo ao jogo da Forca!***')\n print('*********************************')\n\n\n # inicializando palavra secreta\n arquivo = open(\"palavras.txt\", \"r\")\n palavras = []\n for linha in arquivo:\n # construindo lista de palavras a partir do arquivo\n linha = linha.strip()\n palavras.append(linha)\n\n arquivo.close()\n\n # sorteia um índice para escolher a palavra aleatoriamente\n numero = random.randrange(0, len(palavras))\n palavra_secreta = palavras[numero].upper() \n letras_acertadas = ['_' for letra in palavra_secreta]\n\n\n # inicializando variáveis\n letras_acertadas = ['_', '_', '_', '_', '_', '_']\n acertou = False\n enforcou = False\n erros = 0\n\n # jogando...\n while not acertou and not enforcou:\n\n print(letras_acertadas)\n # pede chute\n chute = input('Qual letra? 
')\n chute = chute.strip().upper()\n\n if chute in palavra_secreta:\n # marca chute\n posicao = 0\n for letra in palavra:\n if chute.upper() == letra.upper():\n letras_acertadas[posicao] = letra\n posicao += 1\n \n else:\n # marca erro\n erros +=1 \n\n # atualiza booleanos\n acertou = '_' not in letras_acertadas\n enforcou = (erros == 7)\n\n print('Fim do jogo')\n\n\nif __name__ == \"__main__\":\n # só é executado quando rodamos este módulo.\n # se importarmos esse módulo em outro lugar, esse trecho é ignorado.\n jogar()","repo_name":"danigfavero/py14","sub_path":"jogos/forca3.py","file_name":"forca3.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4518685113","text":"\"\"\"\nFile name: game_w_draw_circle.py\nAuthor: Anton Karazeev \n\nThis file is part of joystick project (https://github.com/akarazeevprojects/joystick)\n\"\"\"\n\nimport time\nimport os\nimport random\n\nfrom Tkinter import *\nimport tkFont\nfrom PIL import Image, ImageTk\n\nroot = Tk()\n\ndef drawcircle(canv,x,y,rad):\n return canv.create_oval(x-rad,y-rad,x+rad,y+rad,width=0,fill='blue')\n\ncols = ['red','yellow','black','green','white','pink','blue']\n\nis_pressed = False\nit = 0\n\ndef movecircle(canv, cir):\n global is_pressed\n global it\n\n data = connection.recv(32)\n data = data.split()\n if len(data) >= 2:\n x = data[0]\n y = data[1]\n else:\n x = 300\n y = 300\n print(data)\n\n # if inp == False:\n # if not is_pressed:\n # canv.config(bg=cols[it % len(cols)])\n # it += 1\n # is_pressed = True\n # else:\n # is_pressed = False\n\n canv.coords(cir,x,y)\n\ndef callback(event=None):\n movecircle(canvas, a1)\n root.after(100, callback)\n\n\n#--------->-------->-------->\nimport socket, sys\n\n#Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind the socket to the port\nserver_address = ('192.168.0.100', 6666)\nprint('starting up on %s port %s' % server_address)\nsock.bind(server_address)\n\n# Listen for incoming connections\nsock.listen(1)\n\nconnection, client_address = sock.accept()\n#--------->-------->-------->\n\ncanvas = Canvas(width=600, height=600, bg='white')\ncanvas.pack()\n\nimg = PhotoImage(file=\"img/pepa.gif\")\nimg = img.subsample(5, 5)\na1 = canvas.create_image(100,100,image=img)\n\nroot.after(0, callback)\nroot.mainloop()\n","repo_name":"akarazeevprojects/joystick","sub_path":"mouse_controller/game_w_draw_circle.py","file_name":"game_w_draw_circle.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"17271552718","text":"from unicodedata import name\nfrom . 
import viewes\nfrom django.urls import path\n\nurlpatterns = [\n    path('', viewes.sec_app_home, name='data_home'),\n    path('create/', viewes.create, name='create'),\n    path('<int:pk>/', viewes.DataDetailNew.as_view(), name='news_datail'),\n    path('<int:pk>/update', viewes.DataUpdateNew.as_view(), name='news_update'),\n    path('<int:pk>/delete', viewes.DataDeleteNew.as_view(), name='news_delete'),\n]\n","repo_name":"Denis-Pashkov/projects","sub_path":"django_project_2/mysite/second_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"27898500066","text":"\"\"\"The model classes maintain the state and logic of the simulation.\"\"\"\n\nfrom __future__ import annotations\nfrom random import random\nfrom exercises.ex09 import constants\nfrom math import sin, cos, pi, sqrt\n\n\n__author__ = \"730470131\"\n\n\nclass Point:\n    \"\"\"A model of a 2-d cartesian coordinate Point.\"\"\"\n    x: float\n    y: float\n\n    def __init__(self, x: float, y: float):\n        \"\"\"Construct a point with x, y coordinates.\"\"\"\n        self.x = x\n        self.y = y\n\n    def add(self, other: Point) -> Point:\n        \"\"\"Add two Point objects together and return a new Point.\"\"\"\n        x: float = self.x + other.x\n        y: float = self.y + other.y\n        return Point(x, y)\n\n    def distance(self, other: Point) -> float:\n        \"\"\"Finds the distance between two points.\"\"\"\n        dist: float = sqrt((other.x - self.x)**2 + (other.y - self.y)**2)\n        return dist\n\n\nclass Cell:\n    \"\"\"An individual subject in the simulation.\"\"\"\n    location: Point\n    direction: Point\n    sickness: int = constants.VULNERABLE\n\n    def __init__(self, location: Point, direction: Point):\n        \"\"\"Construct a cell with its location and direction.\"\"\"\n        self.location = location\n        self.direction = direction\n\n    def tick(self) -> None:\n        \"\"\"Keeps track of the ticks.\"\"\"\n        self.location = self.location.add(self.direction)\n        if self.sickness > constants.RECOVERY_PERIOD:\n            self.immunize()\n        if self.is_infected():\n            self.sickness += 1\n    \n    def color(self) -> str:\n        \"\"\"Return the color representation of a cell.\"\"\"\n        if self.is_vulnerable() is True:\n            return \"gray\"\n        if self.is_infected() is True:\n            return \"red\"\n        if self.is_immune() is True: \n            return \"blue\"\n\n    def contract_disease(self) -> None:\n        \"\"\"Assign the INFECTED constant.\"\"\"\n        self.sickness = constants.INFECTED \n    \n    def is_vulnerable(self) -> bool:\n        \"\"\"Is the cell vulnerable.\"\"\"\n        if self.sickness == constants.VULNERABLE:\n            return True\n        else:\n            return False\n\n    def is_infected(self) -> bool:\n        \"\"\"Is the cell infected.\"\"\"\n        if self.sickness >= constants.INFECTED:\n            return True\n        else:\n            return False\n    \n    def contact_with(self, cell: Cell) -> None:\n        \"\"\"Causes cell to become infected if comes in contact with infected cell.\"\"\"\n        if self.is_vulnerable() and cell.is_infected():\n            self.contract_disease()\n        if self.is_infected() and cell.is_vulnerable():\n            cell.contract_disease()\n\n    def immunize(self) -> None:\n        \"\"\"Immunizes cell.\"\"\"\n        self.sickness = constants.IMMUNE\n\n    def is_immune(self) -> bool:\n        \"\"\"Checks if is immune.\"\"\"\n        if self.sickness == constants.IMMUNE:\n            return True\n        else:\n            return False\n\n\nclass Model:\n    \"\"\"The state of the simulation.\"\"\"\n\n    population: list[Cell]\n    time: int = 0\n\n    def __init__(self, cells: int, speed: float, infected_cells: int, immune_cells: int = 0):\n        \"\"\"Initialize the cells with random locations and directions.\"\"\"\n        
self.population = []\n if infected_cells >= cells or infected_cells <= 0:\n raise ValueError(\"Some number of Cell objects, but not all, must be infected.\")\n if immune_cells >= cells:\n raise ValueError(\"Some number of Cell objects, but not all, must be immune.\")\n for _ in range(cells):\n start_location: Point = self.random_location()\n start_direction: Point = self.random_direction(speed)\n cell: Cell = Cell(start_location, start_direction)\n if infected_cells > 0:\n cell.contract_disease()\n infected_cells -= 1\n elif immune_cells > 0:\n cell.immunize()\n immune_cells -= 1\n self.population.append(cell)\n\n def tick(self) -> None:\n \"\"\"Update the state of the simulation by one time step.\"\"\"\n self.time += 1\n for cell in self.population:\n cell.tick()\n self.enforce_bounds(cell)\n self.check_contacts()\n\n def random_location(self) -> Point:\n \"\"\"Generate a random location.\"\"\"\n start_x: float = random() * constants.BOUNDS_WIDTH - constants.MAX_X\n start_y: float = random() * constants.BOUNDS_HEIGHT - constants.MAX_Y\n return Point(start_x, start_y)\n\n def random_direction(self, speed: float) -> Point:\n \"\"\"Generate a 'point' used as a directional vector.\"\"\"\n random_angle: float = 2.0 * pi * random()\n direction_x: float = cos(random_angle) * speed\n direction_y: float = sin(random_angle) * speed\n return Point(direction_x, direction_y)\n\n def enforce_bounds(self, cell: Cell) -> None:\n \"\"\"Cause a cell to 'bounce' if it goes out of bounds.\"\"\"\n if cell.location.x > constants.MAX_X:\n cell.location.x = constants.MAX_X\n cell.direction.x *= -1.0\n elif cell.location.x < constants.MIN_X:\n cell.location.x = constants.MIN_X\n cell.direction.x *= -1.0\n elif cell.location.y > constants.MAX_Y:\n cell.location.y = constants.MAX_Y\n cell.direction.y *= -1.0\n elif cell.location.y < constants.MIN_Y:\n cell.location.y = constants.MIN_Y\n cell.direction.y *= -1.0\n\n def check_contacts(self) -> None:\n \"\"\"Checks to see if there is contact.\"\"\"\n i: int = 0\n while i < len(self.population):\n j: int = i + 1\n while j < len(self.population):\n if self.population[i].location.distance(self.population[j].location) < constants.CELL_RADIUS:\n self.population[i].contact_with(self.population[j])\n j += 1\n i += 1\n\n def is_complete(self) -> bool:\n \"\"\"Method to indicate when the simulation is complete.\"\"\"\n for cell in self.population:\n if cell.is_infected():\n return False\n return True","repo_name":"kburr3/COMP-110","sub_path":"exercises/ex09/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28283032810","text":"from aws_cdk import core as cdk\nfrom ermer.construct.s3_construct import S3Construct\nfrom ermer.construct.lambda_construct import LambdaConstruct\nfrom ermer.construct.apigateway_construct import APIConstruct\nfrom ermer.construct.neptune_construct import NeptuneConstruct\n\nfrom aws_cdk import core\n\n\nclass ErmerStack(cdk.Stack):\n\n def __init__(self, scope: cdk.Construct, construct_id: str, Stage=\"default\", **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n # The code that defines your stack goes here\n\n # Definition of S3\n self.My_S3_Bucket = S3Construct(\n self,\n \"s3-construct\",\n Stage=Stage\n )\n\n # Definition of Neptune\n self.My_Neptune = NeptuneConstruct(\n self,\n \"neptune-construct\",\n TargetS3 = self.My_S3_Bucket,\n Stage=Stage\n )\n \n # Definition of Lambda func\n 
self.My_Lambda_Func = LambdaConstruct(\n            self,\n            \"lambda-construct\",\n            TargetNeptune = self.My_Neptune,\n            Stage=Stage\n        )\n        \n        # Definition of API Gateway\n        self.My_API_Gateway = APIConstruct(\n            self,\n            \"apigateway-construct\",\n            TargetLambda = self.My_Lambda_Func,\n            Stage=Stage\n        )\n\n","repo_name":"tibbdc/ermer","sub_path":"ermer/stack/ermer_stack.py","file_name":"ermer_stack.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"13482655496","text":"import sqlite3\n\n\n\ndef retrieve_bot():\n    db = sqlite3.connect(\"bots.db\")\n    cursor = db.cursor()\n    sql_query = \"SELECT * FROM bots\"\n    cursor.execute(sql_query)\n    user_row = cursor.fetchone()\n    print(user_row)\n    db.close()\n\n\ndef run():\n    retrieve_bot()\n\n\nrun()\n","repo_name":"lawrencepj13/com728","sub_path":"data/persistence/select_one.py","file_name":"select_one.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"23569684811","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr  8 16:48:48 2017\r\n\r\n@author: noppa\r\n\"\"\"\r\nimport math\r\nimport collections as ct\r\n\r\ndef decompose(n, level):\r\n    if level == 0:\r\n        return [n]\r\n    if n == 1 :\r\n        return [0]\r\n    if n % 2 == 0:\r\n        return decompose(n/2 - 1, level-1) + decompose(n/2, level-1)\r\n    else :\r\n        return decompose((n-1)/2, level-1) + decompose((n-1)/2, level-1)\r\n    \r\n\r\n\r\n\r\ndef main():\r\n    path = \"C:\\\\Users\\\\noppa\\\\Dropbox\\\\codejam\\\\\"\r\n    filename = \"C-small-2-attempt1\"\r\n    fformat = \".in\"\r\n    inputfile = open(path+filename+fformat, 'r')\r\n    outputfile = open(path+filename+\"_ans\" + fformat,'w')\r\n    \r\n    noofcase = int(inputfile.readline())\r\n    \r\n    for i in range(1,noofcase+1):\r\n        line = inputfile.readline().replace('\\n','')\r\n        ns,ks = line.split(\" \")\r\n        n = int(ns)\r\n        k = int(ks)\r\n        level = int(math.floor(math.log2(k)))\r\n        dist = decompose(n, level)\r\n        posinlevel = k - int(2**level)\r\n        sorteddist = sorted(dist)\r\n        sorteddist.reverse()\r\n        ans = decompose(sorteddist[posinlevel], 1) \r\n        \r\n        #for j in range(k):\r\n            \r\n        answernumber = str(int(max(ans))) + \" \" + str(int(min(ans)))\r\n        print(answernumber)\r\n        answerstring = \"Case #\"+str(i)+\": \"+ answernumber\r\n        outputfile.write(answerstring + '\\n')\r\n        \r\n        \r\n        \r\n    inputfile.close()\r\n    outputfile.close()\r\n\r\nmain()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2220.py","file_name":"2220.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40891471629","text":"class Solution:\n    def intToRoman(self, num: int) -> str:\n        symbols = {1000:\"M\",900:\"CM\",500:\"D\",400:\"CD\",100:\"C\",90:\"XC\",50:\"L\",40:\"XL\",10:\"X\",9:\"IX\",5:\"V\",4:\"IV\",1:\"I\"}\n        roman = ''\n        \n        for value in symbols.keys():\n            freq = num // value\n            roman += symbols[value] * freq\n            num = num % value\n        \n        return roman\n    \n    ","repo_name":"Ephrem-shimels21/Competitive-Programming","sub_path":"0012-integer-to-roman/0012-integer-to-roman.py","file_name":"0012-integer-to-roman.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74210937153","text":"from vector_finished import Vector\r\n\r\n# Since you are going to study the behavior of n planets/satellites/...\r\n# interacting through the gravitational force, it is useful to define a\r\n# class that keeps the distance between all objects up to date. To do so, you\r\n# will implement a DistanceMatrix class, which will then simplify the\r\n# computation of the forces and of the potential energies.\r\n\r\n# To begin, define in the self.update() method a variable\r\n# self._rij that will hold the Vectors rij representing the distance between\r\n# object i and object j. By convention, class attributes whose name starts\r\n# with \"_\" are private. This means that they must not be used outside of the\r\n# class. Since we want to hide the implementation details of this matrix from\r\n# the user, so that they only use the functions \"unit_vector\", \"norm\",\r\n# \"norm_squared\" and \"r_over_rcube\", we declare the variable self._rij as\r\n# private.\r\n# The definition of self._rij can be done in several ways; it is up to you to\r\n# choose which one. The simplest way is to define a list of lists\r\n# self._rij = [\r\n#     [\r\n#         (self.objects[i] - self.objects[j])\r\n#         for i in range(self.n)\r\n#     ]\r\n#     for j in range(self.n)\r\n# ]\r\n# This matrix looks like:\r\n# 0 -1 -2 -4\r\n# 1 0 -3 -5\r\n# 2 3 0 -6\r\n# 4 5 6 0\r\n\r\n# I did not test this implementation because it is not efficient... indeed,\r\n# since rij=-rji, there is no point in storing the Vectors twice.\r\n# Similarly, since rii=0, there is no need to store them. A more efficient\r\n# way to store this data is therefore to use a shorter list whose kth element\r\n# is the kth nonzero entry of a strictly lower triangular matrix. Such a\r\n# matrix looks like this:\r\n# 0 0 0 0\r\n# 1 0 0 0\r\n# 2 3 0 0\r\n# 4 5 6 0\r\n# and the corresponding list is [1,2,3,4,5,6]. Notice that instead of storing\r\n# 16 Vectors, only 6 vectors are stored. The downside of this approach is\r\n# that it is not intuitive to access the distance separating object 3 from\r\n# object 2 (corresponding to the last row/second column of the matrix, the\r\n# value 5). This is why it is necessary to define the method self._get(i,j),\r\n# which returns the right Vector.\r\n\r\n\r\nclass DistanceMatrix:\r\n    \"\"\"\r\n    This class provides tools that simplify keeping track of the distances\r\n    between the objects.\r\n    \"\"\"\r\n    def __init__(self, objects: list[Vector]):\r\n        self.objects = objects\r\n        self.n = len(objects)\r\n        self.update(objects)\r\n\r\n    def update(self, new_objects) -> None:\r\n        \"\"\"Updates the distances between the objects\"\"\"\r\n        self._rij = []\r\n        for j in range(self.n):\r\n            for i in range(self.n):\r\n                if j < i:\r\n                    b = new_objects[i] - new_objects[j]\r\n                    self._rij.append(b)\r\n\r\n    def _get(self, i: int, j: int) -> Vector:\r\n        \"\"\"\r\n        Returns the distance vector between object i and object j\r\n        \"\"\"\r\n        if j < i:\r\n            a = j * (2 * self.n - j - 3) // 2 + i\r\n            distance = -self._rij[a-1]\r\n        elif i == j:\r\n            distance = Vector(0, 0)\r\n        else:\r\n            i, j = j, i\r\n            a = j * (2 * self.n - j - 3) // 2 + i\r\n            distance = self._rij[a-1]\r\n\r\n        return distance\r\n\r\n    def unit_vector(self, i: int, j: int) -> Vector:\r\n        \"\"\"\r\n        Returns the unit vector starting at object i and pointing toward\r\n        object j.\r\n        \"\"\"\r\n        d = self._get(i, j)\r\n        a = d.norm()\r\n        if a == 0:\r\n            raise AssertionError\r\n        else:\r\n            u = Vector(0, 0)\r\n            u.x = d.x / a\r\n            u.y = d.y / a\r\n        return Vector(u.x, u.y)\r\n\r\n    def norm(self, i: int, j: int) -> float:\r\n        \"\"\"\r\n        Returns the norm of the distance vector starting at object i and\r\n        pointing toward object j.\r\n        \"\"\"\r\n        d = self._get(i, j)\r\n        a = d.norm()\r\n        return a\r\n\r\n    def norm_squared(self, i: int, j: int) -> float:\r\n        \"\"\"\r\n        Returns the squared norm of the distance vector starting at object i\r\n        and pointing toward object j.\r\n        \"\"\"\r\n        d = self._get(i, j)\r\n        a = d.norm_squared()\r\n        return a\r\n\r\n    def r_over_rcube(self, i: int, j: int) -> Vector:\r\n        \"\"\"\r\n        Returns the distance vector starting at object i and pointing toward\r\n        object j divided by the cube of its norm.\r\n        \"\"\"\r\n        r = self._get(i, j)\r\n        a = r.norm()\r\n        if a != 0:\r\n            r_over_rcube = r/a**3\r\n\r\n        else:\r\n            raise AssertionError\r\n\r\n        return r_over_rcube\r\n\r\n\r\ndef test_distance_matrix() -> None:\r\n    objs = [\r\n        Vector(0, 3),\r\n        Vector(4, 0),\r\n        Vector(0, 0),\r\n    ]\r\n    ds = DistanceMatrix(objs)\r\n\r\n    assert ds._get(1, 0) == Vector(-4, 3)\r\n    assert ds._get(0, 1) == Vector(4, -3)\r\n\r\n    assert ds._get(2, 0) == Vector(0, 3)\r\n    assert ds._get(0, 2) == Vector(0, -3)\r\n\r\n    assert ds._get(2, 1) == Vector(4, 0)\r\n    assert ds._get(1, 2) == Vector(-4, 0)\r\n\r\n    assert ds.norm_squared(0, 0) == 0\r\n    assert ds.norm_squared(1, 1) == 0\r\n    assert ds.norm_squared(2, 2) == 0\r\n\r\n    assert ds.norm_squared(1, 0) == 25\r\n    assert ds.norm_squared(0, 1) == 25\r\n\r\n    assert ds.norm_squared(1, 2) == 16\r\n    assert ds.norm_squared(2, 1) == 16\r\n\r\n    assert ds.norm_squared(2, 0) == 9\r\n    assert ds.norm_squared(0, 2) == 9\r\n\r\n    assert ds.r_over_rcube(0, 1) == Vector(0.032, -0.024)\r\n    assert ds.r_over_rcube(1, 0) == Vector(-0.032, 0.024)\r\n\r\n    objs[2] = Vector(4, 0)\r\n    ds.update(objs)\r\n    assert ds.norm_squared(1, 2) == 0\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    test_distance_matrix()\r\n","repo_name":"jduf/tm-2223-yoann","sub_path":"distance_matrix.py","file_name":"distance_matrix.py","file_ext":"py","file_size_in_byte":5956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
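The distance_matrix.py record above packs only the strictly lower triangular entries of the pairwise-distance matrix into a flat list and recovers entry (i, j) with the index formula a = j*(2n - j - 3)/2 + i. A minimal standalone sketch of that mapping follows (illustrative only, not part of the dataset; the brute-force fill order mirrors DistanceMatrix.update()):

# Verify the packed lower-triangular index formula against the fill order.
n = 5
fill_order = [(i, j) for j in range(n) for i in range(n) if j < i]

def packed_position(i, j, n):
    # 1-based position of entry (i, j), with j < i, in the flat list
    return j * (2 * n - j - 3) // 2 + i

for k, (i, j) in enumerate(fill_order):
    assert packed_position(i, j, n) - 1 == k
print("packed indexing consistent for n =", n)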
+{"seq_id":"37753764164","text":"import cv2 as cv\nfrom cv2 import WINDOW_NORMAL\nfrom cv2 import resize\nimport numpy as np\n\ndef mouse_Event(event,x,y,flag,param):\n\n if event == cv.EVENT_LBUTTONDOWN:\n text = \"x = \" + str(x) + \",y \" + str(y)\n cv.putText(img,text,(x,y),cv.FONT_HERSHEY_COMPLEX,0.4,(255,255,255),1)\n\n if event == cv.EVENT_RBUTTONDOWN:\n b = img[x , y , 0] #for blue channel is 0\n g = img[x , y , 1] #for green channel is 1\n r = img[x , y , 2] #for red channel is 2\n\n color_bgr = \". \" + str(b) + \", \" + str(g) + \", \" + str(r)\n cv.putText(img,color_bgr,(x,y),cv.FONT_HERSHEY_COMPLEX,0.4,(152,255,130),1)\n\ncv.namedWindow(\"Res\",WINDOW_NORMAL)\ncv.setMouseCallback(\"Res\",mouse_Event)\n\nimg = cv.imread(\"abhi1.png\")\nimg = cv.resize(img,0,960,1080)\nwhile True:\n cv.imshow(\"Res\",img)\n if cv.waitKey(1) & 0xff == ord('q'):\n break\n\ncv.destroyAllWindows()","repo_name":"sharaabhishek/Personal-Projects","sub_path":"Minor Projects/ImagePixPos/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43305396931","text":"import sys\nimport os\nimport cv2\nimport time\nimport torch\nfrom tqdm import tqdm\nimport utils.evaluate as evaluate\nimport utils.read as read\nimport algorithms.ViBeGPU as ViBeGPU\nimport algorithms.rtsbs as rtsbs\n\nfrom utils.argument_parser import args\n\n# Definition of the device\ndevice = torch.device(args.device)\n\n# Get the names of the categories and the videos\ncategories, videos = read.getCategories(args.dataset)\n\n# Create the evaluation confusion matrices\nconfusion_matrices = evaluate.ConfusionMatricesHolder(device, categories, videos, args.median)\n\n# Loop over all categories that were retrieved\nfor category in categories:\n\n\t# Loop over all videos\n\tfor video in videos[category]:\n\n\t\t# Definition of the video directory path\n\t\tvideo_input_dir = os.path.join(args.dataset, category, video, \"input\")\n\n\t\t# Definition of the groundtruth video directory path\n\t\tvideo_groundtruth_dir = os.path.join(args.dataset, category, video, \"groundtruth\")\n\n\t\t# Definition of the semantic video directory path\n\t\tvideo_semantic_dir = os.path.join(args.semantic, category, video)\n\n\t\t# Defition of the arrays that will contain the different sequences\n\t\tprint(\"Loading the input video\")\n\t\tvideo_original = read.readVideo(video_input_dir, img_type=cv2.IMREAD_COLOR, num_channels=3 , data_type=\"uint8\")\n\t\tvideo_original = torch.from_numpy(video_original).transpose(1,3).transpose(2,3).type(torch.float)\n\t\tprint(\"Loading the ground-truth\")\n\t\tvideo_groundtruth = read.readVideo(video_groundtruth_dir, img_type=cv2.IMREAD_GRAYSCALE, num_channels=1 , data_type=\"uint8\")\n\t\tvideo_groundtruth = torch.from_numpy(video_groundtruth).type(torch.float)\n\t\tprint(\"Loading the semantic masks\")\n\t\tvideo_semantic = read.readVideo(video_semantic_dir, img_type=cv2.IMREAD_ANYDEPTH, num_channels=1 , data_type=\"float32\")\n\t\tvideo_semantic = torch.from_numpy(video_semantic).type(torch.float)\n\n\t\tprint(\"Processing of the video\")\n\n\t\t# At this point, the videos are loaded in their correct format\n\t\t# The individual frames will be transfered to the GPU and transformed to float for operations\n\n\t\t# Creation of the rt-sbs class\n\t\tframe_init = video_original[0].to(device)\n\t\tbgs = ViBeGPU.ViBe(frame_init, device)\n\t\tframe_init = frame_init.to(\"cpu\")\n\n\t\t# Creation of the 
rt-sbs class\n\t\tsemantic_processing = rtsbs.RTSBS(device, video_semantic[0], args.taubg, args.taufg, args.taubgstar, args.taufgstar, args.moduloupdate)\n\n\t\t# Frame index to know at which frame of the video we are\n\t\tframe_index = 0\n\t\tframe_rtsbs = None\n\n\t\ttime_start = time.time()\n\n\t\t#Loop over all frames of the video\n\t\tp_bar = tqdm(total = video_original.shape[0])\n\t\tfor frame_original, frame_groundtruth, frame_semantic in zip(video_original, video_groundtruth, video_semantic):\n\t\t\t\n\t\t\tframe_original = frame_original.to(device)\n\t\t\tframe_semantic = frame_semantic.to(device)\n\t\t\t\n\t\t\tmask = bgs.segmentation_(frame_original)\n\t\t\t\n\t\t\t# With semantics\n\t\t\tif frame_index %args.framerate == 0:\n\t\t\t\tframe_rtsbs = semantic_processing.segment_semantics(frame_original, mask, frame_semantic)\n\n\t\t\t# Without semantics\n\t\t\telse:\n\t\t\t\tframe_rtsbs = semantic_processing.segment_no_semantics(frame_original, mask)\n\n\t\t\tbgs.update_(frame_original, frame_rtsbs)\n\n\t\t\tconfusion_matrices.confusion_matrix[category][video].evaluate(frame_rtsbs, frame_groundtruth)\n\n\t\t\tframe_original = frame_original.to(\"cpu\")\n\t\t\tmask = mask.to(\"cpu\")\n\t\t\t\n\t\t\tp_bar.update(1)\n\t\t\t\n\t\t\tframe_index += 1\n\t\tp_bar.close()\n\n\t\ttime_stop = time.time()\n\t\tprint(category + \" - \", video)\n\t\tprint(\"F1: \", confusion_matrices.confusion_matrix[category][video].F1())\n\t\tprint(\"Timing with device: \", device, \" = \", time_stop-time_start, \" seconds\")\n\nprint(\"Mean F1 Score\", confusion_matrices.meanF1(categories, videos))\n\"\"\"\nfile = open(\"../output/log-rtsbs.log\",'a')\nfile.write(str(args.framerate))\nfile.write(\": \")\nfile.write(str(confusion_matrices.meanF1(categories, videos)))\nfile.write(\"\\n\")\nfile.close()\n\"\"\"","repo_name":"cioppaanthony/rt-sbs","sub_path":"src/main-rtsbs.py","file_name":"main-rtsbs.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
+{"seq_id":"399092677","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport os\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataset,info = tfds.load(\"fashion_mnist\",\n                         as_supervised=True,\n                         with_info=True,\n                         split=[\"train\",\"test\"])\n\ntrain_dataset, test_dataset = dataset[0],dataset[1]\n\nIMG_SIZE = 28\nBATCH_SIZE = 100\nEPOCHS = 100\ntrain_size = info.splits[\"train\"].num_examples\ntest_size = info.splits[\"test\"].num_examples\nnum_output = info.features[\"label\"].num_classes\n\nprint(train_dataset)\n\ndef image_resize_norm(images,labels):\n    images = tf.image.resize(images,(IMG_SIZE,IMG_SIZE))/255.0\n    return images,labels\n\ntrain_dataset = train_dataset.map(image_resize_norm).shuffle(train_size//4).batch(BATCH_SIZE).prefetch(1)\ntest_dataset = test_dataset.map(image_resize_norm).batch(BATCH_SIZE).prefetch(1)\n\nclass MyCallback(tf.keras.callbacks.Callback):\n    def on_epoch_end(self, epoch, logs=None):\n        if logs[\"val_accuracy\"] >= 0.95:\n            self.model.stop_training = True\n\n\nEarly = tf.keras.callbacks.EarlyStopping(patience=5,restore_best_weights=True)\nCustom = MyCallback()\ncalls = [Early,Custom]\n\nmodel = tf.keras.Sequential()\nmodel.add(tf.keras.layers.Conv2D(filters=64,\n                                 kernel_size=3,\n                                 activation=\"relu\",\n                                 input_shape=(IMG_SIZE,IMG_SIZE,1)))\nmodel.add(tf.keras.layers.MaxPool2D(pool_size=2))\nmodel.add(tf.keras.layers.Conv2D(filters=32,kernel_size=2,activation=\"relu\"))\nmodel.add(tf.keras.layers.MaxPool2D(pool_size=2))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dropout(0.3))\nmodel.add(tf.keras.layers.Dense(64,activation=\"relu\"))\nmodel.add(tf.keras.layers.Dense(num_output,activation=\"softmax\"))\n\nmodel.compile(optimizer=\"adam\",\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(),\n              metrics=[\"accuracy\"])\n\nif not os.path.exists(\"Fashion_MNIST_CNN.h5\") or not os.path.exists(\"Fashion_MNIST_CNN.pkl\"):\n    hist = model.fit(train_dataset,\n                     epochs=EPOCHS,\n                     validation_data = test_dataset,\n                     callbacks=calls)\n\n    model.save(\"Fashion_MNIST_CNN.h5\")\n    hist = hist.history\n    with open(\"Fashion_MNIST_CNN.pkl\",\"wb\") as f:\n        pickle.dump(hist, f)\nelse:\n    model = tf.keras.models.load_model(\"Fashion_MNIST_CNN.h5\")\n    with open(\"Fashion_MNIST_CNN.pkl\",'rb') as f:\n        hist = pickle.load(f)\n\nloss = hist[\"loss\"]\nval_loss = hist[\"val_loss\"]\nacc = hist[\"accuracy\"]\nval_acc = hist[\"val_accuracy\"]\nx = np.arange(1, len(loss) + 1)\n\nplt.subplot(2,1,1)\nplt.plot(x,loss,label=\"loss\")\nplt.plot(x,val_loss,label=\"val_loss\")\nplt.ylabel(\"Loss\")\nplt.legend()\n\nplt.subplot(2,1,2)\nplt.plot(x,acc,label=\"accuracy\")\nplt.plot(x,val_acc,label=\"val_accuracy\")\nplt.xlabel(\"EPOCH\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\n\nplt.show()","repo_name":"EduardoPach/Tensorflow_Certificate_Train","sub_path":"FASHION_MNIST/MNIST_CNN.py","file_name":"MNIST_CNN.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3238584116","text":"import collections\n\n\nclass Solution:\n    def getHint(self, secret, guess):\n        \"\"\"\n        :type secret: str\n        :type guess: str\n        :rtype: str\n        \"\"\"\n        A = B = 0\n        c1 = collections.Counter(secret)\n        c2 = collections.Counter(guess)\n        for x, y in zip(secret, guess):\n            if x == y:\n                c1[x] -= 1\n                c2[y] -= 1\n                A += 1\n        for c in c2:\n            if c in c1:\n                B += min(c1[c], c2[c])\n        return str(A) + \"A\" + str(B) + \"B\"","repo_name":"chien-wei/LeetCode","sub_path":"0299_Bulls_and_Cows.py","file_name":"0299_Bulls_and_Cows.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9172285515","text":"import math\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom models import ProGANDiscriminator\n\nfrom util import Conv2dNormalizedLR, local_response_normalization, LinearNormalizedLR, Conv2dTransposeNormalizedLR\n\n\nclass ProGANUpBlock(torch.nn.Module):\n    def __init__(self, input_channels, output_channels, upsample=True, local_response_norm=True, weight_norm=False):\n        super().__init__()\n        self.input_channels = input_channels\n        self.output_channels = output_channels\n        self.weight_norm = weight_norm\n\n        self.conv_1 = Conv2dTransposeNormalizedLR(input_channels, output_channels, kernel_size=3, padding=1, weight_norm=self.weight_norm)\n        self.conv_2 = Conv2dTransposeNormalizedLR(output_channels, output_channels, kernel_size=3, padding=1, weight_norm=self.weight_norm)\n        # Weight Norm is always disabled here because we don't want to normalize the RGB output\n        self.conv_rgb = Conv2dNormalizedLR(output_channels, 3, kernel_size=1, weight_norm=False)\n        self.upsample = upsample\n        self.lrn = local_response_norm\n\n    def forward(self, x):\n        if self.upsample:\n            x = F.interpolate(x, scale_factor=2)\n\n        x = 
self.conv_1(x)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n x = self.conv_2(x)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n rgb = self.conv_rgb(x)\n\n return x, rgb\n\n\nclass ProGANAdditiveGenerator(torch.nn.Module):\n def __init__(self, latent_size, n_upscales, output_h_size, local_response_norm=True, scaling_factor=2,\n max_h_size: int = 1e10, weight_norm=False):\n super().__init__()\n self.n_upscales = n_upscales\n self.output_h_size = output_h_size\n self.scaling_factor = scaling_factor\n self.initial_size = min(int(output_h_size * self.scaling_factor ** (n_upscales)), max_h_size)\n self.lrn = local_response_norm\n self.weight_norm = weight_norm\n\n self.inp_layer = LinearNormalizedLR(latent_size, self.initial_size * 4 * 4, weight_norm=self.weight_norm)\n self.init_layer = Conv2dTransposeNormalizedLR(self.initial_size, self.initial_size, kernel_size=3, padding=1, weight_norm=self.weight_norm)\n self.init_rgb = Conv2dNormalizedLR(self.initial_size, 3, kernel_size=1, weight_norm=self.weight_norm)\n\n self.layer_list = []\n for i in range(n_upscales):\n inp_channels = min(int(output_h_size * self.scaling_factor ** (n_upscales - i)), max_h_size)\n outp_channels = min(int(output_h_size * self.scaling_factor ** (n_upscales - i - 1)), max_h_size)\n self.layer_list.append(ProGANUpBlock(inp_channels, outp_channels, local_response_norm=local_response_norm, weight_norm=self.weight_norm))\n self.layers = torch.nn.ModuleList(self.layer_list)\n\n def forward(self, x, phase=None):\n\n # Project latent vectors onto hypersphere\n x_divisor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True)) + 1e-8\n x = x / x_divisor\n\n if phase is None:\n phase = self.n_upscales\n\n n_upscales = min(int(phase), self.n_upscales)\n alpha = phase - (n_upscales)\n\n if alpha == 0.0 and n_upscales >= 1:\n alpha += 1.0\n\n x = self.inp_layer(x)\n x = x.view(-1, self.initial_size, 4, 4)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n x = self.init_layer(x)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n rgb = self.init_rgb(x)\n\n if alpha == 0.0 and n_upscales == 0:\n return rgb\n\n next_x, next_rgb = self.layers[0](x)\n next_rgb = F.interpolate(rgb, scale_factor=2, mode=\"bilinear\") + next_rgb\n\n n_actual_upscales = n_upscales\n if 0 < alpha < 1:\n n_actual_upscales += 1\n\n for i in range(1, min(self.n_upscales, n_actual_upscales)):\n x, rgb = next_x, next_rgb\n next_x, next_rgb = self.layers[i](x)\n next_rgb = F.interpolate(rgb, scale_factor=2, mode=\"bilinear\") + next_rgb\n\n if alpha == 1.0 and n_upscales > 0:\n return next_rgb\n\n out_rgb = (1 - alpha) * F.interpolate(rgb, scale_factor=2, mode=\"bilinear\") + alpha * next_rgb\n return out_rgb\n\n\nclass ProGANResidualDownBlock(torch.nn.Module):\n def __init__(self, input_channels, output_channels, downsample=True, local_response_norm=False,\n progan_var_input=False, last_layer=False):\n super().__init__()\n self.input_channels = input_channels\n self.output_channels = output_channels\n self.progran_var_input = progan_var_input\n self.last_layer = last_layer\n\n conv_1_input_channels = input_channels + (1 if progan_var_input else 0)\n # According to the ProGAN paper appendix, the \"hidden\" number of channels should be the same as the input size\n conv_1_output_channels = output_channels if self.last_layer else input_channels\n self.conv_1 = Conv2dNormalizedLR(conv_1_input_channels, conv_1_output_channels,\n 
kernel_size=3, padding=1)\n if not self.last_layer:\n self.conv_2 = Conv2dNormalizedLR(input_channels, output_channels, kernel_size=3, padding=1)\n if self.input_channels != self.output_channels:\n self.conv_res = Conv2dNormalizedLR(input_channels, output_channels, kernel_size=1)\n self.conv_rgb = Conv2dNormalizedLR(3, input_channels, kernel_size=1)\n self.downsample = downsample\n self.lrn = local_response_norm\n\n def forward(self, x):\n carry = x\n if self.progran_var_input:\n # Apply the ProGAN mbatch stddev trick\n stddevs = x.std(dim=0, keepdim=True)\n stddev = stddevs.mean()\n feature_map = torch.zeros_like(x[:, :1]) + stddev\n x = torch.cat([x, feature_map], dim=1)\n x = self.conv_1(x)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n if not self.last_layer:\n x = self.conv_2(x)\n if self.lrn:\n x = local_response_normalization(x)\n x = F.leaky_relu(x, 0.2)\n\n if self.downsample:\n x = F.avg_pool2d(x, 2)\n carry = F.avg_pool2d(carry, 2)\n\n if self.input_channels != self.output_channels:\n carry = self.conv_res(carry)\n x = (carry + x)/(2**0.5)\n return x\n\n def from_rgb(self, x):\n # Generated an input for this network from RGB\n x = self.conv_rgb(x)\n x = F.leaky_relu(x, 0.2)\n return x\n\n\nclass ProGANResDiscriminator(ProGANDiscriminator):\n down_block = ProGANResidualDownBlock\n\n\nclass ProGANResEncoder(torch.nn.Module):\n down_block = ProGANResidualDownBlock\n def __init__(self, latent_size, n_downscales, full_res_h_size, scaling_factor=2, max_h_size: int = 1e10):\n super().__init__()\n self.n_downscales = n_downscales\n self.h_size = full_res_h_size\n self.scaling_factor = scaling_factor\n self.latent_size = latent_size\n\n self.deepest_channels = min(int(full_res_h_size * (self.scaling_factor ** (n_downscales))), max_h_size)\n\n self.outp_layer_1 = LinearNormalizedLR(self.deepest_channels * 4 * 4, self.deepest_channels)\n self.outp_layer_2 = LinearNormalizedLR(self.deepest_channels, latent_size*2)\n outp_block = self.down_block(self.deepest_channels, self.deepest_channels, downsample=False,\n local_response_norm=False, progan_var_input=False, last_layer=True)\n\n self.layer_list = [outp_block]\n for i in range(n_downscales):\n inp_channels = min(int(full_res_h_size * (self.scaling_factor ** (n_downscales - i - 1))), max_h_size)\n outp_channels = min(int(full_res_h_size * (self.scaling_factor ** (n_downscales - i))), max_h_size)\n self.layer_list.append(self.down_block(inp_channels, outp_channels, local_response_norm=False))\n self.layers = torch.nn.ModuleList(self.layer_list)\n\n def forward(self, x, phase=None):\n if phase is None:\n phase = self.n_downscales\n\n n_downscales = min(int(phase), self.n_downscales)\n alpha = phase - n_downscales\n\n if alpha == 0.0:\n x = self.layers[n_downscales].from_rgb(x)\n else:\n x1 = self.layers[n_downscales + 1].from_rgb(x)\n x1 = self.layers[n_downscales + 1](x1)\n\n x2 = F.avg_pool2d(x, 2)\n x2 = self.layers[n_downscales].from_rgb(x2)\n\n x = alpha * x1 + (1 - alpha) * x2\n\n for i in range(0, n_downscales + 1):\n layer = self.layers[n_downscales - i]\n x = layer(x)\n\n x = x.view(-1, self.deepest_channels * 4 * 4)\n\n x = self.outp_layer_1(x)\n x = F.leaky_relu(x, 0.2)\n\n x = self.outp_layer_2(x)\n means, log_vars = x[:, :self.latent_size], x[:, self.latent_size:]\n log_vars = -torch.nn.functional.softplus(log_vars)\n z = self.sample(means, log_vars)\n\n return z, means, log_vars\n\n @staticmethod\n def sample(means, vars):\n stds = torch.exp(0.5 * vars)\n eps = torch.randn_like(stds)\n return 
means + eps * stds\n\nif __name__ == \"__main__\":\n    from models import ProGANDiscriminator\n\n    def compute_n_params(model):\n        total = 0\n        for p in model.parameters():\n            n_params = 1\n            for d in p.size():\n                n_params *= d\n            total += n_params\n        return total\n\n\n    G = ProGANAdditiveGenerator(128, 4, 8, scaling_factor=2)\n    D = ProGANResDiscriminator(4, 8, scaling_factor=2)\n    E = ProGANResEncoder(128, 4, 8, 2)\n\n    for phase in [0, 0.5, 1, 2, 3, 3.5, 4]:\n        z = torch.normal(0, 1, (10, 128))\n        x_gen = G(z, phase=phase)\n        print(\"G out: \", x_gen.size())\n        d_out = D(x_gen, phase=phase)\n        print(d_out.size())\n        e_out = E(x_gen, phase=phase)\n        print(\"e_out\", e_out[0].size())\n\n    print(\"G_params: \", compute_n_params(G))\n    print(\"D_params: \", compute_n_params(D))\n","repo_name":"Gerryflap/progan_experiments","sub_path":"models_additional.py","file_name":"models_additional.py","file_ext":"py","file_size_in_byte":10274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25928080446","text":"import collections\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nimport io\nimport json\nclass Configure:\n    def __init__(self,path=\"./config\"):\n        '''\n        :param path:\n        '''\n        self.path=path\n        self.option = collections.OrderedDict()\n        self.allengine=[\"Google\",\"Flickr\",\"Bing\"]\n        #self.option['engine'] = [\"Google\",\"Flickr\",\"Bing\"]\n        self.option['engine'] = ['Google']\n        self.option['safe search']=BooleanVar(value=False)\n        self.option['exclude'] = [\"*/.svn\", \"*/.bzr\"]\n        self.option['dry_run'] = None\n        self.option['prefer'] = []\n        self.option['defaults'] = BooleanVar(value=False)\n        self.option['exact'] = BooleanVar(value=False)\n        self.option['min_size'] = 25\n        self.option['noninteractive'] = None\n        self.option['deletedup'] = BooleanVar(value=False)\n        self.option['dir'] ='/home/apasai'\n        self.option['face-only'] = BooleanVar(value=False)\n        self.option['timeout'] = 25\n        self.option['num-threads'] = 50\n        self.option['max-number'] = -4\n        self.option['output'] =\"./download_imagess\"\n        self.option['proxy_http'] = None\n        self.option['numImage'] = 500\n        self.option['proxy_socks5'] = None\n        self.engineVariable = dict()\n        for i in self.allengine:\n            self.engineVariable[i]=BooleanVar()\n        for key ,_ in self.engineVariable.items():\n            self.engineVariable[key].set(False)\n        # initializing the choice, i.e. 
Python\n        self.show_line_number = IntVar()\n        self.show_line_number.set(1)\n        self.show_cursor_info = IntVar()\n        self.show_cursor_info.set(1)\n        self.theme_choice = StringVar()\n        self.theme_choice.set('Default')\n        self.highlight_line = IntVar()\n        self.backup=self.option\n    def EngineChoose(self):\n        self.option['engine']=[]\n        for key,value in self.engineVariable.items():\n            if value.get():\n                self.option['engine'].append(key)\n        print(self.option['engine'])\n    def saveConfigure(self):\n        filePath =self.path\n        self._SaveConfigure(self.option,filePath)\n        messagebox.showinfo(\"save configure\", \"configure saved as '%s'\"%filePath)\n        self.path=filePath\n\n    def saveAsConfigure(self):\n        filePath = filedialog.asksaveasfilename(filetypes=((\"Json File\", \"*.json\")\n                                                           , (\"All files\", \"*.*\")))\n        self._SaveConfigure(self.option,filePath)\n        messagebox.showinfo(\"save configure\", \"configure saved as '%s'\"%filePath)\n        self.path=filePath\n\n    def goDefult(self,path=\"./config\"):\n        answer=messagebox.askyesno(\"Default\",\"Are you sure?\")\n        # try:\n        if answer:\n            for key, _ in self.engineVariable.items():\n                self.engineVariable[key].set(False)\n            self.option=self.backup\n            for key in self.option['engine']:\n                self.engineVariable[key].set(True)\n            self.path = './config'\n            self._SaveConfigure(self.option)\n            messagebox.showinfo(\"Load default configure\", \"Default configure loaded (./config.json)\")\n        # except:\n        #     messagebox.showerror(\"Load default configure\", \"Default configure not loaded\" )\n    def _SaveConfigure(self,data,path='./config.json'):\n        self.savedictionary=collections.OrderedDict()\n        for key , value in data.items():\n            if isinstance(value,(BooleanVar)):\n                self.savedictionary[key]=int(value.get())\n            elif value == None:\n                self.savedictionary[key] = \"None\"\n            else:\n                self.savedictionary[key] = value\n        with open(path, 'w') as fp:\n            json.dump(self.savedictionary, fp)\n    def loadConfigure(self):\n        for key ,_ in self.engineVariable.items():\n            self.engineVariable[key].set(False)\n        filePath=filedialog.askopenfilename(filetypes = ((\"Json File\", \"*.json\")\n                                                         ,(\"All files\", \"*.*\")))\n        if filePath:\n            try:\n                print('read:',filePath)\n                with io.open(filePath,mode='r') as file:\n                    data = json.load(file)\n                for key , value in data.items():\n                    if value in [0,1]:\n                        self.changeOption(bool(value), optionname=key)\n                    elif value == 'None':\n                        self.changeOption(None, optionname=key)\n                    else:\n                        self.changeOption(value, optionname=key)\n                for key in self.option['engine']:\n                    self.engineVariable[key].set(True)\n                self.path=filePath\n            except:\n                messagebox.showerror(\"Open Source File\", \"Failed to read file \\n'%s'\" % filePath)\n    def changeOption(self,value,optionname='engine'):\n        if value in [True,False]:\n            self.option[optionname].set(value)\n        else:\n            self.option[optionname]=value\n\n\n\n\n","repo_name":"iamhosseinbiniazian/gui_image_crawler_and_downloader","sub_path":"processing/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1477263444","text":"class Node(object):\n    def __init__(self, data = None):\n        self.data = data\n        self.next = None\n\n\n# Class to create a Linked List\nclass LinkedList(object):\n    def __init__(self, head=None):\n        self.head = head\n        self.size = 1 if head else 0\n\n    # Print the linked list\n    def print_list(self):\n        if self.head == None:\n            raise ValueError(\"List is empty\")\n\n        current = self.head\n        while current:\n            print(current.data, end=\" \")\n            current = current.next\n        
print(\"\\n\")\n\n # Insert a node in a linked list\n def insert(self, data):\n self.size += 1\n node = Node(data)\n current = self.head\n if not current:\n self.head = node\n else:\n while (current.next):\n current = current.next\n current.next = node\n \n def remove_dups(self):\n first = self.head\n while(first):\n second = first.next\n just_before_second = first\n while(second):\n if first.data == second.data:\n just_before_second.next = just_before_second.next.next\n else:\n just_before_second = second\n second = second.next\n first = first.next\n\n\nfirst_node = Node(11)\nlinked_list = LinkedList(first_node)\nlinked_list.insert(3)\nlinked_list.insert(6)\nlinked_list.insert(3)\nlinked_list.insert(11)\nlinked_list.insert(6)\nlinked_list.insert(5)\nlinked_list.insert(7)\nlinked_list.insert(5)\n\nprint(\"The linked list is:\")\nlinked_list.print_list()\n\nlinked_list.remove_dups()\nprint(\"After removing the duplications, the linked list is now:\")\nlinked_list.print_list()\n","repo_name":"wilberforce116/36-650","sub_path":"HW 6/Problem 5/problem_5.py","file_name":"problem_5.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71490342273","text":"from bs4 import BeautifulSoup\nimport requests\n\nurl='https://www.tripadvisor.cn/Attractions-g60763-Activities-New_York_City_New_York.html'\nwb_data=requests.get(url,timeout=30)\nsoup=BeautifulSoup(wb_data.text,'lxml')\n# div.item.name父类\ntitles=soup.select(\"div.item.name > a\")\nimgs=soup.select(\"img[width='200']\")\ncates=soup.select(\"div.detail\")\n# print(titles[0],imgs[0])\n\nprint(cates[0])\n\n\n'''\ndef getHTMLtext(url):\n\ttry:\n\t\tr=requests.get(url,timeout=30)\n\t\tr.raise_for_status()\n\t\tr.encoding=r.apparent_encoding\n\t\treturn r.text\n\texcept:\n\t\treturn ''\n'''\n\n\n\n","repo_name":"Devinwon/master","sub_path":"craw/Houjieclass/cssselector.py","file_name":"cssselector.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30783777921","text":"\"\"\"Module to analyze stackoverflow\"\"\"\nfrom xml.etree import ElementTree\nimport re\nfrom collections import Counter\nimport json\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nimport logging\n\nAPPLICATION_NAME = \"stackoverflow_analytics\"\nDATASET_DEFAULT_ENCODING = \"utf-8\"\nSTOPWORDS_DEFAULT_ENCODING = \"koi8-r\"\n\nlogger = logging.getLogger(APPLICATION_NAME)\n\n\nclass NoStopWordsError(Exception):\n \"\"\"Exception throws when list if stopwords is empty\"\"\"\n\n\nclass StackoverflowAnalyzer:\n \"\"\"Stackoverflow analyzer\"\"\"\n REQUIRED_ATTRIBS = (\"PostTypeId\", \"CreationDate\", \"Score\", \"Title\")\n\n def __init__(self) -> None:\n self.content = None\n self.scores = None\n self.stopwords = None\n\n @classmethod\n def read_xml(\n cls, filepath: str, encoding: str = DATASET_DEFAULT_ENCODING,\n check_attrib_values: dict = None\n ):\n \"\"\"Read xml from file\"\"\"\n azr = StackoverflowAnalyzer()\n content = []\n with open(filepath, encoding=encoding) as fin:\n for line in fin.readlines():\n try:\n et = ElementTree.fromstring(line)\n except ElementTree.ParseError:\n continue\n if not all(attr in et.attrib for attr in cls.REQUIRED_ATTRIBS):\n continue\n good_info = True\n if check_attrib_values is not None:\n for k, v in check_attrib_values.items():\n if k in et.attrib:\n if et.attrib.get(k) != v:\n good_info = False\n break\n if good_info:\n 
info = {\n \"year\": int(et.attrib[\"CreationDate\"][:4]),\n \"score\": int(et.attrib[\"Score\"]),\n \"title_words\": set(re.findall(r\"\\w+\", et.attrib[\"Title\"].lower()))\n }\n content.append(info)\n\n azr.content = content\n logger.info(\"process XML dataset, ready to serve queries\")\n return azr\n\n def load_stopwords(self, filepath: str, encoding: str = STOPWORDS_DEFAULT_ENCODING) -> None:\n \"\"\"Load stopwords from file\"\"\"\n with open(filepath, encoding=encoding) as fin:\n self.stopwords = fin.read().split()\n\n def analyze(\n self, top_n: int, *, year_from: int, year_to: int,\n check_stopwords: bool = False\n ) -> None:\n \"\"\"Analyze questions\"\"\"\n if check_stopwords and not self.stopwords:\n raise NoStopWordsError\n res = {\n \"start\": year_from,\n \"end\": year_to,\n \"top\": []\n }\n if self.content:\n post_scores = [\n (c[\"title_words\"], c[\"score\"])\n for c in self.content\n if year_from <= c[\"year\"] <= year_to\n ]\n\n scores = Counter()\n for ps in post_scores:\n words, score = ps\n for word in words:\n if check_stopwords:\n if word in self.stopwords:\n continue\n scores[word] += score\n if len(scores) < top_n:\n logger.warning(\n 'not enough data to answer, found %s words out of %s for period \"%s,%s\"',\n len(scores), top_n, year_from, year_to\n )\n top = sorted(scores.items(), key=lambda x: (-x[1], x[0]))[:top_n]\n res[\"top\"] = [list(t) for t in top]\n print(json.dumps(res))\n\n\ndef setup_logging():\n \"\"\"Setup logging\"\"\"\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\n fmt=\"%(levelname)s: %(message)s\"\n )\n\n debug_file_handler = logging.FileHandler(\n filename=\"stackoverflow_analytics.log\"\n )\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n\n warn_file_handler = logging.FileHandler(\n filename=\"stackoverflow_analytics.warn\"\n )\n warn_file_handler.setLevel(logging.WARNING)\n warn_file_handler.setFormatter(formatter)\n\n logger.addHandler(debug_file_handler)\n logger.addHandler(warn_file_handler)\n\n\ndef callback_analyze(arguments: \"NameSpace\") -> None:\n \"\"\"Callback\"\"\"\n azr = StackoverflowAnalyzer.read_xml(\n arguments.questions,\n check_attrib_values={\"PostTypeId\": \"1\"}\n )\n azr.load_stopwords(arguments.stop_words)\n with open(arguments.queries) as fin:\n for line in fin.readlines():\n year_from, year_to, top_n = map(int, line.split(\",\"))\n logger.debug('got query \"%s,%s,%s\"', year_from, year_to, top_n)\n azr.analyze(top_n, year_from=year_from, year_to=year_to, check_stopwords=True)\n logger.info(\"finish processing queries\")\n\n\ndef setup_parser(parser: \"ArgumentParser\") -> None:\n \"\"\"Setup parser\"\"\"\n parser.add_argument(\n \"--questions\",\n required=True,\n help=\"path to questions\"\n )\n parser.add_argument(\n \"--stop-words\",\n required=True,\n help=\"path to stop words\"\n )\n parser.add_argument(\n \"--queries\",\n required=True,\n help=\"path to queries\"\n )\n parser.set_defaults(callback=callback_analyze)\n\n\ndef main() -> None:\n \"\"\"Process arguments and analyze\"\"\"\n setup_logging()\n parser = ArgumentParser(\n prog=\"stackoverflow analyzer\",\n description=\"tool to find the most popular words for a period of time\",\n formatter_class=ArgumentDefaultsHelpFormatter\n )\n setup_parser(parser)\n arguments = parser.parse_args()\n arguments.callback(arguments)\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"ZingyKizz/MADE","sub_path":"python/hw2/task_Khnykov_Yaroslav_stackoverflow_analytics.py","file_name":"task_Khnykov_Yaroslav_stackoverflow_analytics.py","file_ext":"py","file_size_in_byte":5749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23937935057","text":"import sys\n\ndef idmin(A):\n\ti = 0\n\tmini = 0\n\twhile i<len(A):\n\t\tif(A[mini]>A[i]):\n\t\t\tmini = i\n\t\ti+=1\n\treturn mini\n\ndef solve(inname,outname):\n\tfin = open(inname,'r')\n\tfout = open(outname,'w')\n\n\tT = int(fin.readline())\n\tfor t in range(0,T):\n\t\tN = int(fin.readline())\n\t\ttokens = fin.readline().split()\n\t\tA = [int(x) for x in tokens]\n\t\tcount=0\n\t\tfor i in range(0,N):\n\t\t\tx = idmin(A)\n\t\t\tif abs(x-(len(A)-1))>x:\n\t\t\t\tcount = count + x\n\t\t\telse:\n\t\t\t\tcount = count + (len(A)-1-x)\n\t\t\ttmp_A = [A[j] for j in range(0,len(A)) if not (j==x)]\n\t\t\tA = tmp_A[:]\n\t\tfout.write(\"Case #%d: %d\\n\"%(t+1,count))\n\tfin.close()\n\tfout.close()\n\nif __name__ == \"__main__\":\n\tinname = sys.argv[1]\n\toutname = sys.argv[2]\n\tsolve(inname,outname)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_149/35.py","file_name":"35.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25928080447","text":""}
+{"seq_id":"74210937154","text":""}
type_integer,\n    Int64Dtype(): type_long,\n    np.bool_: type_bool,\n    np.datetime64: type_datetime,\n    str: type_string,\n    int: type_integer,\n    float: type_float,\n    np.int64: type_integer,\n    np.float64: type_float,\n    np.floating: type_float,\n    np.integer: type_integer,\n    np.dtype(\"float64\"): type_float,\n    np.dtype(\"int64\"): type_integer,\n    np.dtype(\"bool\"): type_bool,\n    \"str\": type_string,\n    bool: type_bool,\n    object: \"ERROR\",\n    geometry: type_geometry,\n    Timestamp: type_datetime,\n    StringDtype: type_string,\n    type_numeric_list: type_numeric_list,\n}\n\n\"\"\"\n\n_pandas_types_to_python_types = {\n    pd.Float64Dtype(): float,\n    pd.Int64Dtype(): int,\n    pd.StringDtype(): str,\n    pd.BooleanDtype(): bool,\n    pd.Timestamp: datetime64,\n    np.dtype(\"datetime64[ns]\"): datetime64,\n}\n\n_python_types_to_pandas_types = {v: k for k, v in _pandas_types_to_python_types.items()}\n\n\ndef promote_rdf_type(rdf_type):\n    # The fallback return lives after the try/except on purpose: a ``return``\n    # inside a ``finally`` block would override the returns in the try block.\n    try:\n        if type_promotion(rdf_type, XSD.integer) == XSD.integer:\n            return XSD.integer\n    except TypeError:\n        try:\n            if type_promotion(rdf_type, XSD.double) == XSD.double:\n                return XSD.double\n        except TypeError:\n            pass\n    return rdf_type\n\n\ndef get_sml_type_from_python_type(python_type, return_if_missing: object = None):\n    return _python_types_to_sml_types.get(python_type, return_if_missing)\n\n\ndef get_sml_type_from_rdf_type(rdf_type, return_if_missing: object = None):\n    return _rdf_types_to_sml_types.get(promote_rdf_type(rdf_type), return_if_missing)\n\n\ndef get_simple_type_from_sml_type(sml_type, return_if_missing: object = None):\n    return _type_to_simple_type.get(sml_type, return_if_missing)\n\n\ndef get_python_type_from_rdf_type(rdf_type, return_if_missing: object = None):\n    return _rdf_types_to_python_types.get(promote_rdf_type(rdf_type), return_if_missing)\n\n\ndef get_simple_type_from_python_type(python_type, return_if_missing: object = None):\n    return _python_types_to_simple_types.get(python_type, return_if_missing)\n\n\ndef get_python_type_from_pandas_type(pandas_type, return_if_missing: object = None):\n    return _pandas_types_to_python_types.get(pandas_type, return_if_missing)\n\n\ndef get_rdf_type_from_sml_type(sml_type, return_if_missing: object = None):\n    return _sml_types_to_rdf_types.get(sml_type, return_if_missing)\n\n\ndef get_pandas_type_from_python_type(python_type, return_if_missing: object = None):\n    return _python_types_to_pandas_types.get(python_type, return_if_missing)\n","repo_name":"Simple-ML/Simple-ML","sub_path":"Runtime/stdlib/python/simpleml/util/_types.py","file_name":"_types.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"35669692113","text":"from django.contrib.auth import get_user_model\nfrom rest_framework import generics, permissions\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\nfrom .serializers import UserSerializer\nfrom django.contrib.auth import authenticate\n\n\nclass SignUpView(generics.CreateAPIView):\n    queryset = get_user_model().objects.all()\n    serializer_class = UserSerializer\n    permission_classes = [permissions.AllowAny]\n\n    def post(self, request, *args, **kwargs):\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        user = serializer.save()\n        token, created = Token.objects.get_or_create(user=user)\n        return Response(\n            {\n                \"user\": UserSerializer(\n                    user, context=self.get_serializer_context()\n                ).data,\n                \"token\": 
token.key,\n            }\n        )\n\n\nclass LoginView(generics.GenericAPIView):\n    permission_classes = [permissions.AllowAny]\n\n    def post(self, request, *args, **kwargs):\n        username = request.data.get(\"username\")\n        password = request.data.get(\"password\")\n        user = authenticate(request, username=username, password=password)\n        if user is not None:\n            token, created = Token.objects.get_or_create(user=user)\n            return Response(\n                {\n                    \"user\": UserSerializer(\n                        user, context=self.get_serializer_context()\n                    ).data,\n                    \"token\": token.key,\n                }\n            )\n        else:\n            return Response({\"error\": \"Wrong Credentials\"}, status=400)\n\n\nclass LogoutView(generics.GenericAPIView):\n    permission_classes = [permissions.IsAuthenticated]\n\n    def post(self, request, *args, **kwargs):\n        request.user.auth_token.delete()\n        return Response(status=204)\n","repo_name":"Glayson7/APIRest","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40041512386","text":"#!/usr/bin/env python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\ndef create_context_memory(encdec, biprocessor, search_engine, src_sentence):\n    examples_list = search_engine.search(src_sentence)\n    context_memory = []\n    \n    for ex_src, ex_tgt in examples_list:\n        if isinstance(biprocessor, tuple):\n            assert len(biprocessor) == 2\n            idx_ex_src, idx_ex_tgt = biprocessor[0].convert(ex_src), biprocessor[1].convert(ex_tgt)\n        else:\n            idx_ex_src, idx_ex_tgt = biprocessor.convert(ex_src, ex_tgt)\n        \n        state_context_list = encdec.compute_state_context_list(idx_ex_src, idx_ex_tgt)\n        # encode idx_ex_src\n        # generate conditionalized cell\n        # apply conditionalized cell to idx_ex_tgt to generate sequence of (states, ci, yt)\n        context_memory.append(state_context_list)\n    return context_memory\n\n\nimport nmt_chainer.training_module.train as train\nimport nmt_chainer.training_module.train_config as train_config\n\ndef test_context_memory(config_filename, search_engine, src_sentence):\n    config_training = train_config.load_config_train(config_filename)\n    (encdec, eos_idx, src_indexer, tgt_indexer), model_infos = train.create_encdec_and_indexers_from_config_dict(config_training,\n                                                                                            load_config_model=\"yes\",\n                                                                                            return_model_infos=True)\n    \n    ctxt_mem = create_context_memory(encdec, (src_indexer, tgt_indexer), search_engine, src_sentence)\n    \n    # (later) do some tests with ctxt_mem\n    \n","repo_name":"fabiencro/knmt","sub_path":"nmt_chainer/models/search_engine_guided_non_param.py","file_name":"search_engine_guided_non_param.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"61"}
+{"seq_id":"20021537027","text":"import sys\r\n\r\n\r\ndef processVertex(vertex, adj_list):\r\n\r\n    two_paths_map = {}\r\n    for inner_edge in adj_list[vertex]:\r\n        for outer_edge in adj_list[inner_edge]:\r\n            if outer_edge not in two_paths_map:\r\n                two_paths_map[outer_edge] = 1\r\n            else:\r\n                two_paths_map[outer_edge] += 1\r\n    \r\n    two_path_res = []\r\n    for dest_vertex, num_times in two_paths_map.items():\r\n        two_path_res.append((dest_vertex, num_times))\r\n\r\n    return two_path_res \r\n\r\n\r\ndef countTwoPaths(input_adj_list):\r\n    output_adj_list = {} # personID (int) -> [(twoPathPerson, numTimes)]\r\n\r\n    for key in input_adj_list:\r\n        output_adj_list[key] = processVertex(key, input_adj_list)\r\n\r\n\r\n    return output_adj_list\r\n\r\n\r\n# Opens and parses input matrices from 
text files\r\ndef openFile(matFile):\r\n matrix = []\r\n adj_list = {} # personID (int) -> [connected nonzero IDs]\r\n curr_row_ID = 0\r\n with open(matFile, 'r') as f_read:\r\n for line in f_read:\r\n line = line.replace(\" \\n\", \"\").replace(\"\\n\", \"\")\r\n line_split = line.split(\" \")\r\n \r\n curr_row = []\r\n for otherPersonIdx, row_num in enumerate(line_split):\r\n row_num = int(row_num)\r\n if (row_num == 1):\r\n curr_row.append(otherPersonIdx+1)\r\n adj_list[curr_row_ID+1] = curr_row\r\n curr_row_ID+=1\r\n return adj_list\r\n\r\n\r\ndef __main__():\r\n if (len(sys.argv) != 2):\r\n print(\"CLI Format: python Algo_HW6.py \")\r\n return\r\n \r\n # Ripped if statement from https://stackoverflow.com/questions/5899497/how-can-i-check-the-extension-of-a-file\r\n if not (sys.argv[1].endswith('.txt')):\r\n print(\"Please make sure Matrix input parse files are text files (.txt)\")\r\n return\r\n\r\n # open command line arguments\r\n input_adj_list = openFile(sys.argv[1])\r\n print(input_adj_list)\r\n\r\n output_adj_list = countTwoPaths(input_adj_list)\r\n print(output_adj_list)\r\n\r\n\r\n\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n __main__()","repo_name":"billjr20/Algo","sub_path":"HW6/Algo_HW6.py","file_name":"Algo_HW6.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7034623214","text":"from pathlib import Path\nimport sys\nimport re\n\nfrom .isCase import isCase\n\n\ndef listCases(path=Path.cwd(), absolutePath=False):\n #convert string inputs to Path()\n if isinstance(path, str):\n path = Path(path)\n\n if isCase(path): #is the root directory an OpenFOAM case?\n if absolutePath:\n return [path]\n else:\n return [path.relative_to(Path.cwd())]\n\n cases = []\n\n for p in path.rglob('*'):\n if isCase(p):\n if absolutePath:\n cases.append(p)\n else:\n cases.append(p.relative_to(path))\n\n cases.sort()\n\n return cases\n","repo_name":"mcgoldba/pyFoamd","sub_path":"pyfoamd/functions/listCases.py","file_name":"listCases.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23937935057","text":"#Simulate an income statement and visualize it in 3D\nimport vtk\nimport time\nimport imageio\n\n\ntime_scale = \"weekly\" # Time scale (you can set it to your desired timescale)\n\n# Function to create a sphere with a given radius\ndef create_sphere(radius, color, position):\n sphere = vtk.vtkSphereSource()\n sphere.SetRadius(radius)\n\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputConnection(sphere.GetOutputPort())\n\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(color)\n actor.SetPosition(position[0], position[1], position[2]) # Set position for the actor\n\n return actor\n\ndef update_revenue_and_expenses(debt, revenue, expenses, iteration, time_scale):\n \n apr = 0.25 # Annual Percentage Rate\n monthly_apr = apr / 12 # Monthly interest rate\n daily_apr = monthly_apr/30\n weekly_apr = daily_apr * 7\n hourly_apr = daily_apr/8\n minute_apr = hourly_apr/60\n second_apr = minute_apr/60\n hourly_rate = 26\n minute_rate = hourly_rate/60\n second_rate = minute_rate/60\n \n if time_scale == \"yearly\":\n debt += apr * debt\n revenue += hourly_rate * 10080\n expenses += hourly_rate * 10080\n debt_service = .8 * hourly_rate * 10080\n debt -= debt_service\n\n if time_scale == \"monthly\":\n debt += monthly_apr * debt # Increase the debt by monthly interest\n 
revenue += hourly_rate * 160\n expenses += 0.8 * hourly_rate * 160 # Increase expenses by 80% of revenue\n debt_service = .8 * hourly_rate * 160 # Assume 80% of revenue used to pay debt\n debt -= debt_service # Decrease the debt by the debt service\n \n \n if time_scale == \"daily\":\n debt += daily_apr * debt # Increase the debt by the daily interest\n revenue += hourly_rate * 8 # Increment revenue by a fixed amount\n expenses += .8 * hourly_rate * 8 # Increment expenses by a fixed amount\n debt_service = .8 * hourly_rate * 8 # Use expenses to pay down the debt\n debt -= debt_service # Decrease the debt by the debt service\n\n \n if time_scale == \"weekly\":\n debt += weekly_apr * debt\n revenue += hourly_rate * 80\n expenses += .8 * hourly_rate * 80\n debt_service = .8 * hourly_rate * 80 # Assume 80% of revenue used to pay debt\n debt -= debt_service # Decrease the debt by the debt service\n \n \n if time_scale == \"hourly\":\n debt += hourly_apr * debt\n revenue += hourly_rate\n expenses += .8 * hourly_rate\n debt_service = .8 * hourly_rate # Assume 80% of revenue used to pay debt\n debt -= debt_service # Decrease the debt by the debt service\n \n \n if time_scale == \"second\":\n debt += second_apr * debt\n revenue += second_rate \n expenses += .8 * second_rate\n debt_service = .8 * second_rate # Assume 80% of revenue used to pay debt\n debt -= debt_service # Decrease the debt by the debt service\n \n \n if time_scale == \"minute\":\n debt += minute_apr * debt\n revenue += minute_rate\n expenses += .8 * minute_rate\n debt_service = .8 * minute_rate # Assume 80% of revenue used to pay debt\n debt -= debt_service # Decrease the debt by the debt service\n \n \n\n\n net_income = revenue - expenses\n return debt, revenue, expenses, net_income\n\n# Initialize variables\ndebt = 100000 # Initial debt value\nrevenue = 0\nexpenses = 0\n#debt_service = .8 * revenue\n#debt -= debt_service\n#how does debt_service change according to time scale? 
\n\n\n# Create renderer\nrenderer = vtk.vtkRenderer()\nrenderer.SetBackground(0.1, 0.2, 0.4)\nrenderer.ResetCamera()\n\n# Enable user interaction\nrender_window = vtk.vtkRenderWindow()\nrender_window.AddRenderer(renderer)\nrender_window.SetSize(800, 600)\n\n# Add spheres to the renderer\ndebt_actor = create_sphere(0.5, [1.0, 0.0, 0.0], [-2.0, 0.0, 0.0]) # Debt sphere\nrevenue_actor = create_sphere(0.5, [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]) # Revenue sphere\nexpenses_actor = create_sphere(0.5, [0.0, 0.0, 1.0], [2.0, 0.0, 0.0]) # Expenses sphere\nnet_income_actor = create_sphere(0.5, [1.0, 1.0, 0.0], [4.0, 0.0, 0.0]) # Net Income sphere\n\nrenderer.AddActor(debt_actor)\nrenderer.AddActor(revenue_actor)\nrenderer.AddActor(expenses_actor)\nrenderer.AddActor(net_income_actor)\n\n# Enable user interaction\ninteractor = vtk.vtkRenderWindowInteractor()\ninteractor.SetRenderWindow(render_window)\ninteractor.Initialize()\n\nstyle = vtk.vtkInteractorStyleTrackballCamera()\ninteractor.SetInteractorStyle(style)\n\nrender_window.Render()\n\n# Animation parameters\nanimation_duration = 60 # Duration in seconds\nanimation_steps = 5 # Number of steps for the animation\n\n\nloop_count = 10 # Number of times to loop the animation\n\nloop_iteration = 0\n\n# Initialize variables for GIF creation\nimage_list = []\n\n# Update function\ndef update():\n global debt, revenue, expenses, net_income, loop_iteration, start_time\n\n # Update the values\n debt, revenue, expenses, net_income = update_revenue_and_expenses(debt, revenue, expenses, loop_iteration, time_scale)\n\n scale_factor = 0.00002 # Experiment with different values to ensure sphere visibility\n\n debt_actor.SetScale(debt * scale_factor, debt * scale_factor, debt * scale_factor)\n\n revenue_actor.SetScale(revenue * scale_factor, revenue * scale_factor, revenue * scale_factor)\n expenses_actor.SetScale(expenses * scale_factor, expenses * scale_factor, expenses * scale_factor)\n net_income_actor.SetScale(net_income * scale_factor, net_income * scale_factor, net_income * scale_factor)\n print(f\"Period: {loop_iteration}, Debt: {debt:.2f}, Net Income: {net_income:.2f}\")\n\n render_window.Render()\n\n loop_iteration += 1\n \n #if loop_iteration >= animation_steps:\n #loop_iteration = 0\n\n if debt <= 0:\n interactor.DestroyTimer(timer_id) # Stop the timer when debt is zero or less\n print(\"Debt reached zero or below. 
Stopping the animation.\")\n        # mimsave expects image arrays rather than filenames, and a raw string keeps the Windows backslashes literal\n        imageio.mimsave(r\"F:\valuation_models\output\animation.gif\", [imageio.imread(f) for f in image_list])\n        \n    else:\n        # Capture the current frame and append it to the list\n        w2if = vtk.vtkWindowToImageFilter()\n        w2if.SetInput(render_window)\n        w2if.Update()\n        writer = vtk.vtkPNGWriter()\n        writer.SetInputConnection(w2if.GetOutputPort())\n        # raw f-string so \f and \v in the path are not treated as escape characters\n        filename = rf\"F:\valuation_models\output\frame_{loop_iteration}.png\"\n        writer.SetFileName(filename)\n        writer.Write()\n\n        # Append the filename to the list\n        image_list.append(filename) \n\n    render_window.Render()\n    \n    # Check if 30 seconds have passed\n    #current_time = time.time()\n    #if current_time - start_time >= 30:\n        #interactor.DestroyTimer(timer_id) # Stop the timer\n\n# Create a timer to control the animation for 30 seconds\nstart_time = time.time() # Start time of the animation\n\n# Call the update function at regular intervals\ntimer_id = interactor.CreateRepeatingTimer(int(1000 / animation_steps))\ninteractor.AddObserver('TimerEvent', lambda caller, event: update())\n\n# Start the interactor\ninteractor.Start()\n\n#Save GIF\n# Set up the animation scene writer to save the animation as a GIF\n# (note: vtkAnimationSceneImageWriter may not be available in standard VTK builds)\nwriter = vtk.vtkAnimationSceneImageWriter()\nwriter.SetFileName(r\"F:\valuation_models\output\animation.gif\")\nwriter.SetFrameRate(30) # Set the frame rate for the GIF\n\n# Start the render and the writer\nwriter.SetInput(render_window)\nwriter.Write()\n\n","repo_name":"Photon1c/lavenderfields","sub_path":"examples/3Dfinancialmodeler.py","file_name":"3Dfinancialmodeler.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44869268903","text":"\"\"\"Test the TcEx Logger Module.\"\"\"\n# standard library\nimport os\nfrom random import randint\n\n\nclass TestLogs:\n    \"\"\"Test the TcEx Logger Module.\"\"\"\n\n    @staticmethod\n    def test_logger(tcex_proxy):\n        \"\"\"Test TcEx logger\n\n        Args:\n            tcex_proxy (TcEx, fixture): An instantiated instance of TcEx object.\n        \"\"\"\n        tcex = tcex_proxy\n        for _ in range(0, 20):\n            tcex.log.trace('TRACE LOGGING')\n            tcex.log.debug('DEBUG LOGGING')\n            tcex.log.info('INFO LOGGING')\n            tcex.log.warning('WARNING LOGGING')\n            tcex.log.error('ERROR LOGGING')\n\n        # update handler log level\n        tcex.logger.update_handler_level(None)\n        tcex.logger.update_handler_level('trace')\n\n        # simple assert to ensure the log file was created\n        assert os.path.exists(\n            os.path.join(tcex.default_args.tc_log_path, tcex.default_args.tc_log_file)\n        )\n\n    @staticmethod\n    def test_logger_rotate(playbook_app):\n        \"\"\"Test TcEx logger\n\n        Args:\n            playbook_app (callable, fixture): The playbook_app fixture.\n        \"\"\"\n        config_data = {'tc_log_file': 'rotate.log', 'tc_log_max_bytes': 1_048_576}\n        tcex = playbook_app(config_data=config_data).tcex\n\n        for _ in range(0, 5_000):\n            tcex.log.info(f'A long random string {tcex.utils.random_string(randint(200, 250))}')\n\n        # simple assert to ensure the log file was created\n        assert os.path.exists(\n            os.path.join(tcex.default_args.tc_log_path, tcex.default_args.tc_log_file)\n        )\n        assert os.path.exists(\n            os.path.join(tcex.default_args.tc_log_path, f'{tcex.default_args.tc_log_file}.1.gz')\n        )\n","repo_name":"ThreatConnect-Inc/threatconnect-developer-docs","sub_path":"tcex/tests/logger/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"21682797636","text":"import os\nfrom tqdm 
import trange\nimport torch\nfrom torch.nn import functional as F\nfrom torch import distributions as dist\nfrom src.common import (\n compute_iou, make_3d_grid, add_key,\n)\nfrom src.utils import visualize as vis\nfrom src.training import BaseTrainer\nimport numpy as np\n\nfrom torch.cuda.amp import GradScaler, autocast\n\n\nclass Trainer(BaseTrainer):\n ''' Trainer object for the Occupancy Network.\n\n Args:\n model (nn.Module): Occupancy Network model\n optimizer (optimizer): pytorch optimizer object\n device (device): pytorch device\n input_type (str): input type\n vis_dir (str): visualization directory\n threshold (float): threshold value\n eval_sample (bool): whether to evaluate samples\n\n '''\n\n def __init__(self, model, optimizer, device=None, input_type='pointcloud',\n vis_dir=None, threshold=0.5, eval_sample=False):\n self.model = model\n self.optimizer = optimizer\n self.device = device\n self.input_type = input_type\n self.vis_dir = vis_dir\n self.threshold = threshold\n self.eval_sample = eval_sample\n\n self.scaler = GradScaler()\n\n if vis_dir is not None and not os.path.exists(vis_dir):\n os.makedirs(vis_dir)\n\n def train_step(self, data, idx, epoch_it, gradient_accumulations):\n ''' Performs a training step.\n\n Args:\n data (dict): data dictionary\n '''\n\n '''\n self.model.train()\n self.optimizer.zero_grad()\n loss, loss_p, loss_rgb = self.compute_loss(data, epoch_it)\n loss.backward()\n self.optimizer.step()\n '''\n\n # v1\n self.model.train()\n loss, loss_p, loss_rgb = self.compute_loss(data, epoch_it)\n (loss / gradient_accumulations).backward()\n\n if (idx + 1) % gradient_accumulations == 0:\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # v2\n '''\n self.model.train()\n with autocast():\n loss, loss_p, loss_rgb = self.compute_loss(data, epoch_it)\n self.scaler.scale(loss / gradient_accumulations).backward()\n\n if (idx + 1) % gradient_accumulations == 0:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.model.zero_grad()\n '''\n\n return loss.item(), loss_p.item(), loss_rgb.item()\n \n def eval_step(self, data):\n ''' Performs an evaluation step.\n\n Args:\n data (dict): data dictionary\n '''\n self.model.eval()\n\n device = self.device\n threshold = self.threshold\n eval_dict = {}\n\n points = data.get('points').to(device) # torch.Size([1, 1024, 3])\n occ = data.get('points.occ').to(device) # torch.Size([1, 1024])\n p_colors = data.get('points.colors').to(device) # torch.Size([1, 1024, 3])\n\n inputs = data.get('inputs', torch.empty(points.size(0), 0)).to(device) # torch.Size([1, 32, 32, 32])\n voxels_occ = data.get('voxels') # torch.Size([1, 32, 32, 32])\n inputs_colors = data.get('inputs.voxels_color').to(device) # torch.Size([1, 32, 32, 32, 3])\n\n points_iou = data.get('points_iou').to(device) # torch.Size([1, 100000, 3])\n occ_iou = data.get('points_iou.occ').to(device) # torch.Size([1, 100000])\n\n batch_size = points.size(0)\n\n kwargs = {}\n \n # add pre-computed index\n inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)\n # add pre-computed normalized coordinates\n points = add_key(points, data.get('points.normalized'), 'p', 'p_n', device=device)\n points_iou = add_key(points_iou, data.get('points_iou.normalized'), 'p', 'p_n', device=device)\n\n # Compute iou\n with torch.no_grad():\n p_out = self.model(points_iou, inputs, inputs_colors, sample=self.eval_sample, **kwargs)\n\n occ_iou_np = (occ_iou >= 0.5).cpu().numpy()\n occ_iou_hat_np = (p_out.probs >= threshold).cpu().numpy()\n\n iou = 
compute_iou(occ_iou_np, occ_iou_hat_np[:, :, 0]).mean()\n eval_dict['iou'] = iou\n\n # Estimate voxel iou\n if voxels_occ is not None:\n voxels_occ = voxels_occ.to(device)\n points_voxels = make_3d_grid(\n (-0.5 + 1/64,) * 3, (0.5 - 1/64,) * 3, voxels_occ.shape[1:])\n points_voxels = points_voxels.expand(\n batch_size, *points_voxels.size())\n points_voxels = points_voxels.to(device)\n with torch.no_grad():\n p_out = self.model(points_voxels, inputs, inputs_colors, sample=self.eval_sample, **kwargs)\n\n voxels_occ_np = (voxels_occ >= 0.5).cpu().numpy()\n occ_hat_np = (p_out.probs >= threshold).cpu().numpy()\n iou_voxels = compute_iou(voxels_occ_np, occ_hat_np[:, :, 0]).mean()\n\n eval_dict['iou_voxels'] = iou_voxels\n\n return eval_dict\n\n def compute_loss(self, data, epoch_it):\n ''' Computes the loss.\n\n Args:\n data (dict): data dictionary\n '''\n device = self.device\n p = data.get('points').to(device) #torch.Size([64, 1024, 3])\n occ = data.get('points.occ').to(device) #torch.Size([64, 1024])\n p_colors = data.get('points.colors').to(device) #torch.Size([64, 1024, 3])\n inputs = data.get('inputs', torch.empty(p.size(0), 0)).to(device) #torch.Size([64, 32, 32, 32])\n inputs_colors = data.get('inputs.voxels_color').to(device) # torch.Size([64, 32, 32, 32, 3]) range[0,1]\n loss = 0\n\n if 'pointcloud_crop' in data.keys():\n # add pre-computed index\n inputs = add_key(inputs, data.get('inputs.ind'), 'points', 'index', device=device)\n inputs['mask'] = data.get('inputs.mask').to(device)\n # add pre-computed normalized coordinates\n p = add_key(p, data.get('points.normalized'), 'p', 'p_n', device=device)\n\n '''\n if epoch_it > 12351235:\n c = self.model.encode_inputs(inputs) # xy: torch.Size([64, 32, 64, 64])\n kwargs = {}\n # General points\n logits = self.model.decode(p, c, **kwargs).logits #torch.Size([64, 1024])\n loss_i = F.binary_cross_entropy_with_logits(\n logits, occ, reduction='none')\n loss = loss_i.sum(-1).mean()\n '''\n\n if epoch_it > 0:\n c_main = self.model.main_encode_inputs(inputs, inputs_colors) # xy: torch.Size([64, 32, 64, 64])\n kwargs = {}\n # General points\n logits = self.model.main_decode(p, c_main, **kwargs).logits #torch.Size([64, 1024, 4])\n # logits[:, :, [1, 2, 3]] = logits[:, :, [1, 2, 3]] * 255\n\n loss_l1 = torch.nn.L1Loss()\n loss_MSE = torch.nn.MSELoss()\n loss_r = loss_l1(logits[:, :, 1].double(), p_colors[:, :, 0].double())\n loss_g = loss_l1(logits[:, :, 2].double(), p_colors[:, :, 1].double())\n loss_b = loss_l1(logits[:, :, 3].double(), p_colors[:, :, 2].double())\n # loss_r = loss_MSE(logits[:, :, 1].double(), p_colors[:, :, 0].double())\n # loss_g = loss_MSE(logits[:, :, 2].double(), p_colors[:, :, 1].double())\n # loss_b = loss_MSE(logits[:, :, 3].double(), p_colors[:, :, 2].double())\n\n loss_bce_p = F.binary_cross_entropy_with_logits(logits[:, :, 0], occ, reduction='none')\n loss_p = loss_bce_p.sum(-1).mean()\n # loss = loss_p + (loss_r + loss_g + loss_b) / 3\n loss_rgb = loss_r + loss_g + loss_b\n # print(\"loss rgb\", loss_rgb)\n loss = loss_p + loss_rgb\n # loss = loss_p\n '''\n unit_m = torch.ones(p_colors.shape[0], p_colors.shape[1], p_colors.shape[2])\n print(\"one\", unit_m.shape)\n diff = torch.abs(loss_l1(logits[:, :, [1, 2, 3]]))\n print(\"diff\", diff.shape)\n loss_rgb = torch.log(unit_m - diff).sum(-1).mean()\n print(loss_rgb.shape)\n #print(torch.abs(torch.abs(logits[:, :, [1, 2, 3]]) - torch.abs(p_colors)))\n #loss_rgb = torch.log(1 - (torch.abs(logits[:, :, [1, 2, 3]] - p_colors)).sum(-1).mean)\n '''\n #print(\"loss rgb 
portion\", loss_rgb / loss_p)\n\n\n return loss, loss_p, loss_rgb\n","repo_name":"Zeju1997/Texture_Completion","sub_path":"convolutional_occupancy_network_textured/src/conv_onet/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15131657847","text":"\"\"\" model selection and parameter optimization \"\"\"\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\n\ndef model_select_param_opt_demo():\n # 1) get data\n iris = load_iris()\n\n # 2) partition \n x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state = 22)\n\n # 3) feature engineer: standardize\n transfer = StandardScaler()\n x_train = transfer.fit_transform(x_train)\n x_test = transfer.transform(x_test) # use mean and std got in x_train\n\n # 4) knn with grid search with cross validation\n estimator = KNeighborsClassifier()\n\n # 5) grid search with cross validation\n param_dict = {\"n_neighbors\" : [1, 3, 5, 7, 9, 11], \"metric\" : ['minkowski'], 'p' : [1, 2, 3]} # different distance metric\n estimator = GridSearchCV(estimator, param_grid = param_dict, cv = 10)\n estimator.fit(x_train, y_train)\n\n # 6) evaluation\n score = estimator.score(x_test, y_test)\n print(score)\n print(\"best param: \", estimator.best_params_)\n print(\"best score: \", estimator.best_score_)\n print(\"best estimator: \", estimator.best_estimator_)\n print(\"cv results: \", estimator.cv_results_)\n\n return None\n\n\nif __name__ == \"__main__\":\n model_select_param_opt_demo()","repo_name":"richjin518/MachineLearningClass","sub_path":"day_4_crossval_gridsearch.py","file_name":"day_4_crossval_gridsearch.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38079082135","text":"with open('Chapter 9\\\\input.txt', 'r') as f:\n # ✅ get list of all lines\n lines = f.read().splitlines()\n\nd = {}\nsl = []\ntotal = 0\nfor line in lines:\n if line.startswith('$ cd'):\n d[line.split(' ')[2]] = sl\n sl = []\n continue\n else:\n sl.append(line)\n\nprint(d)","repo_name":"bartoszc/Python-Crash-Course-Solutions","sub_path":"Chapter 9/day_7b.py","file_name":"day_7b.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33830861911","text":"import os,shutil\n\n\ndef copyfile(fpath, srcfile, dstfile):\n if not os.path.isfile(srcfile):\n print(\"%s not exist!\")%(srcfile)\n else:\n fpath, fname=os.path.split(dstfile)\n if not os.path.exists(fpath):\n os.makedirs(fpath)\n shutil.copyfile(srcfile,dstfile)\n print(\"copy %s -> %s\")%( srcfile,dstfile)\n\n\ndef catch_polygon(row, col, bin_image):\n start_point = (row, col)\n flag = 'left2right'\n line_end = []\n moving_point = [row, col + 1]\n line_end.append([start_point[0], start_point[1]])\n while moving_point[0] != start_point[0] or moving_point[1] != start_point[1]:\n\n '''\n # the situation which width == 0:\n if bin_image[moving_point[0]][moving_point[1]] == 0:\n moving_point[0] += 1\n moving_point[1] -= 1\n flag = 'up'\n '''\n\n center = bin_image[moving_point[0]][moving_point[1]]\n right = bin_image[moving_point[0]][moving_point[1] + 1]\n right_down = 
bin_image[moving_point[0] - 1][moving_point[1] + 1]\n right_up = bin_image[moving_point[0] + 1][moving_point[1] + 1]\n left = bin_image[moving_point[0]][moving_point[1] - 1]\n left_down = bin_image[moving_point[0] - 1][moving_point[1] - 1]\n left_up = bin_image[moving_point[0] + 1][moving_point[1] - 1]\n up = bin_image[moving_point[0] + 1][moving_point[1]]\n down = bin_image[moving_point[0] - 1][moving_point[1]]\n\n # read the polygons:\n if flag == 'left2right':\n if right == 0 or (up == 1 and left_up == 0) or (down == 1 and left_down == 0):\n line_end.append([moving_point[0], moving_point[1]])\n if up == 1 and left_up != right_down:\n flag = 'down2up'\n moving_point[0] += 1\n else:\n flag = 'up2down'\n moving_point[0] -= 1\n else:\n moving_point[1] += 1\n elif flag == 'right2left':\n if left == 0 or (up == 1 and right_up == 0) or (down == 1 and right_down == 0):\n line_end.append([moving_point[0], moving_point[1]])\n if up == 1 and right_up != left_down:\n flag = 'down2up'\n moving_point[0] += 1\n else:\n flag = 'up2down'\n moving_point[0] -= 1\n else:\n moving_point[1] -= 1\n elif flag == 'down2up':\n if up == 0 or (right == 1 and right_down == 0) or (left == 1 and left_down == 0):\n line_end.append([moving_point[0], moving_point[1]])\n if left == 1 and right_up != left_down:\n flag = 'right2left'\n moving_point[1] -= 1\n else:\n flag = 'left2right'\n moving_point[1] += 1\n else:\n moving_point[0] += 1\n elif flag == 'up2down':\n if down == 0 or (right == 1 and right_up == 0) or (left == 1 and left_up == 0):\n line_end.append([moving_point[0], moving_point[1]])\n if left == 1 and left_up != right_down:\n flag = 'right2left'\n moving_point[1] -= 1\n else:\n flag = 'left2right'\n moving_point[1] += 1\n else:\n moving_point[0] -= 1\n\n # print(line_end)\n return line_end\n\n\ndef polygons_check(polygons, information='n'):\n authenticated = True\n line_ends = []\n # In every polygon:\n for i in polygons:\n\n for j in i:\n line_ends.append(j)\n\n # check the M1 Critical Dimension > 80\n for j in range(len(line_ends)):\n if j == len(line_ends) - 1:\n break\n else:\n if max(abs(line_ends[j + 1][0] - line_ends[j][0]), abs(line_ends[j + 1][1] - line_ends[j][1])) < 80:\n authenticated = False\n if information == 'y':\n print('Dimension error')\n print(max(abs(line_ends[j + 1][0] - line_ends[j][0]), abs(line_ends[j + 1][1] - line_ends[j][1])))\n\n # check the Tip to tip distance > 60\n for i in range(len(polygons)):\n for j in range(i + 1, len(polygons)):\n for k in polygons[i]:\n for l in polygons[j]:\n if abs(k[0] - l[0]) + abs(k[1] - l[1]) < 60:\n authenticated = False\n if information == 'y':\n print('Tip error')\n print(abs(k[0] - l[0]) + abs(k[1] - l[1]))\n\n # check the pitch >140 (only the rectangles)\n rectangles = []\n horizontal = []\n vertical = []\n for i in polygons:\n if len(i) == 4:\n rectangles.append(i)\n for i in rectangles:\n if abs(i[0][0] - i[1][0]) > abs(i[0][1] - i[2][1]):\n i.append('horizontal')\n i.append(abs(i[0][1] - i[2][1])/2 + min(i[0][1], i[2][1]))\n else:\n i.append('vertical')\n i.append(abs(i[0][0] - i[1][0])/2 + min(i[0][0], i[1][0]))\n for i in rectangles:\n if i[4] == 'horizontal':\n horizontal.append(i)\n else:\n vertical.append(i)\n # print(vertical)\n # print(horizontal)\n for i in horizontal:\n i.append([min(i[0][0], i[1][0], i[2][0], i[3][0]), max(i[0][0], i[1][0], i[2][0], i[3][0])])\n for i in vertical:\n i.append([min(i[0][1], i[1][1], i[2][1], i[3][1]), max(i[0][1], i[1][1], i[2][1], i[3][1])])\n horizontal.sort(key=lambda x: x[6])\n 
vertical.sort(key=lambda x: x[6])\r\n    # at this point each entry is [[point1], [point2], [point3], [point4], orientation, centerline, [leftmost/rightmost or lowest/highest]]\r\n    for i in range(len(horizontal)):\r\n        for j in range(i + 1, len(horizontal)):\r\n            if horizontal[j][6][1] > horizontal[i][6][0]:\r\n                continue\r\n            else:\r\n                if i == len(horizontal) - 1:\r\n                    break\r\n                else:\r\n                    if abs(horizontal[j][5] - horizontal[i][5]) < 140:\r\n                        authenticated = False\r\n                        if information == 'y':\r\n                            print('Pitch error')\r\n                            print(horizontal[j][5] - horizontal[i][5])\r\n    for i in range(len(vertical)):\r\n        for j in range(i + 1, len(vertical)):\r\n            if vertical[j][6][1] > vertical[i][6][0]:\r\n                continue\r\n            else:\r\n                if i == len(vertical) - 1:\r\n                    break\r\n                else:\r\n                    if abs(vertical[j][5] - vertical[i][5]) < 140:\r\n                        authenticated = False\r\n                        if information == 'y':\r\n                            print('Pitch error')\r\n                            print(vertical[j][5] - vertical[i][5])\r\n\r\n    return authenticated\r\n\r\n\r\ndef main():\r\n    # the count of files\r\n    txt_num = int(input('Input count of the files you hope to read which in [2, 5460]:'))\r\n    information = input('If you need information about the error(y/n):')\r\n\r\n    # to count the generated files\r\n    count = 1\r\n\r\n    # read the txt file\r\n    for k in range(1, txt_num):\r\n        with open('Mask_bin/M1_test' + str(k) + '.txt') as f:\r\n            print('Reading the text \"' + 'Mask_bin/M1_test' + str(k) + '.txt'+'\"...(' + str(k) + '/' + str(txt_num - 1) +')')\r\n            # transform the binary file to an array:\r\n            bin_image = [[] for i in range(2048)]\r\n            line = f.readlines()\r\n            for i in range(2048):\r\n                for j in line[i]:\r\n                    if j == '0' or j == '1':\r\n                        bin_image[i].append(int(j))\r\n            # alter the base point from the left top to the left bottom:\r\n            bin_image = bin_image[::-1]\r\n            # check every point:\r\n            tips = []\r\n            polygons = []\r\n            # the point:[row][col]\r\n            # erase isolated single points:\r\n            for row in range(2048):\r\n                for col in range(2048):\r\n\r\n                    if row == 0 or row == 2047 or col == 0 or col == 2047:\r\n                        continue\r\n\r\n                    zero_point = 0\r\n\r\n                    center = bin_image[row][col]\r\n                    right = bin_image[row][col + 1]\r\n                    right_down = bin_image[row - 1][col + 1]\r\n                    right_up = bin_image[row + 1][col + 1]\r\n                    left = bin_image[row][col - 1]\r\n                    left_down = bin_image[row - 1][col - 1]\r\n                    left_up = bin_image[row + 1][col - 1]\r\n                    up = bin_image[row + 1][col]\r\n                    down = bin_image[row - 1][col]\r\n\r\n                    if center == 1:\r\n                        if right == 0:\r\n                            zero_point += 1\r\n                        if left == 0:\r\n                            zero_point += 1\r\n                        if up == 0:\r\n                            zero_point += 1\r\n                        if down == 0:\r\n                            zero_point += 1\r\n                        if right_down == 0:\r\n                            zero_point += 1\r\n                        if right_up == 0:\r\n                            zero_point += 1\r\n                        if left_up == 0:\r\n                            zero_point += 1\r\n                        if left_down == 0:\r\n                            zero_point += 1\r\n\r\n                        if zero_point >= 6:\r\n                            bin_image[row][col] = 0\r\n                            print('erased [' + str(row) + '][' + str(col) + ']')\r\n\r\n            for row in range(2048):\r\n                for col in range(2048):\r\n\r\n                    if row == 0 or row == 2047 or col == 0 or col == 2047:\r\n                        continue\r\n\r\n                    center = bin_image[row][col]\r\n                    right = bin_image[row][col + 1]\r\n                    right_down = bin_image[row - 1][col + 1]\r\n                    right_up = bin_image[row + 1][col + 1]\r\n                    left = bin_image[row][col - 1]\r\n                    left_down = bin_image[row - 1][col - 1]\r\n                    left_up = bin_image[row + 1][col - 1]\r\n                    up = bin_image[row + 1][col]\r\n                    down = bin_image[row - 1][col]\r\n\r\n                    if center == 1 and right_down == 0 and left_down == 0 and left_up == 0 and [col, row] not in tips:\r\n                        temp = catch_polygon(row, col, bin_image)\r\n                        for i in temp:\r\n                            i[0], i[1] = i[1], i[0]\r\n                        tips += temp\r\n                        polygons.append(temp)\r\n\r\n            # restore the binary image array:\r\n            bin_image = bin_image[::-1]\r\n\r\n            if polygons_check(polygons, information):\r\n                # shutil.copy('Mask_bin/M1_test' + str(k) + '.txt', 'certed_bin/M1_test' + str(k) + '.txt')\r\n                with open('certed_bin/' + 
str(count) + '.txt', 'w') as f:\r\n                    for i in bin_image:\r\n                        for j in i:\r\n                            f.write(str(j) + ' ')\r\n                        f.write('\\n')\r\n                print('certed_bin/' + str(count) + '.txt, finished.')\r\n                count += 1\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n\r\n\r\n\r\n","repo_name":"Geiv/OPC","sub_path":"txt_check.py","file_name":"txt_check.py","file_ext":"py","file_size_in_byte":11335,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"34288285711","text":"import os\r\nimport configparser\r\nfrom pathlib import Path\r\nimport time\r\nfrom datetime import datetime\r\nimport requests\r\nimport json\r\nimport traceback\r\nimport re\r\nimport ctypes\r\n\r\nfrom .wechatautomator import WechatAutomator\r\n\r\ndef url_in_states(url, states):\r\n    for state in states:\r\n        if url == state['url']:\r\n            return True\r\n    return False\r\n\r\ndef find_id_in_states(url, states):\r\n    for state in states:\r\n        if url == state['url']:\r\n            return state[\"id\"]\r\n    return None\r\n\r\n\r\ndef send_heart_beat(wechat_id, type, java_server):\r\n    try:\r\n        page = {\"wechatId\": wechat_id,\r\n                \"activityType\": type\r\n                }\r\n        print(\"heart-beat: {}\".format(page))\r\n        s = requests.post(java_server + \"/heartbeat\", json=page)\r\n        res = json.loads(s.text)\r\n        if not res[\"success\"]:\r\n            print(\"heartbeat失败:{}\".format(res))\r\n    except:\r\n        print(\"heartbeat失败\")\r\n\r\ndef send_debug_info(wechat_id, debug_info, java_server):\r\n    try:\r\n        print(\"heart-beat: {}\".format(debug_info))\r\n        s = requests.post(java_server + \"/debuginfo?wechatId=\"+wechat_id, json=debug_info)\r\n        res = json.loads(s.text)\r\n        if not res[\"success\"]:\r\n            print(\"send-debug-info失败:{}\".format(res))\r\n    except:\r\n        print(\"send-debug-info失败\")\r\n\r\ndef add_to_detail(s, detail):\r\n    detail.append(time.strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+str(s))\r\n\r\ndef main(parser):\r\n    debug_info = {}\r\n    wechat_path = parser.get('basic', 'wechat_path', fallback=None)\r\n    if wechat_path is not None and wechat_path.strip() == '':\r\n        wechat_path = None\r\n    java_server = parser.get('basic', 'java_server', fallback=None)\r\n    if java_server is not None and java_server.strip() == '':\r\n        java_server = None\r\n    if java_server is None:\r\n        java_server = \"http://localhost:4567\"\r\n\r\n    win_width = int(parser.get('basic', 'win_width', fallback=1000))\r\n    win_height = int(parser.get('basic', 'win_height', fallback=600))\r\n    crawl_interval = int(parser.get('basic', 'crawl_interval', fallback=1))\r\n    counter_interval_seconds = int(parser.get('basic', 'counter_interval_seconds', fallback=48*3600))\r\n\r\n    crawl_pages = int(parser.get('basic', 'crawl_pages', fallback=3))\r\n    max_crawl_pages = int(parser.get('basic', 'max_crawl_pages', fallback=6))\r\n\r\n    debug_count = int(parser.get('basic', \"debug_count\", fallback=\"10\"))\r\n    latest_date = parser.get('basic', 'latest_date', fallback=None)\r\n    first_pages = int(parser.get('basic', 'first_pages', fallback=\"1\"))\r\n    find_window_timeout = int(parser.get('basic', 'find_window_timeout', fallback='30'))\r\n\r\n    first_max_crawl_time = int(parser.get('basic', 'first_max_crawl_time', fallback=\"86400\"))\r\n    switch_gongzhonghao = parser.get('basic', 'switch_gongzhonghao', fallback=None)\r\n\r\n    crawl_read_count = parser.get('basic', 'crawl_read_count', fallback=\"False\")\r\n    crawl_read_count = crawl_read_count.lower() == 'true'\r\n\r\n    debug_ocr = parser.get('basic', 'debug_ocr', fallback=\"False\")\r\n    debug_ocr = debug_ocr.lower() == 'true'\r\n\r\n\r\n    print(\"max_crawl_pages: {}, crawl_pages: 
{}\".format(max_crawl_pages, crawl_pages))\r\n print(\"width: {}, height: {}\".format(win_width, win_height))\r\n print(\"java_server: {}\".format(java_server))\r\n print(\"wechat_path: {}\".format(wechat_path))\r\n print(\"crawl_interval: {} hours\".format(crawl_interval))\r\n print(\"counter_interval: {} seconds\".format(counter_interval_seconds))\r\n print(\"latest_date: {}\".format(latest_date))\r\n print(\"first_max_crawl_time: {}\".format(first_max_crawl_time))\r\n print(\"switch_gongzhonghao: {}\".format(switch_gongzhonghao))\r\n print(\"first_pages: {}\".format(first_pages))\r\n print(\"crawl_read_count: {}\".format(crawl_read_count))\r\n print(\"debug_ocr: {}\".format(debug_ocr))\r\n print(\"find_window_timeout: {}\".format(find_window_timeout))\r\n\r\n cwd = os.getcwd()\r\n print(\"current directory {}\".format(cwd))\r\n debug_info[\"max_crawl_pages\"] = max_crawl_pages\r\n debug_info[\"crawl_pages\"] = crawl_pages\r\n debug_info[\"win_width\"] = win_width\r\n debug_info[\"win_height\"] = win_height\r\n debug_info[\"java_server\"] = java_server\r\n debug_info[\"wechat_path\"] = wechat_path\r\n debug_info[\"crawl_interval\"] = crawl_interval\r\n debug_info[\"cwd\"] = cwd\r\n debug_info[\"debug_count\"] = debug_count\r\n debug_info[\"latest_date\"] = latest_date\r\n debug_info[\"first_max_crawl_time\"] = first_max_crawl_time\r\n debug_info[\"switch_gongzhonghao\"] = switch_gongzhonghao\r\n debug_info[\"first_pages\"] = first_pages\r\n debug_info[\"crawl_read_count\"] = crawl_read_count\r\n debug_info[\"counter_interval_seconds\"] = counter_interval_seconds\r\n debug_info[\"debug_ocr\"] = debug_ocr\r\n debug_info[\"find_window_timeout\"] = find_window_timeout\r\n\r\n automator = WechatAutomator()\r\n try:\r\n automator.init_window(counter_interval=counter_interval_seconds,\r\n find_window_timeout=find_window_timeout)\r\n except:\r\n print(\"微信未启动或未登陆,请启动微信并扫码登陆后再运行本程序。\")\r\n return\r\n wechat_id = automator.get_wechat_id()\r\n print(\"wechat id {}\".format(wechat_id))\r\n debug_info[\"wechat_id\"] = wechat_id\r\n debug_info[\"details\"] = []\r\n\r\n account_is_fuwuhao = {}\r\n while True:\r\n automator.move_window()\r\n start_time = int(time.time())\r\n my_file = Path(\"gongzhonghao.txt\")\r\n if not my_file.is_file():\r\n s = \"gongzhonghao.txt文件不存在,请创建后再运行\"\r\n debug_info[\"error_msg\"] = s\r\n print(s)\r\n time.sleep(60)\r\n continue\r\n try:\r\n with open('gongzhonghao.txt', encoding=\"UTF-8\") as f:\r\n lines = f.read().splitlines()\r\n except:\r\n print(\"打开gongzhonghao.txt失败,请确保它是utf编码的文本文件\")\r\n return\r\n print(\"抓取的公众号列表:\")\r\n for line in lines:\r\n print(\"\\t{}\".format(line))\r\n\r\n if len(lines) == 1 and switch_gongzhonghao is None:\r\n s = \"只有一个公众号要抓取,需要配置 switch_gongzhonghao\"\r\n ctypes.windll.user32.MessageBoxW(0, \"请在config.ini配置switch_gongzhonghao或者增加公众号数量\", \"没有switch_gongzhonghao\", 0)\r\n return\r\n if len(lines) == 1:\r\n try:\r\n automator.locate_user(switch_gongzhonghao)\r\n except:\r\n pass\r\n for line in lines:\r\n line = line.strip()\r\n if line == '':\r\n continue\r\n is_fuwuhao = account_is_fuwuhao.get(line)\r\n if is_fuwuhao is None:\r\n is_fuwuhao = automator.is_fuwuhao(line)\r\n if is_fuwuhao is None:\r\n print(\"账号{}不能确定是否服务号,请联系开发修复bug\".format(line))\r\n continue\r\n account_is_fuwuhao[line] = is_fuwuhao\r\n s = \"{} 是否服务号 {}\".format(line, is_fuwuhao)\r\n print(s)\r\n details = debug_info[\"details\"]\r\n detail = []\r\n detail.append(s)\r\n details.append(detail)\r\n # 只保留\r\n if len(details) > debug_count:\r\n details = 
details[-debug_count:]\r\n debug_info[\"details\"] = details\r\n\r\n try:\r\n s = \"开始抓取: {}\".format(line)\r\n print(s)\r\n add_to_detail(s, detail)\r\n send_heart_beat(wechat_id, \"start-\"+line, java_server)\r\n\r\n articles = []\r\n # get states from java server\r\n page = {\"pubName\": line}\r\n s = requests.post(java_server + \"/getstate\", json=page)\r\n rsp = json.loads(s.text)\r\n if not rsp[\"success\"]:\r\n s = \"获取states失败:{}\".format(rsp[\"msg\"])\r\n add_to_detail(s, detail)\r\n print(s)\r\n continue\r\n states = rsp[\"data\"]\r\n i = 0\r\n for state in states:\r\n i += 1\r\n if i < 50:\r\n add_to_detail(\"state: {}\".format(state), detail)\r\n print(state)\r\n\r\n curr_crawl_pages = crawl_pages\r\n curr_max_pages = max_crawl_pages\r\n # 可以通过它是否为None来判断是否首次抓取\r\n curr_latest_date = None\r\n\r\n is_first_crawl = False\r\n if len(states) == 0:\r\n is_first_crawl = True\r\n s = \"首次抓取 {}\".format(line)\r\n add_to_detail(s, detail)\r\n print(s)\r\n curr_time = int(time.time())\r\n if curr_time - start_time >= first_max_crawl_time:\r\n s = \"时间太长,跳过首次抓取 {}-{}\".format(start_time, curr_time)\r\n print(s)\r\n add_to_detail(s, detail)\r\n try:\r\n automator.locate_user(line)\r\n except:\r\n pass\r\n continue\r\n\r\n curr_max_pages = max(max_crawl_pages, first_pages)\r\n curr_latest_date = latest_date\r\n\r\n s = \"curr_max: {}, curr_pages: {}, curr_latest_date: {}\".format(curr_max_pages,\r\n curr_crawl_pages, curr_latest_date)\r\n print(s)\r\n add_to_detail(s, detail)\r\n if is_fuwuhao:\r\n result = automator.crawl_fuwuhao(line, articles,\r\n states=states, max_pages=curr_max_pages,\r\n detail=detail, latest_date=curr_latest_date,\r\n crawl_counter=crawl_read_count,\r\n debug_ocr=debug_ocr)\r\n else:\r\n result = automator.crawl_dingyuehao(line, articles,\r\n states=states, max_pages=curr_max_pages,\r\n detail=detail, latest_date=curr_latest_date,\r\n crawl_counter=crawl_read_count,\r\n debug_ocr=debug_ocr)\r\n s = \"抓取 {} 成功: {}\".format(line, result)\r\n add_to_detail(s, detail)\r\n print(s)\r\n if result:\r\n for article in articles:\r\n url, _, title, html, pub_date, counts = article\r\n if counts:\r\n read_count, star_count, share_count = counts\r\n else:\r\n read_count, star_count, share_count = -1, -1, -1\r\n if not url_in_states(url, states):\r\n page = {\"url\": url,\r\n \"crawlWechatId\": wechat_id,\r\n \"title\": title,\r\n \"pubName\": line,\r\n \"readCount\": read_count,\r\n \"starCount\": star_count,\r\n \"shareCount\": share_count,\r\n \"html\": html}\r\n s = \"addurl: {}\".format(page[\"url\"])\r\n add_to_detail(s, detail)\r\n print(s)\r\n s = requests.post(java_server + \"/addurl\", json=page)\r\n res = json.loads(s.text)\r\n if not res[\"success\"]:\r\n s = \"addurl失败:{}\".format(res)\r\n add_to_detail(s, detail)\r\n print(s)\r\n continue\r\n\r\n if crawl_read_count and not is_first_crawl:\r\n results = []\r\n if is_fuwuhao:\r\n res = automator.crawl_fuwuhao_read_count(line, results, states, detail,\r\n max_pages=curr_max_pages, debug_ocr=debug_ocr)\r\n else:\r\n res = automator.crawl_dingyuehao_read_count(line, results, states, detail,\r\n max_pages=curr_max_pages, debug_ocr=debug_ocr)\r\n s = \"抓取 readcount {} 成功: {}\".format(line, res)\r\n add_to_detail(s, detail)\r\n print(s)\r\n if res:\r\n for item in results:\r\n url, _, title, html, pub_date, counts = item\r\n read_count, star_count, share_count = counts\r\n if read_count <= 0:\r\n s = \"{} {} no url\".format(url, title)\r\n print(s)\r\n add_to_detail(s, detail)\r\n continue\r\n\r\n page_id = 
find_id_in_states(url, states)\r\n if page_id is None:\r\n s = \"url {} not found in states\".format(url)\r\n print(s)\r\n add_to_detail(s, detail)\r\n continue\r\n\r\n params = {'wechatId': wechat_id,\r\n 'id': page_id,\r\n \"state\": True,\r\n \"read\": read_count,\r\n \"star\": star_count,\r\n \"share\": share_count\r\n }\r\n s = \"counter params: {}\".format(params)\r\n add_to_detail(s, detail)\r\n print(s)\r\n r = requests.post(java_server + '/updatecounter', json=params)\r\n\r\n res = json.loads(r.text)\r\n if not res[\"success\"]:\r\n s = \"更新失败: {}\".format(res)\r\n add_to_detail(s, detail)\r\n print(s)\r\n\r\n except:\r\n traceback.print_exc()\r\n finally:\r\n send_debug_info(wechat_id, debug_info, java_server)\r\n\r\n while True:\r\n current_time = int(time.time())\r\n time_sleep = 3600 * crawl_interval + start_time - current_time\r\n if time_sleep > 0:\r\n time.sleep(min(5*60, time_sleep))\r\n test_id = automator.get_wechat_id()\r\n succ = test_id == wechat_id\r\n send_heart_beat(wechat_id, \"heart-beat-{}\".format(succ), java_server)\r\n else:\r\n break\r\n\r\n\r\n\r\n\r\n","repo_name":"fancyerii/wechat-gongzhonghao-crawler","sub_path":"client/crawler/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":15281,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"61"} +{"seq_id":"19053701990","text":"#coding:utf-8\n# Write a Python program that matches a word at the beginning of a string. \nimport re\ndef test(str):\n regex=re.compile(r\"lol\")\n if regex.match(str):\n return \"found\"\n return \"not found\"\n\nprint(test(\"lolsdfb\"))\nprint(test(\"aabAlolbbbc\"))\n\n","repo_name":"DonaFidele/PythonExercices","sub_path":"re/exo_10.py","file_name":"exo_10.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20859286481","text":"import unittest\nimport pygame\nimport sys\nimport os\n\n# getting the name of the directory where the this file is present\ncurrent = os.path.dirname(os.path.realpath(__file__))\n# Getting the parent directory name where the current directory is present\nparent = os.path.dirname(current)\n# adding the parent directory to the sys.path\nsys.path.append(os.path.dirname(parent))\n\nSTATE_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"assets\", \"state2.txt\")\n\nfrom classes.board import Board\nfrom classes.pawn import Pawn\nfrom classes.match import Match\n\nclass TestMovementIntegration(unittest.TestCase):\n def setUp(self):\n self.tela = pygame.surface.Surface((800, 800))\n self.tam = 100\n self.board = Board(self.tela, self.tam, STATE_PATH)\n self.piece = Pawn(\"A1\", True)\n self.board.matrix[0][0] = self.piece\n self.board.white.append(self.piece)\n self.board.white_group.add(self.piece)\n self.piece.board = self.board\n self.board.match = Match(self.board)\n\n def test_piece_movement(self):\n self.assertEqual(self.board.get_piece((0,0)), self.piece)\n self.assertIsNone(self.board.get_piece((1,0)))\n self.piece.move((1,0))\n self.assertEqual(self.board.get_piece((1,0)), self.piece)\n self.assertIsNone(self.board.get_piece((0,0)))\n\n def test_piece_capture(self):\n piece2 = Pawn(\"B2\", False)\n piece2.board = self.board\n self.board.matrix[2][1] = piece2\n self.board.black.append(self.board.matrix[2][1])\n self.board.black_group.add(self.board.matrix[2][1])\n self.assertEqual(self.board.get_piece((0,0)), self.piece)\n self.assertIsNone(self.board.get_piece((1,0)))\n 
self.assertEqual(self.board.get_piece((2,1)), piece2)\n self.piece.move((1,0))\n self.assertEqual(self.board.get_piece((1,0)), self.piece)\n self.assertIsNone(self.board.get_piece((0,0)))\n self.assertEqual(self.board.get_piece((2,1)), piece2)\n self.piece.move((2,1))\n self.assertIsNone(self.board.get_piece((1,0)))\n self.assertEqual(self.board.get_piece((2,1)), self.piece)\n self.assertIsNone(self.board.get_piece((0,0)))\n\n","repo_name":"higorrluiz/Xadrez","sub_path":"test/integration/test_movement.py","file_name":"test_movement.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41516003608","text":"# ///-----------------------------------------------------------------\n# /// Description: \n# /// Author: \n# /// Date: \n# /// Revision History: ---\n# ///-----------------------------------------------------------------\n\nfrom matplotlib import pyplot\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nfrom math import pi\nfrom IPython import embed\nfrom draw_path import draw_path\nfrom traj import traj\n\ndef plot_initial(opt,agents):\n\n\tplt.figure(1)\n\t\n\t#plt.setp(axes,fontsize=20)\n\tax2=plt.subplot(1,3,2)\n\tplt.title('Optimal trajectory')\n\t#ax=Axes3D(plt.gcf())\n\tplt.imshow(opt.mapWithObstacles)\n\t#plt.axis('tight')\n\t#plt.axis('equal')\n\tplt.show()\n\tr=10\n\tang=np.arange(0,2*pi+0.01,0.01)\n\tx1=65\n\ty1=40\n\tx2=35\n\ty2=75\n\tplt.axis((opt.xmin,opt.xmax,opt.ymin,opt.ymax))\n\t\n\t\n\tplt.figure(1)\n\tax1=plt.subplot(1,3,1)\n\n\tplt.imshow(np.flipud(opt.mapWithObstacles))\n\t#plt.axis('tight')\n\t#plt.axis('equal')\n\tplt.title('Candidate trajectories')\n\n\tax3=plt.subplot(1,3,3)\n\t\n\n\t#check opt.htraj\n\tplt.title('Time-Average Statistics')\n\tplt.axis((opt.xmin,opt.xmax,opt.ymin,opt.ymax))\n\t\n\tagents.trajFigOPTIMAL=[0,0,0]\n\tagents.trajFig=[0,0,0]\n\n\tfor iagent in range(0,opt.nagents):\n\t\topt.iagent=iagent\n\t\topt.figg=plt.figure(1)\n\t\tax2.scatter(agents.xi[iagent,0],agents.xi[iagent,1],0,30)\n\t\tagents.trajFigOPTIMAL[iagent]=draw_path(traj(0*opt.z,opt,agents),opt.colors[iagent],2,5)\n\n\t\t#subplot\n\n\t\tagents.trajFig[iagent]=draw_path(traj(0*opt.z,opt,agents),opt.colors[iagent],2,5)\n\t\tax1.scatter(agents.xi[iagent,0],agents.xi[iagent,1],0,30)\n\n\tFig=draw_path(0*np.random.rand(3,3),'b',3,5)\n\t\n\treturn opt,agents,Fig\n\n\t\n\t\t","repo_name":"stevenshan/search-coverage","sub_path":"stoec/plot_initial.py","file_name":"plot_initial.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"71100504196","text":"from typing import TypedDict\n\nfrom booklog.bookdata import authors\nfrom booklog.utils import export_tools\nfrom booklog.utils.logging import logger\n\nAuthor = TypedDict(\n \"Author\",\n {\n \"name\": str,\n \"sortName\": str,\n \"slug\": str,\n },\n)\n\n\ndef export() -> None:\n logger.log(\"==== Begin exporting {}...\", \"authors\")\n\n all_authors = [\n Author(\n name=author.name,\n sortName=author.sort_name,\n slug=author.slug,\n )\n for author in authors.deserialize_all()\n ]\n\n export_tools.serialize_dicts(all_authors, \"authors\")\n","repo_name":"fshowalter/booklog","sub_path":"booklog/bookdata/exports/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"34548330641","text":"import io\nfrom datetime import datetime\n\nfrom django.contrib.auth import get_user_model\nfrom django.http import FileResponse\nfrom django.shortcuts import get_object_or_404\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom foodgram.models import Ingredients, Recipe, Tags\nfrom reportlab.lib.pagesizes import A4\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nfrom reportlab.pdfgen import canvas\nfrom rest_framework import permissions, status, views, viewsets\nfrom rest_framework.response import Response\n\nfrom .filters import IngredientFilter, RecipeFilter\nfrom .helpers import get_unique_recipe_ingredients\nfrom .mixins import CreateDestroyModelViewSet, ListModelViewSet\nfrom .pagination import PageNumberLimitPagination\nfrom .permissions import IsAdminOrAuthorOrReadOnly\nfrom .serializers import (IngredientsSerializer, RecipeCompactSerializer,\n RecipeCreateSerializer, RecipeSerializer,\n TagsSerializer, UserSubscribedSerializer)\n\nUser = get_user_model()\n\n\nclass SubscriptionsViewSet(ListModelViewSet):\n \"\"\"Представление для подписок.\"\"\"\n\n serializer_class = UserSubscribedSerializer\n permission_classes = (permissions.IsAuthenticated,)\n pagination_class = PageNumberLimitPagination\n\n def get_queryset(self):\n return self.request.user.follower.all()\n\n\nclass SwitchOnOffViewSet(CreateDestroyModelViewSet):\n \"\"\"Базовый класс\"\"\"\n router_pk = \"id\"\n error_text_create = \"Невозможно добавить запись\"\n error_text_destroy = \"Невозможно удалить запись\"\n permission_classes = (permissions.IsAuthenticated,)\n\n def get_object(self):\n queryset = self.get_queryset()\n obj = get_object_or_404(queryset, pk=self.kwargs.get(self.router_pk))\n self.check_object_permissions(self.request, obj)\n return obj\n\n def is_on(self) -> bool:\n raise NotImplementedError('Определите метод is_on!')\n\n @staticmethod\n def error(text: str) -> Response:\n return Response(\n {\n \"errors\": text,\n },\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n def create(self, request, *args, **kwargs):\n if self.is_on():\n return self.error(self.error_text_create)\n obj = self.get_object()\n serializer = self.get_serializer(instance=obj)\n self.perform_create(serializer)\n headers = self.get_success_headers(serializer.data)\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED,\n headers=headers,\n )\n\n def delete(self, request, *args, **kwargs):\n return self.destroy(request, *args, **kwargs)\n\n def destroy(self, request, *args, **kwargs):\n if not self.is_on():\n return self.error(self.error_text_destroy)\n return super().destroy(request, *args, **kwargs)\n\n\nclass RecipeFavoriteViewSet(SwitchOnOffViewSet):\n \"\"\"Представление для добавления в избранное\"\"\"\n\n model_class = Recipe\n queryset = Recipe.objects.all()\n serializer_class = RecipeCompactSerializer\n router_pk = \"recipe_id\"\n error_text_create = \"Рецепт уже добавлен в список избранное\"\n error_text_destroy = \"Рецепта нет в списке избранного\"\n\n def is_on(self) -> bool:\n recipe = self.get_object()\n return self.request.user.favorite.filter(id=recipe.id).exists()\n\n def perform_create(self, serializer: RecipeCompactSerializer):\n recipe = self.get_object()\n self.request.user.favorite.add(recipe)\n self.request.user.save()\n\n def perform_destroy(self, instance: Recipe):\n self.request.user.favorite.remove(instance)\n self.request.user.save()\n\n\nclass RecipeCartViewSet(SwitchOnOffViewSet):\n \"\"\"Представление для 
adding a recipe to the shopping cart\"\"\"\n\n    model_class = Recipe\n    queryset = Recipe.objects.all()\n    serializer_class = RecipeCompactSerializer\n    router_pk = \"recipe_id\"\n    error_text_create = \"Рецепт уже добавлен список покупок\"\n    error_text_destroy = \"Рецепта нет в списке покупок\"\n\n    def is_on(self) -> bool:\n        recipe = self.get_object()\n\n        return self.request.user.cart.filter(id=recipe.id).exists()\n\n    def perform_create(self, serializer: RecipeCompactSerializer):\n        recipe = self.get_object()\n\n        self.request.user.cart.add(recipe)\n        self.request.user.save()\n\n    def perform_destroy(self, instance: Recipe):\n        self.request.user.cart.remove(instance)\n        self.request.user.save()\n\n\nclass SubscribeViewSet(SwitchOnOffViewSet):\n    \"\"\"Viewset for subscribing to a user\"\"\"\n\n    model_class = User\n    queryset = User.objects.all()\n    serializer_class = UserSubscribedSerializer\n    router_pk = 'user_id'\n    error_text_create = \"Подписка уже существует\"\n    error_text_destroy = \"Подписки не существует\"\n\n    def is_on(self) -> bool:\n        user = self.get_object()\n        return self.request.user.follower.filter(id=user.id).exists()\n\n    def create(self, request, *args, **kwargs):\n        user = self.get_object()\n        if self.request.user.id == user.id:\n            return self.error(\"Невозможно подписаться на самого себя\")\n        return super().create(request, *args, **kwargs)\n\n    def perform_create(self, serializer: UserSubscribedSerializer):\n        user = self.get_object()\n\n        self.request.user.follower.add(user)\n        self.request.user.save()\n\n    def perform_destroy(self, instance: User):\n        self.request.user.follower.remove(instance)\n        self.request.user.save()\n\n\nclass IngredientsViewSet(viewsets.ReadOnlyModelViewSet):\n    \"\"\"Viewset for working with ingredients.\"\"\"\n\n    queryset = Ingredients.objects.all()\n    serializer_class = IngredientsSerializer\n    permission_classes = (IsAdminOrAuthorOrReadOnly,)\n    filter_backends = (DjangoFilterBackend,)\n    filterset_class = IngredientFilter\n\n\nclass TagsViewSet(viewsets.ReadOnlyModelViewSet):\n    \"\"\"Viewset for working with tags.\"\"\"\n\n    queryset = Tags.objects.all()\n    serializer_class = TagsSerializer\n    permission_classes = (IsAdminOrAuthorOrReadOnly,)\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n    \"\"\"Viewset for working with recipes.\"\"\"\n\n    queryset = Recipe.objects.all()\n    serializer_class = RecipeSerializer\n    pagination_class = PageNumberLimitPagination\n    permission_classes = (IsAdminOrAuthorOrReadOnly,)\n    filter_backends = (DjangoFilterBackend,)\n    filterset_class = RecipeFilter\n    lookup_field = \"id\"\n    search_fields = (\"name\",)\n\n    def get_serializer_class(self):\n        if self.action in (\n            \"create\",\n            \"partial_update\",\n            \"update\",\n        ):\n            return RecipeCreateSerializer\n        return RecipeSerializer\n\n\nclass SaveCartView(views.APIView):\n    \"\"\"Base view for saving the shopping list.\"\"\"\n\n    permission_classes = (permissions.IsAuthenticated,)\n    font_path = \"./static/fonts/JetBrainsMono-Medium.ttf\"\n    filename = \"file.pdf\"\n\n    def get_text_lines(self):\n        pass\n\n    def get(self, request):\n        buffer = io.BytesIO()\n        pdfmetrics.registerFont(TTFont(\"Font\", self.font_path))\n        page = canvas.Canvas(buffer, pagesize=A4)\n        page.setFont(\"Font\", 14)\n        text = page.beginText()\n        text.setTextOrigin(80, 750)\n        text.textLine(f'СПИСОК ПОКУПОК {datetime.date(datetime.now())}.')\n        for text_line in self.get_text_lines():\n            text.textLine(text=text_line)\n        page.drawText(text)\n        page.showPage()\n        page.save()\n        buffer.seek(0)\n        return FileResponse(\n            buffer,\n            
as_attachment=True,\n            filename=self.filename,\n        )\n\n\nclass RecipeCartDownloadView(SaveCartView):\n    \"\"\"View for saving the shopping list.\"\"\"\n\n    filename = f'Список покупок {datetime.date(datetime.now())}.pdf'\n\n    def get_text_lines(self):\n        recipes = self.request.user.cart.all()\n        unique_recipe_ingredients = get_unique_recipe_ingredients(recipes)\n        text_lines = []\n\n        for ingredient in unique_recipe_ingredients.values():\n            text_lines.append(\n                f\"{ingredient['name']} ({ingredient['unit']}) \"\n                f\"— {ingredient['amount']}\"\n            )\n\n        return text_lines\n","repo_name":"netzen86/foodgram-project-react","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16417865208","text":"from collections import deque\n\nboard_size = int(input())\nnum_apple = int(input())\n\nboard = [([0] * board_size) for _ in range(board_size)]\nfor _ in range(num_apple):\n    x, y = map(int, input().split())\n    board[x - 1][y - 1] = 1\n\nnum_moves = int(input())\nmoves = []\nfor _ in range(num_moves):\n    time, dir = input().split()\n    time = int(time)\n    moves.append((time, dir))\n\nsnake = deque()\nsnake.append((0, 0))\nboard[0][0] = 2\nRIGHT = [0, 1]\nLEFT = [0, -1]\nUP = [-1, 0]\nDOWN = [1, 0]\nDIR = [RIGHT, DOWN, LEFT, UP]\ncurrent_dir = 0\n\ndef change_dir(rotate_dir):\n    global current_dir\n    if rotate_dir == \"L\":\n        current_dir = (4 + current_dir - 1) % 4\n    elif rotate_dir == \"D\":\n        current_dir = (current_dir + 1) % 4\n\ntime = 0\nmove = 0\nover = False\nwhile True:\n    # check for a direction change at this tick\n    if len(moves) > move and time == moves[move][0]:\n        change_dir(moves[move][1])\n        move += 1\n\n    # extend the head first\n    x, y = snake.popleft()\n    nx, ny = x + DIR[current_dir][0], y + DIR[current_dir][1]\n\n    if nx < 0 or ny < 0 or nx >= board_size or ny >= board_size:\n        # Game over\n        print(time + 1)\n        over = True\n        break\n    \n    if (nx, ny) in snake:\n        print(time + 1)\n        over = True\n        break\n\n    # if there is an apple\n    if board[nx][ny] == 1:\n        snake.appendleft((x, y))\n        snake.appendleft((nx, ny))\n        board[nx][ny] = 2\n        board[x][y] = 2\n    else:\n        snake.appendleft((x, y))\n        snake.appendleft((nx, ny))\n        board[nx][ny] = 2\n        board[x][y] = 2\n        px, py = snake.pop()\n        board[px][py] = 0\n    time += 1\n\nif not over:\n    print(time + 1)","repo_name":"Sleeeeeepy/jungle-ps","sub_path":"week1/3190.py","file_name":"3190.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25618870328","text":"import sys\nsys.stdin = open('ex01input.txt', 'r')\n\nT = int(input())\n\nfor tc in range(1, T+1): \n    Arr = []\n    # read the input\n    for _ in range(5):\n        lst = list(map(int, input().split()))\n        Arr.append(lst)\n    \n    total = 0\n    for i in range(5):\n        for j in range(5):\n            diff_up = (0 if i==0 else abs(Arr[i][j] - Arr[i-1][j]))\n            diff_down = (0 if i==4 else abs(Arr[i][j] - Arr[i+1][j]))\n            diff_left = (0 if j==0 else abs(Arr[i][j] - Arr[i][j-1]))\n            diff_right = (0 if j==4 else abs(Arr[i][j] - Arr[i][j+1]))\n            diff_total = diff_up + diff_down + diff_left + diff_right\n            total += diff_total\n    print(\"#%d %d\" % (tc, total))","repo_name":"minnczi/Algorithm","sub_path":"Concepts/delta.py","file_name":"delta.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30316602809","text":"\"\"\"\n    
https://colab.research.google.com/drive/1-mc8j6UnIVglOsmE8ue68vGkf_BBxTK0\n\"\"\"\n\n# !pip install medisearch_client\n\nimport json\nimport uuid\n\nfrom medisearch_client import MediSearchClient\n\n#@title Set constants\n\napi_key = \"gx4XXBhE7Zrga682gmEm\"\nconversation_id = str(uuid.uuid4())\nclient = MediSearchClient(api_key=api_key)\n\n#@title Make a search query\nquery = \"Does depression increase the chances of heart attack?\"\nresponses = client.send_user_message(conversation=[query],\n conversation_id=conversation_id,\n language=\"English\",\n should_stream_response=True)\n\nfor response in responses:\n if response[\"event\"] == \"llm_response\":\n text_response = response[\"text\"]\n print(text_response)\n\n#@title Ask a followup question\nfollow_up_query = \"By what percentage does depression increase risk of heart disease?\"\nresponses = client.send_user_message(conversation=[query,\n text_response,\n follow_up_query],\n conversation_id=conversation_id,\n language=\"English\",\n should_stream_response=False)\n\nresponses\n\n","repo_name":"kwishna/LLMCodes","sub_path":"medisearch/medisearch_api_demo.py","file_name":"medisearch_api_demo.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18037072977","text":"from openroad import Design, Tech\nimport helpers\nimport odb\n\ntech = Tech()\ntech.readLef(\"sky130hd/sky130hd.tlef\")\ntech.readLef(\"sky130hd/sky130_fd_sc_hd_merged.lef\")\ndesign = Design(tech)\ndesign.readDef(\"gcd_prefill.def\")\n\ndfl = design.getFinale()\ndfl.densityFill(\"fill.json\", design.getBlock().getCoreArea())\n\ndef_file = helpers.make_result_file(\"gcd_fill.def\")\ndesign.writeDef(def_file)\nhelpers.diff_files(def_file, \"gcd_fill.defok\")\n","repo_name":"The-OpenROAD-Project/OpenROAD","sub_path":"src/fin/test/gcd_fill.py","file_name":"gcd_fill.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":1027,"dataset":"github-code","pt":"61"} +{"seq_id":"24335086595","text":"from project.room import Room\r\n\r\n\r\nclass Hotel:\r\n\r\n def __init__(self, name):\r\n self.name = name\r\n self.rooms = []\r\n\r\n @property\r\n def guests(self):\r\n return sum([room.guests for room in self.rooms])\r\n\r\n @classmethod\r\n def from_stars(cls, stars_count):\r\n return cls(f\"{stars_count} stars Hotel\")\r\n\r\n def add_room(self, room):\r\n self.rooms.append(room)\r\n\r\n def take_room(self, room_number, people):\r\n room = [room for room in self.rooms if room_number == room.number][0]\r\n room.take_room(people)\r\n\r\n def free_room(self, room_number):\r\n room = [room for room in self.rooms if room_number == room.number][0]\r\n room.free_room()\r\n\r\n def status(self):\r\n free_rooms = [str(room.number) for room in self.rooms if not room.is_taken]\r\n taken_rooms = [str(room.number) for room in self.rooms if room.is_taken]\r\n result = f\"Hotel {self.name} has {self.guests} total guests\\n\" \\\r\n f\"Free rooms: {', '.join(free_rooms)}\\n\" \\\r\n f\"Taken rooms: {', '.join(taken_rooms)}\"\r\n return result\r\n\r\n\r\nhotel = Hotel.from_stars(5)\r\n\r\nfirst_room = Room(1, 3)\r\nsecond_room = Room(2, 2)\r\nthird_room = Room(3, 1)\r\n\r\nhotel.add_room(first_room)\r\nhotel.add_room(second_room)\r\nhotel.add_room(third_room)\r\n\r\nhotel.take_room(1, 4)\r\nhotel.take_room(1, 2)\r\nhotel.take_room(3, 1)\r\nhotel.take_room(3, 1)\r\n\r\nprint(hotel.status())\r\n","repo_name":"TomaMishev/Python_4_OOP","sub_path":"5. 
Static and Class methods/Lab/P04_hotel_rooms/project/hotel.py","file_name":"hotel.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38120752856","text":"import uuid\n\nimport datetime\nimport jsonpickle\nfrom django.db.models import F\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom cart_app.cartmanager import DBCartManger\nfrom goods_app.models import Inventory\nfrom pay_app.models import Order, OrderItem\nfrom net_app.models import Address\nfrom utils.alipay_p3 import AliPay\n\n\n\ndef toOrderView(request):\n\n cartitems = request.GET.get('cartitems','')\n # Get the total payment amount\n totalPrice = request.GET.get('totalPrice','')\n\n # Check whether the current user is logged in\n if not request.session.get('user',''):\n # return HttpResponseRedirect('/user/login/?reflag=order&cartitems='+cartitems)\n return render(request,'net_app/login.html',{'reflag':'order','cartitems':cartitems,'totalPrice':totalPrice})\n\n\n # Deserialize cartitems\n #[{'goodsid':1,'sizeid':'2','colorid':'3'},{}]\n cartitemList = jsonpickle.loads(cartitems)\n\n\n # Get the default shipping address\n user = jsonpickle.loads(request.session.get('user',''))\n addrObj = user.address.get(isdefault=True)\n\n # Get the order contents\n #[CartItem(),CartItem()]\n cartItemObjList = [DBCartManger(user).get_cartitems(**item) for item in cartitemList if item]\n\n # toPrice = 0\n # for ci in cartItemObjList:\n # toPrice += ci.getTotalPrice()\n\n\n return render(request,'pay_app/order.html',{'addrObj':addrObj,'cartItemObjList':cartItemObjList,'totalPrice':totalPrice})\n\nalipayObj = AliPay(appid='2016102500756428', app_notify_url='http://127.0.0.1:8000/pay/checkPay/', app_private_key_path='pay_app/keys/my_private_key.txt',\n alipay_public_key_path='pay_app/keys/alipay_public_key.txt', return_url='http://127.0.0.1:8000/pay/modifycart/', debug=True)\n\n\n\ndef toPayView(request):\n addrid = request.GET.get('address', -1)\n payway = request.GET.get('payway', 'alipay')\n cartitems = request.GET.get('cartitems', '')\n\n params = {\n 'out_trade_num': uuid.uuid4().hex,\n 'order_num': datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"),\n 'address': Address.objects.get(id=addrid),\n 'user': jsonpickle.loads(request.session.get('user', '')),\n 'payway': payway\n }\n\n orderObj = Order.objects.create(**params)\n\n # '['{'goodsid:1','sizeid:'2',...'}']'\n if cartitems:\n # [{dict1},{dict2}]\n cartitems = jsonpickle.loads(cartitems)\n\n orderItemList = [OrderItem.objects.create(order=orderObj, **ci) for ci in cartitems if ci]\n\n urlparam = alipayObj.direct_pay(subject='京东商城', out_trade_no=orderObj.out_trade_num,\n total_amount=request.GET.get('totalPrice', 0))\n\n url = alipayObj.gateway + '?' + urlparam\n\n return HttpResponseRedirect(url)\n\ndef modify_cart(request):\n params = request.GET.dict()\n user = jsonpickle.loads(request.session.get('user', ''))\n\n # Update the order status\n orderObj = Order.objects.get(out_trade_num=params.get('out_trade_no', ''))\n orderObj.trade_no = params.get('trade_no', '')\n orderObj.status = '待发货'\n orderObj.save()\n\n # Update the inventory counts\n orderItemList = orderObj.orderitem_set.all()\n [Inventory.objects.filter(goods_id=oi.goodsid, color_id=oi.colorid, size_id=oi.sizeid).update(\n count=F('count') - oi.count) for oi in orderItemList if oi]\n\n # Remove the purchased items from the cart table\n [user.cartitem_set.filter(goodsid=oi.goodsid, colorid=oi.colorid, sizeid=oi.sizeid, count=oi.count).delete() for oi\n in orderItemList if oi]\n\n # Update the data in the order table\n\n [user.orderitem_set.create(goodsid=oi.goodsid, colorid=oi.colorid, sizeid=oi.sizeid,count=oi.count,\n aname=Address.objects.get(id=Order.objects.get(id=oi.order_id).address_id).aname,\n aphone=Address.objects.get(id=Order.objects.get(id=oi.order_id).address_id).aphone,\n addr=Address.objects.get(id=Order.objects.get(id=oi.order_id).address_id).addr\n ) for oi in orderItemList if oi]\n\n return HttpResponseRedirect('/order/queryAll/')\n\ndef checkPayView(request):\n # Get all request parameters\n params = request.GET.dict()\n # Get the value of sign\n sign = params.pop('sign')\n\n\n # Verify whether the payment succeeded\n if alipayObj.verify(params, sign):\n\n return HttpResponse('支付成功!')\n\n return HttpResponse('支付失败!')","repo_name":"flag-xt/my_site","sub_path":"pay_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4779808791","text":"# CHALLENGE PROBLEM: \n#\n# Use your check_sudoku function as the basis for solve_sudoku(): a\n# function that takes a partially-completed Sudoku grid and replaces\n# each 0 cell with an integer in the range 1..9 in such a way that the\n# final grid is valid.\n#\n# There are many ways to cleverly solve a partially-completed Sudoku\n# puzzle, but a brute-force recursive solution with backtracking is a\n# perfectly good option. The solver should return None for broken\n# input, False for inputs that have no valid solutions, and a valid\n# 9x9 Sudoku grid containing no 0 elements otherwise. In general, a\n# partially-completed Sudoku grid does not have a unique solution. 
You\n# should just return some member of the set of solutions.\n#\n# A solve_sudoku() in this style can be implemented in about 16 lines\n# without making any particular effort to write concise code.\n\nfrom copy import deepcopy\nimport random, time\n\ngrids = {\n 'Number Missing': [ # -> None\n [5, 3, 4, 6, 7, 8, 9, 1, 2],\n [6, 7, 2, 1, 9, 5, 3, 4, 8],\n [1, 9, 8, 3, 4, 2, 5, 6, 7],\n [8, 5, 9, 7, 6, 1, 4, 2, 3],\n [4, 2, 6, 8, 5, 3, 7, 9], # <---\n [7, 1, 3, 9, 2, 4, 8, 5, 6],\n [9, 6, 1, 5, 3, 7, 2, 8, 4],\n [2, 8, 7, 4, 1, 9, 6, 3, 5],\n [3, 4, 5, 2, 8, 6, 1, 7, 9]\n ],\n\n 'Valid': [ # -> True\n [5, 3, 4, 6, 7, 8, 9, 1, 2],\n [6, 7, 2, 1, 9, 5, 3, 4, 8],\n [1, 9, 8, 3, 4, 2, 5, 6, 7],\n [8, 5, 9, 7, 6, 1, 4, 2, 3],\n [4, 2, 6, 8, 5, 3, 7, 9, 1],\n [7, 1, 3, 9, 2, 4, 8, 5, 6],\n [9, 6, 1, 5, 3, 7, 2, 8, 4],\n [2, 8, 7, 4, 1, 9, 6, 3, 5],\n [3, 4, 5, 2, 8, 6, 1, 7, 9]\n ],\n\n 'Invalid': [ # -> False\n [5, 3, 4, 6, 7, 8, 9, 1, 2],\n [6, 7, 2, 1, 9, 5, 3, 4, 8],\n [1, 9, 8, 3, 8, 2, 5, 6, 7],\n [8, 5, 9, 7, 6, 1, 4, 2, 3],\n [4, 2, 6, 8, 5, 3, 7, 9, 1],\n [7, 1, 3, 9, 2, 4, 8, 5, 6],\n [9, 6, 1, 5, 3, 7, 2, 8, 4],\n [2, 8, 7, 4, 1, 9, 6, 3, 5],\n [3, 4, 5, 2, 8, 6, 1, 7, 9]\n ],\n\n 'Easy': [ # -> True\n [2, 9, 0, 0, 0, 0, 0, 7, 0],\n [3, 0, 6, 0, 0, 8, 4, 0, 0],\n [8, 0, 0, 0, 4, 0, 0, 0, 2],\n [0, 2, 0, 0, 3, 1, 0, 0, 7],\n [0, 0, 0, 0, 8, 0, 0, 0, 0],\n [1, 0, 0, 9, 5, 0, 0, 6, 0],\n [7, 0, 0, 0, 9, 0, 0, 0, 1],\n [0, 0, 1, 2, 0, 0, 3, 0, 6],\n [0, 3, 0, 0, 0, 0, 0, 5, 9]\n ],\n\n 'Hard': [ # -> True\n [1, 0, 0, 0, 0, 7, 0, 9, 0],\n [0, 3, 0, 0, 2, 0, 0, 0, 8],\n [0, 0, 9, 6, 0, 0, 5, 0, 0],\n [0, 0, 5, 3, 0, 0, 9, 0, 0],\n [0, 1, 0, 0, 8, 0, 0, 0, 2],\n [6, 0, 0, 0, 0, 4, 0, 0, 0],\n [3, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 4, 0, 0, 0, 0, 0, 0, 7],\n [0, 0, 7, 0, 0, 0, 3, 0, 0]\n ]\n}\n\n\ndef check_sudoku(grid):\n \"\"\"Determine whether a given sudoku grid is valid and correct.\n\n To be valid, a grid must be a 9x9 list of lists made of ints in the range 0..9.\n\n To be correct, a grid must respect the following rules:\n * each number in the range 1..9 occurs only once in each row;\n * each number in the range 1..9 occurs only once in each column;\n * each number the range 1..9 occurs only once in each of the nine 3x3 sub-grids, or \"boxes\", that make up the board.\n\n :param grid: the grid to check\n :return: None if the grid is invalid, False if it's incorrect and True otherwise\n \"\"\"\n # Check validity\n if not isinstance(grid, list) or len(grid) != 9: # Checking if the grids is an instance of list and that the length of the gris is 9\n return None # Otherwise return None (First return case)\n\n for row in grid: # Checking each row on the grid\n if not isinstance(row, list) or len(row) != 9 or \\\n any(number not in range(10) for number in row): # If each row is not an instance of a list or if the length of the row not 9 or if there is any number higher than 10 in the row\n return None # Then it return None (First return case)\n\n # To keep track of the numbers we are checking, we are going to use sets\n # If a number is already found in a given set then it will return False\n # As the grid would be incorrect (2 same numbers on the same row/subgrid/column)\n column_numbers = [set() for i in range(9)] # Making a list of 9 sets (9 columns)\n subgrid_numbers = [ # Making a subgrid display to keep track later the numbers in each subgrid\n [set(), set(), set()],\n [set(), set(), set()],\n [set(), set(), set()]\n ]\n\n for row_index, row in enumerate(grid): # Looping through each grid 
and getting each row's list\n row_numbers = set() \n\n for column_index, number in enumerate(row): # Looping through the columns by getting the row values => we loop horizontally\n if number in row_numbers or \\\n number in column_numbers[column_index] or \\\n number in subgrid_numbers[row_index // 3][column_index // 3]: # Here we check if the number is already in a column, a row or in a subgrid. To get the right subgrid, we divide by 3 because there are 3 columns in a subgrid, so dividing by 3 gives us the right subgrid index\n return False # Return False if any of those conditions are true, which means the grid is incorrect but not invalid.\n\n if number != 0:\n row_numbers.add(number) # Here we keep track of the row values of the number we just tested\n column_numbers[column_index].add(number) # Here we keep track of the column values of the number we just tested\n subgrid_numbers[row_index // 3][column_index // 3].add(number) # Here we keep track of the subgrid values of the number we just tested\n return True # Return True, meaning the grid is correct\n\n\ndef solve_sudoku(original_grid):\n \"\"\"Solve an incomplete sudoku grid.\n\n In an incomplete grid, empty cells are represented by zeroes.\n\n Also note that the original grid stays untouched: a new one is created when solved.\n\n :param original_grid: the grid to solve\n :return: None if the grid is invalid, False if it's incorrect, or a solved grid otherwise\n \"\"\"\n # Simple, naive and inefficient backtracking algorithm\n # (based on https://en.wikipedia.org/wiki/Sudoku_solving_algorithms)\n\n # Ensure that the input grid is valid and correct\n check = check_sudoku(original_grid)\n if not check:\n return check\n\n # Solve it\n grid = deepcopy(original_grid) # We get a copy of the grid to solve\n\n empty_cells = [] # Stack to track last visited empty cells\n cell_found = False # Whether the algorithm is searching for an empty cell or not\n\n row_index = 0 \n while row_index < 9: # Looping through the rows\n\n column_index = 0\n while column_index < 9: # Looping through the columns\n\n if cell_found or grid[row_index][column_index] == 0: # Check whether we have already found a cell or whether the cell in the grid equals 0\n cell_found = True # In which case we have found a cell\n\n if grid[row_index][column_index] < 9: # Trying every number for this cell\n grid[row_index][column_index] += 1 # By adding one to the current cell value\n\n if check_sudoku(grid): # Now we recheck the new grid to see if it's still valid\n empty_cells.append((row_index, column_index)) # If so, we keep track of the cell we just filled by pushing it onto the stack\n cell_found = False # We then do it multiple times\n column_index += 1 # For each column\n\n else:\n grid[row_index][column_index] = 0 # If we can't find a solution, we go back from the beginning\n row_index, column_index = empty_cells.pop() # And we return to the previously filled cell to try its next value\n\n else:\n column_index += 1 # Check another column by adding one to the column index\n\n row_index += 1 # Check another row by adding one to the row index\n return grid # We reached the end of the grid\n\n\ndef print_grid(grid):\n \"\"\"Print a valid sudoku grid.\n\n :param grid: the grid to display (must be valid)\n \"\"\"\n for i, row in enumerate(grid):\n print(' {} {} {} | {} {} {} | {} {} {} '.format(*row)) # Unpack the row to fit the format slots\n if i == 2 or i == 5:\n print('-------+-------+-------')\n\n\n# This function randomly generates a 
valid completed sudoku\ndef generate_sudoku_grid():\n grid = [[]]*9 # Generating an empty grid\n for i in range(9): # Looping throught the grid\n grid[i] = [0]*9 # Filling the grid with zeroes\n grid[0] = [i for i in range(1, 10)] # Filling the first row with numbers from 1 - 9\n random.shuffle(grid[0]) # Shuffling the numbers in the first line\n\n def add_next_item(grid, row, col): # Making a nested function to call it recursively later\n valids = [i for i in range(1, 10)] # Get the numbers that can be placed at grid[row][col]\n subgrid_coords = (row - (row % 3), col - (col % 3)) # Getting the coords of the subgrids\n for i in range(9): # Looping through the grid\n lst = [\n grid[row][i],\n grid[i][col], \n grid[ subgrid_coords[0] + i//3 ][ subgrid_coords[1] + (i%3) ]\n ]\n for itm in lst: # Looping through the item in the list\n if itm in valids: # Checking it the item is one element of the valids elements\n valids.remove(itm) # If so, removing it from the valids elements as it's already placed\n\n if row == 8 and col == 8: # If we are filling the last box of the grid\n if len(valids) == 0: # And the valid list is empty\n return False # Then the row generated is wrong (it lacks one number)\n else:\n grid[row][col] = valids[0] # Else, there is only an element left in the valid list\n return True # The row is correct\n next_case = (row, col+1) if col<8 else (row+1, 0) # Getting the coordinates of the next subgrid to fill\n\n random.shuffle(valids) # Mixing the numbers list, to avoid choosing always the smallest numbers first\n for itm in valids: # Put the number in the grid, and try to solve this new grid by calling add_next_item()\n grid[row][col] = itm\n if add_next_item(grid, next_case[0], next_case[1]): # If the function return True, the grid is valid, return True\n return True\n grid[row][col] = 0 # If we get here, the grid is not solvable, so return False\n return False\n\n add_next_item(grid, 1, 0) # First call of the nested function\n return grid # Return a completed valid grid\n\n\ndef random_tester(n, print_grid_bool):\n amount_of_tests = n\n tests_failed = 0\n some_test_failed = False\n\n for i in range(amount_of_tests):\n failed = False # Value to know if a test as failed\n\n grid = generate_sudoku_grid() # Here we generate a complete valid grid \n \n amount_to_remove = random.randrange(5,20) if iinput grid: \"+str(grid))\n some_test_failed = True\n elif print_grid_bool: # Else it prints the solved grid and the unsolved one if we said to at the begining\n print_grid(grid) # Unsolved grid\n print() # New Line\n print_grid(solved) # Solved grid\n print() # New Line\n\n if some_test_failed:\n print(\"{} test(s) failed !\".format(tests_failed))\n print(\"Success rate of {}%\".format(((amount_of_tests - tests_failed)/amount_of_tests)*100))\n else:\n print(\"No test failed !\")\n print(\"Success rate of {}%\".format(((amount_of_tests - tests_failed)/amount_of_tests)*100))\n\ntest_start = int(round(time.time() * 1000)) # Getting the current time in millis\nprint(\"Starting testing...\")\nrandom_tester(20, False)\nprint(\"Testings done in {} ms !\".format(int(round(time.time() * 1000)) - test_start)) # Printing how much time it has passed since the begining of the test","repo_name":"hugovrl29/Software-Testing-MOOC-2021-2022","sub_path":"Autres/Presentation Donato/Présentatation - Sudoku Solver.py","file_name":"Présentatation - Sudoku Solver.py","file_ext":"py","file_size_in_byte":18181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"12339480755","text":"# pylint: disable=missing-docstring, line-too-long, invalid-name\n\nfrom __future__ import unicode_literals\nfrom uuid import uuid4\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='HelcimTransaction',\n fields=[\n ('id', models.UUIDField(default=uuid4, editable=False, primary_key=True, verbose_name='ID')),\n ('raw_request', models.CharField(blank=True, help_text='The raw request used for this transaction', max_length=1024, null=True)),\n ('raw_response', models.CharField(blank=True, help_text='The raw response returned for this transaction', max_length=1024, null=True)),\n ('transaction_success', models.BooleanField(help_text='Whether the transaction was successful or not')),\n ('response_message', models.CharField(blank=True, help_text='The response message with the API call', max_length=256, null=True)),\n ('notice', models.CharField(blank=True, help_text='Any error or warning messages from Helcim', max_length=128, null=True)),\n ('date_response', models.DateTimeField(help_text='The date and time of the API response')),\n ('date_created', models.DateTimeField(auto_now_add=True, help_text='Date and time this transaction was recorded in database')),\n ('transaction_type', models.CharField(choices=[('s', 'purchase (sale)'), ('p', 'pre-authorization'), ('c', 'capture'), ('r', 'refund')], help_text='The type of transaction', max_length=1)),\n ('transaction_id', models.PositiveIntegerField(blank=True, help_text='The Helcim Commerce transaction ID', null=True)),\n ('amount', models.DecimalField(blank=True, decimal_places=2, help_text='The transaction amount', max_digits=12, null=True)),\n ('currency', models.CharField(blank=True, help_text='The transaction currency', max_length=8, null=True)),\n ('cc_name', models.CharField(blank=True, help_text='The cardholder name', max_length=256, null=True)),\n ('cc_number', models.CharField(blank=True, help_text='The first four and last 4 digits of the credit card number', max_length=16, null=True)),\n ('cc_expiry', models.DateField(blank=True, help_text='The credit card expiry date', null=True)),\n ('cc_type', models.CharField(blank=True, help_text='The credit card type', max_length=32, null=True)),\n ('token', models.CharField(blank=True, help_text='The Helcim generated and stored credit card token', max_length=23, null=True)),\n ('token_f4l4', models.CharField(blank=True, help_text='The first and last 4 digits of the credit card number', max_length=8, null=True)),\n ('avs_response', models.CharField(blank=True, help_text='The address verification response', max_length=1, null=True)),\n ('cvv_response', models.CharField(blank=True, help_text='The CVV verification response', max_length=1, null=True)),\n ('approval_code', models.CharField(blank=True, help_text='The transaction approval code', max_length=16, null=True)),\n ('order_number', models.CharField(blank=True, help_text='The Helcim order number', max_length=16, null=True)),\n ('customer_code', models.CharField(blank=True, help_text='The Helcim customer code', max_length=16, null=True)),\n ],\n options={\n 'ordering': ('-date_response',),\n 'permissions': (('helcim_transactions', 'Can view and interact with Helcim transactions.'),),\n },\n ),\n migrations.CreateModel(\n name='HelcimToken',\n fields=[\n ('id', models.UUIDField(default=uuid4, 
editable=False, primary_key=True, verbose_name='ID')),\n ('token', models.CharField(help_text='The Helcim card token number', max_length=23)),\n ('token_f4l4', models.CharField(help_text='The first & last four digits of the credit card number', max_length=8)),\n ('cc_type', models.CharField(blank=True, help_text='The credit card type', max_length=32, null=True)),\n ('date_added', models.DateTimeField(auto_now_add=True, help_text='Date and time this token was added to database')),\n ('customer_code', models.CharField(blank=True, help_text='The Helcim customer code', max_length=16, null=True)),\n ('django_user', models.ForeignKey(blank=True, null=True, on_delete=models.deletion.CASCADE, related_name='tokens', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'permissions': (('helcim_tokens', 'Can view and interact with Helcim tokens.'),),\n 'unique_together': {('token', 'token_f4l4', 'customer_code', 'django_user')},\n },\n )\n ]\n","repo_name":"studybuffalo/django-helcim","sub_path":"helcim/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"32350390885","text":"\"\"\" Module losses3d.py (By: Charley Zhang, 2021.05)\nContains loss functions specifically for 3D imaging tasks.\n\nAssumes the following:\n - Prediction & target variables are tensors.\n - Prediction & target variables have the same shape (target = one-hot format)\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nREDUCTIONS = ['mean', 'sum', 'class', 'none']\n\n\nclass BYOL3d:\n def __init__(self):\n print(f'💠 Criterion: BYOL3d initiated.')\n \n def __call__(self, pred, targ):\n \"\"\"\n Args:\n pred: features outputted from projection head + prediction head\n NxCxHxWxD\n targ: features outputted by EMA model + EMA projection head\n NxCxHxWxD\n \"\"\"\n msg = f'Pred & Targ shape mismatch: {pred.shape} {targ.shape}.'\n assert pred.shape == targ.shape, msg\n \n pred = pred.view(pred.shape[0], -1)\n targ = targ.view(targ.shape[0], -1)\n pred_norm = F.normalize(pred, dim=-1, p=2)\n targ_norm = F.normalize(targ, dim=-1, p=2)\n loss = (2 - 2 * (pred_norm * targ_norm).sum(dim=-1)).mean()\n return {\n 'loss': loss\n }\n\n\nclass DiceLoss3d:\n \"\"\" Good for both 2D and 3D tensors. Assumes pred.shape == targ.shape. 
\n Whether it is soft-dice or dice depends on whether predictions are \n thresholded to be a binary mask (dice) or as probabilities (soft-dice).\n \"\"\" \n def __init__(self, smooth=1.):\n self.smooth = smooth\n \n def __call__(self, pred, targ):\n return dice_loss(pred, targ, s=self.smooth)\n\n\nclass CrossEntropyLoss3d:\n \"\"\" Different from torch CE in 2 ways: (1) assumes pred.shape == targ.shape, \n which means targ must be one-hot, (2) does not apply softmax to pred.\n \"\"\"\n def __init__(self, weights=None, reduction='mean'):\n self.weights = weights\n self.reduction = reduction\n \n def __call__(self, pred, targ):\n return cross_entropy_loss(pred, targ, weights=self.weights,\n reduction=self.reduction)\n\n\nclass DiceCrossEntropyLoss3d:\n def __init__(self, weights=None, alpha=0.5):\n self.weights = weights\n self.alpha = alpha\n\n name = type(self).__name__\n print(f'💠 {name} initiated with weights={self.weights}, \\n'\n f' alpha={self.alpha}.')\n \n def __call__(self, pred, targ):\n \"\"\"\n Args:\n pred: BxCxDxHxW logits\n targ: BxCxDxHxW one hot binary mask\n \"\"\"\n # Activation\n pred = pred.softmax(1)\n ce_loss = cross_entropy_loss(pred, targ, weights=self.weights,\n reduction='mean')\n dc_loss = dice_loss(pred, targ, ignore_background=False)\n # Equal-weight sum of Dice and CE; self.alpha is kept for experiments\n # but is not applied here (the alpha-weighted blend was unreachable dead code).\n return dc_loss + ce_loss\n \n\n \n\n### ======================================================================== ###\n### * ### * ### * ### * Functional * ### * ### * ### * ###\n### ======================================================================== ###\n\n\ndef dice_loss(pred, targ, s=1, ignore_background=False, reduction='mean'):\n \"\"\" Dice loss. Assumes targ is in one-hot format. \n Parameters\n pred - prediction image probabilities [any shape]\n targ - binary label image [any shape]\n s (float) - smoothing factor added to the numerator and denominator.\n \"\"\"\n assert pred.shape == targ.shape \n assert 0 <= pred.min() <= pred.max() <= 1, '\"pred\" must be probabilities'\n\n # Calculate soft dice\n if ignore_background:\n pred = pred[:, 1:] if pred.shape[1] > 1 else pred\n targ = targ[:, 1:] if targ.shape[1] > 1 else targ\n B, C = pred.shape[:2]\n\n pred_flat = pred.view(B, C, -1).float()\n targ_flat = targ.view(B, C, -1).float()\n intersec = (pred_flat * targ_flat).sum(-1) # Shape BxC\n dice = 1 - (2 * intersec + s) / (pred_flat.sum(-1) + targ_flat.sum(-1) + s)\n # Dice is BxC, reduce to mean?\n if reduction == 'sum':\n return dice.sum()\n else:\n return dice.mean()\n\n\ndef cross_entropy_loss(pred, targ, weights=None, reduction='mean'):\n \"\"\" CE loss w/probabilities. Assumes targ is in one-hot format. 
\n Parameters\n pred - prediction probabilities [BxCxHxWxD]\n targ - binary label image [BxCxHxWxD]\n s (float) - smoothing factor added to the numerator and denominator.\n \"\"\"\n if pred.shape != targ.shape:\n assert targ.shape[0] == pred.shape[0]\n assert targ.shape[1:] == pred.shape[2:]\n assert reduction in REDUCTIONS, f'Reduction {reduction} is not valid!'\n \n targ_ind = to_single_index(targ, keepdims=False) # BxHxWxD\n loss = F.nll_loss(torch.log(pred), targ_ind, weight=weights, \n reduction=reduction)\n return loss\n \n\n### ======================================================================== ###\n### * ### * ### * ### * Rudimentary Testing * ### * ### * ### * ###\n### ======================================================================== ###\n\n\ndef to_one_hot(target, C=None):\n \"\"\" \n Parameters\n tensor (torch.Tensor) - Input of shape Bx1xHxWxD\n C (int) - Number of classes\n Note: if C is none, max(unique_values) is used\n \"\"\"\n assert target.ndim == 5, 'Expected 5 dimensional input (Bx1xHxWxD)'\n assert target.shape[1] == 1, 'Target shape needs to be Bx1xHxWxD'\n if C is None:\n C = target.max() + 1\n one_hot_shape = (target.shape[0], C, *target.shape[2:])\n one_hot = torch.zeros(one_hot_shape, device=target.device)\n one_hot.scatter_(1, target, 1)\n return one_hot\n\ndef to_single_index(target, keepdims=True):\n \"\"\"\n Parameters\n target (tensor) - one-hot target BxCxHxWxD\n keepdims (bool) - if True, returns Bx1xHxWxD else BxHxWxD\n \"\"\"\n assert target.ndim == 5, 'Expected 5 dimensional input (BxCxHxWxD)'\n targ_ind = target.view(target.shape[0], target.shape[1], -1).argmax(1)\n targ_ind = targ_ind.view(target.shape[0], *target.shape[2:])\n if keepdims:\n targ_ind = targ_ind.unsqueeze(1)\n return targ_ind\n \nif __name__ == '__main__':\n \n ### Cross Entropy ###\n print(f\"Testing CE ..\", end='')\n C = 4\n pred = torch.randn(3, C, 1, 2, 3)\n targ = torch.randint(0, C, (3, 1, 1, 2, 3))\n assert torch.all(targ == to_single_index(to_one_hot(targ))) \n \n torch_ce = nn.CrossEntropyLoss()\n torch_loss = torch_ce(pred, targ.squeeze(1))\n \n targ_1h = to_one_hot(targ)\n loss = cross_entropy_loss(pred.softmax(1), targ_1h, reduction='mean')\n assert abs(torch_loss - loss) < 10**-6, \\\n f'Torch {torch_loss.item():.4f}, Ours {loss.item():.4f}.'\n print(f\"good ✔\")\n \n # Now weight weights\n print(f\"Testing CE w/class-weights..\", end='')\n pred = torch.randn(3, C, 1, 2, 3)\n targ = torch.randint(0, C, (3, 1, 1, 2, 3))\n weights = torch.randint(1, 3, (C,)).float()\n assert torch.all(targ == to_single_index(to_one_hot(targ))) \n \n for reduction in ('none', 'sum', 'mean'):\n print(f' reduction {reduction}..', end='')\n torch_ce = nn.CrossEntropyLoss(weight=weights, reduction=reduction)\n torch_loss = torch_ce(pred, targ.squeeze(1))\n\n targ_1h = to_one_hot(targ)\n # import IPython; IPython.embed(); \n loss = cross_entropy_loss(pred.softmax(1), targ_1h, reduction=reduction,\n weights=weights)\n \n assert torch.all((torch_loss - loss) < 10**-6), \\\n f'Torch {torch_loss:.4f}, Ours {loss:.4f}.'\n print(f\"good ✔\")\n \n ","repo_name":"charzharr/3D-medseg-pretraining","sub_path":"src/lib/assess/losses3d.py","file_name":"losses3d.py","file_ext":"py","file_size_in_byte":7722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40614971968","text":"# 输入 n(≤500) m(≤500) k 和一个 n 行 m 列的网格图,'#' 表示墙,'.' 表示平地。\n# 保证所有 '.' 可以互相到达(四方向连通)。保证 k 小于 '.' 的个数。\n# 你需要把恰好 k 个 '.' 修改成 'X',使得剩余的所有 '.' 
仍然是可以互相到达的。\n# 输出修改后的网格图。\nimport os, sys\nfrom io import BytesIO, IOBase\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, 8192))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self):\n while self.newlines == 0:\n b = os.read(self._fd, max(os.fstat(self._fd).st_size, 8192))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if self.writable:\n os.write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\nsys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)\n\ninput = lambda: sys.stdin.readline().strip()\nints = lambda: list(map(int, input().split()))\nInt = lambda: int(input())\n\ndef queryInteractive(a, b, c):\n print('? {} {} {}'.format(a, b, c))\n sys.stdout.flush()\n return int(input())\n\ndef answerInteractive(x1, x2):\n print('! {} {}'.format(x1, x2))\n sys.stdout.flush()\n\ninf = float('inf')\nfrom types import GeneratorType\n\ndef bootstrap(f, stack=[]):\n def wrappedfunc(*args, **kwargs):\n if stack:\n return f(*args, **kwargs)\n else:\n to = f(*args, **kwargs)\n while True:\n if type(to) is GeneratorType:\n stack.append(to)\n to = next(to)\n else:\n stack.pop()\n if not stack:\n break\n to = stack[-1].send(to)\n return to\n return wrappedfunc\n\ndef solve():\n n,m,k = map(int,input().split())\n t = [input().replace('.','X') for _ in range(n)]\n k = n * m - k - sum( i.count('#') for i in t) # 所有的点数-墙数,就是平地数,-k个平地,就是需要剩余的连通\n t = [ list(i) for i in t]\n i, q = 0,[]\n # 找到一个点\n while k:\n if 'X' in t[i]:\n j = t[i].index('X')\n t[i][j],q = '.',[(i,j)]\n k -= 1\n break\n i += 1\n while k:\n x,y = q.pop()\n for i,j in ((x,y-1),(x,y+1),(x-1,y),(x+1,y)):\n if 0<=i1500,np.abs(zs)>3193.9))[0]\n\n if len(stop_idx) > 0:\n stop_idx = stop_idx[0]\n\n else:\n return []\n\n traj = np.column_stack([xs[:stop_idx],ys[:stop_idx],zs[:stop_idx]])\n traj = traj.tolist()\n \n return traj\n\n\n#def get_part_traj(px,py,pz,x0,y0,z0):\ndef get_part_traj(pt,eta,phi,x0,y0,z0):\n\n #ptot = np.sqrt( px * px + py * py + pz * pz )\n #theta = 2*np.atan(np.e^(-1*eta)) #np.arccos( pz / ptot )\n #phi = np.arctan2(py, px)\n\n ptot = pt * np.cosh(eta);\n pz = pt * np.sinh(eta);\n px = pt * np.cos(phi);\n py = pt * np.sin(phi);\n\n times = range(10000)\n\n xs = np.array([x0 + t*px/ptot for t in times])\n ys = np.array([y0 + t*py/ptot for t in times])\n zs = np.array([z0 + t*pz/ptot for t in times])\n \n rs = np.sqrt( xs**2+ys**2 )\n\n stop_idx = np.where(np.logical_or(rs >1500,np.abs(zs)>3193.9))[0]\n\n if len(stop_idx) > 0:\n stop_idx = stop_idx[0]\n\n else:\n return []\n\n traj = np.column_stack([xs[:stop_idx],ys[:stop_idx],zs[:stop_idx]])\n traj = 
traj.tolist()\n \n return traj\n\ndef get_topoclusters(cell_topo_idx, cell_eta, cell_phi, cell_e):\n \n if len(cell_topo_idx) == 0:\n return []\n n_topoclusters = max(cell_topo_idx)\n\n topoclusters = []\n for i in range(n_topoclusters):\n mask = cell_topo_idx == i+1\n \n tmp_e = cell_e[mask].sum()\n tmp_eta = (cell_eta * cell_e)[mask].sum() / tmp_e\n tmp_phi = (cell_phi * cell_e)[mask].sum() / tmp_e\n\n tmp_tc = {\n 'energy': tmp_e.item(),\n 'eta' : tmp_eta.item(),\n 'phi' : tmp_phi.item()\n }\n \n topoclusters.append(tmp_tc)\n \n return topoclusters\n\ndef get_cells(cell_x, cell_y, cell_z, cell_e):\n \n cells = []\n \n for i,x in enumerate(cell_x):\n size = np.log(cell_e[i].item())*5\n cells.append({\n 'type': 'Point',\n 'pos': [cell_x[i].item(),cell_y[i].item(),cell_z[i].item()], #,size,size,size],\n #'x': cell_x[i].item(),\n #'y': cell_y[i].item(),\n #'z': cell_z[i].item(),\n 'color': '#FFFF00', #yellow\n })\n \n return cells\n\ndef energy_transparency(cell_e):\n \n return (np.log(cell_e) - np.min(np.log(cell_e)))/np.ptp(np.log(cell_e))\n\ndef get_color(layer,scaleby,topoidx=-1):\n\n #lay_max = np.array([\n # [0, 255, 128],\n # [0, 255, 128],\n # [0, 255, 128],\n # [127, 0, 255],\n # [127, 0, 255],\n # [127, 0, 255]\n # ])\n \n lay_max = np.array([[110, 222, 138],\n [146, 230, 167],\n [183, 239, 197],\n [129, 137, 255],\n [150, 162, 255],\n [174, 184, 255]\n ])\n\n if(topoidx > 0):\n topoidx = topoidx + 1 # avoid first (yellowey) color in cmap\n while topoidx>12:\n topoidx = topoidx - 12\n rgb = [int(cmap_topos(topoidx)[i]*255) for i in range(3)]\n else:\n rgb = lay_max[layer]\n rgb = rgb*scaleby\n rgb = np.clip(rgb,0,255).astype(int).tolist()\n\n #return f\"rgb({rgb[0]}, {rgb[1]}, {rgb[2]})\"\n return rgb\n\ndef get_vertices(cell_x, cell_y, cell_z, cell_l, cell_e, cell_topoidx, vertices_df, dump_vertices=False, dump_centres=False):\n \n if len(cell_x) == 0:\n return []\n merge_supercells = True\n size = 0.2\n vertices = []\n hashlist = []\n distlist = []\n colors = [\"rgb(110, 222, 138)\",\"rgb(146, 230, 167)\",\"rgb(183, 239, 197)\",\"rgb(129, 137, 255)\",\"rgb(150, 162, 255)\",\"rgb(174, 184, 255)\"]\n layer_noise = [13, 34.,41.,75.,50.,25.]\n cell_noise = cell_l.copy()\n for layer, noise in enumerate(layer_noise):\n cell_noise[cell_l==layer] = noise\n\n SNR = cell_e / cell_noise\n SNR_01 = np.clip(SNR/4.6,0.25,1)\n #SNR_01 = np.clip(2*SNR/4.6,0.4,1)\n #SNR_01 = (SNR - np.min(SNR)) / np.ptp(SNR)\n #SNR_01 = np.clip(SNR_01,0.0,1.0)\n #SNR_01 = np.power(np.arctan(SNR_01*np.pi/2),1./2)\n #SNR_01 = np.clip(SNR_01,0.1,1.0)\n energy_01 = (np.log(cell_e) - np.min(np.log(cell_e)))/np.ptp(np.log(cell_e))\n\n event_cells = np.column_stack([cell_x,cell_y,cell_z])\n \n for idx, cell in enumerate(tqdm(event_cells)):\n layer_df = vertices_df[vertices_df['layer']==cell_l[idx]]\n if len(layer_df) == 0: continue\n mpos = np.column_stack([layer_df['vmx'].to_numpy(),layer_df['vmy'].to_numpy(),layer_df['vmz'].to_numpy()])\n pos = np.repeat(np.expand_dims(cell,axis=0),len(mpos),axis=0)\n dist = np.linalg.norm(pos - mpos,axis=1)\n\n if(len(dist)==0):\n print(layer_df)\n print('cell_l=',cell_l[idx])\n i_min = np.argmin(dist)\n #if dist[i_min]>5:\n # continue\n \n distlist.append(dist[i_min])\n winner = layer_df.iloc[i_min]['hash'].item()\n if winner in hashlist:\n print('WARNING: found duplicate! 
hash=',winner)\n continue\n hashlist.append(winner)\n rows = layer_df[layer_df['hash']==winner]\n rows = rows.sort_values('eta_idx')\n\n \n if merge_supercells and not dump_vertices and not dump_centres:\n\n flat_coords = []\n \n if len(rows)>1:\n vlists = [\n [0,2,4,6],\n [1,3,5,7],\n ]\n\n first_and_last = [0,len(rows)-1] if len(rows)>1 else [0]\n\n for subidx,subcell in enumerate(first_and_last):\n row = rows.iloc[subcell]\n for v in vlists[subidx] :\n flat_coords = flat_coords + [row['v{}x'.format(v)].item(),row['v{}y'.format(v)].item(),row['v{}z'.format(v)].item()]\n else:\n row = rows\n for v in range(8):\n flat_coords = flat_coords + [row['v{}x'.format(v)].item(),row['v{}y'.format(v)].item(),row['v{}z'.format(v)].item()] \n \n\n layer = int(row['layer'].item())\n energy = cell_e[idx].item()\n #if layer > 2:\n # layer = layer - 1\n vertices.append({\n 'type': 'IrregularCaloCells',\n 'layer': layer,\n 'energy': energy,\n 'vtx': flat_coords,\n 'cluster': cell_topoidx[idx].item(),\n 'color': get_color(layer, 1.0, cell_topoidx[idx].item()),\n 'opacity': SNR_01[idx] #energy_01[idx].item(),\n })\n else:\n\n for _, row in rows.iterrows():\n\n if dump_centres: #dump the geometric centre of each cell as a point to display as a hit\n v = 'm'\n vertices.append({\n 'type': 'Point',\n 'pos': [row['v{}x'.format(v)].item(),row['v{}y'.format(v)].item(),row['v{}z'.format(v)].item()],\n 'color': '#FFFF00', #yellow\n })\n elif dump_vertices: #dump each vertex as a separate point to display as a hit\n for v in range(8):\n vertices.append({\n 'type': 'Point',\n 'pos': [row['v{}x'.format(v)].item(),row['v{}y'.format(v)].item(),row['v{}z'.format(v)].item()],\n 'color': '#FFFF00', #yellow\n })\n else: #dump full coords per cell to display as a polyhedron\n flat_coords = []\n vlist = range(8)\n for v in vlist:\n flat_coords = flat_coords + [row['v{}x'.format(v)].item(),row['v{}y'.format(v)].item(),row['v{}z'.format(v)].item()]\n\n layer = int(row['layer'].item())\n energy = cell_e[idx].item()\n #if layer > 2:\n # layer = layer - 1\n vertices.append({\n 'type': 'IrregularCaloCells',\n 'layer': layer,\n 'energy': energy,\n 'vtx': flat_coords,\n 'color': colors[layer],\n 'opacity': 1.0 #energy_01[idx].item(),\n })\n\n return vertices\n\ndef get_scaled_jet_e(e):\n x_, y_ = 20_000, 300_000\n x, y = 25_000, 40_000\n \n scale = (y - x) / (y_ - x_)\n scaled_e = (e - x_) * scale + x\n \n return scaled_e\n\ndef cocoa_to_phoenix(ntuple_path, cell_path, output_path, nevents=-1, firstevent=0):\n \n tree = uproot.open(ntuple_path)['Out_Tree']\n\n if nevents<0:\n nevents = tree.num_entries\n lastevent = min(tree.num_entries, firstevent + nevents)\n\n # truth particles\n part_pdgid = tree[\"particle_pdgid\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_pt = tree[\"particle_pt\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_eta = tree[\"particle_eta\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_phi = tree[\"particle_phi\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n # part_px = tree[\"particle_px\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n # part_py = tree[\"particle_py\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n # part_pz = tree[\"particle_pz\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_prod_x = tree[\"particle_prod_x\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_prod_y = 
tree[\"particle_prod_y\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n part_prod_z = tree[\"particle_prod_z\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n\n # tracks\n track_d0 = tree[\"track_d0\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n track_z0 = tree[\"track_z0\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n track_theta = tree[\"track_theta\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n track_phi = tree[\"track_phi\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n track_qoverp = tree[\"track_qoverp\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n track_pdgid = tree[\"track_pdgid\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n\n # photon conversion tracks\n conv_el_q = tree[\"conv_el_q\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_px = tree[\"conv_el_px\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_py = tree[\"conv_el_py\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_pz = tree[\"conv_el_pz\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_prod_x = tree[\"conv_el_prod_x\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_prod_y = tree[\"conv_el_prod_y\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n conv_el_prod_z = tree[\"conv_el_prod_z\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n\n # topoclusters\n cell_topo_idx = tree[\"cell_topo_idx\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n \n #cells\n cell_x = tree[\"cell_x\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n cell_y = tree[\"cell_y\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n cell_z = tree[\"cell_z\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n cell_l = tree[\"cell_layer\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n cell_eta = tree[\"cell_eta\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n cell_phi = tree[\"cell_phi\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n cell_e = tree[\"cell_e\"].array(library='np', entry_stop=lastevent,entry_start=firstevent) \n \n # jets\n true_jet_pt = tree[\"true_jet_pt\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n true_jet_eta = tree[\"true_jet_eta\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n true_jet_phi = tree[\"true_jet_phi\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n true_jet_m = tree[\"true_jet_m\"].array(library='np', entry_stop=lastevent,entry_start=firstevent)\n\n ### Retrieve map of cells\n cell_df = pkl.load(open(cell_path,'rb'))\n\n ### Make up for iron gap <-- already converted upstream\n cell_df.loc[ cell_df['layer']==4, 'layer'] = 3\n cell_df.loc[ cell_df['layer']==5, 'layer'] = 4\n cell_df.loc[ cell_df['layer']==6, 'layer'] = 5\n\n ### Calculate geometric means of each cell from its 8 vertices\n cell_df['vmx'] = np.mean(np.column_stack([cell_df['v{}x'.format(i)].to_numpy() for i in range(8)]),axis=1)\n cell_df['vmy'] = np.mean(np.column_stack([cell_df['v{}y'.format(i)].to_numpy() for i in range(8)]),axis=1)\n cell_df['vmz'] = np.mean(np.column_stack([cell_df['v{}z'.format(i)].to_numpy() for i in range(8)]),axis=1)\n\n event_data = {}\n \n for i in range(nevents):\n \n event_dict = 
dict()\n \n event_dict['event number']: i\n event_dict['run number'] : 0\n\n \n # \n # tracks and trajectories\n #------------------\n \n n_tracks = track_d0[i].shape[0]\n \n tracks = {}\n all_tracks = []\n el_tracks = []; chhad_tracks = []; mu_tracks = []\n\n for j in range(n_tracks):\n track_traj = get_track_traj(\n track_d0[i][j], track_z0[i][j], track_theta[i][j], track_phi[i][j], track_qoverp[i][j])\n\n tmp_track = {\n 'pos': track_traj,\n 'color': '0xff0000'\n }\n all_tracks.append(tmp_track)\n \n if track_pdgid[i][j] in [11, -11] :\n tmp_track = {\n 'pos': track_traj,\n 'color': '0xff8700'\n }\n el_tracks.append(tmp_track)\n\n elif track_pdgid[i][j] in [13, -13] :\n tmp_track = {\n 'pos': track_traj,\n 'color': '0xffd100'\n }\n mu_tracks.append(tmp_track)\n\n else:\n tmp_track = {\n 'pos': track_traj,\n 'color': '0xff5d00'\n }\n chhad_tracks.append(tmp_track)\n\n # Add displaced tracks of electrons from photon conversions\n n_conv_el = conv_el_prod_x[i].shape[0]\n \n conv_el_tracks = []\n\n for j in range(n_conv_el):\n\n #conv_el_d0, conv_el_z0 = get_d0_z0_from_xyz(conv_el_prod_x[i][j] ,conv_el_prod_y[i][j],conv_el_prod_z[i][j],conv_el_px[i][j],conv_el_py[i][j],conv_el_pz[i][j],1,3.8) #TODO charge\n conv_el_p = np.sqrt(conv_el_px[i][j]**2 + conv_el_py[i][j]**2 + conv_el_pz[i][j]**2)\n conv_el_theta = np.arccos(conv_el_pz[i][j]/conv_el_p)\n conv_el_phi = np.arctan2(conv_el_py[i][j],conv_el_px[i][j])\n conv_el_traj = get_track_traj(\n 0, 0, conv_el_theta, conv_el_phi, conv_el_q[i][j]/conv_el_p, [conv_el_prod_x[i][j],conv_el_prod_y[i][j],conv_el_prod_z[i][j]])\n\n tmp_track = {\n 'pos': conv_el_traj,\n 'color': '0xff0000'\n }\n conv_el_tracks.append(tmp_track)\n\n # Add neutral particle trajectories as \"tracks\" with no curvature and drawn dashed (TODO)\n ph_trajs = []; nuhad_trajs = [];\n\n for j in range(part_pdgid[i].shape[0]):\n if part_pdgid[i][j] == 22 or part_pdgid[i][j] == 111 :\n\n #part_traj = get_part_traj(part_px[i][j], part_py[i][j], part_pz[i][j], part_prod_x[i][j], part_prod_y[i][j], part_prod_z[i][j])\n part_traj = get_part_traj(part_pt[i][j], part_eta[i][j], part_phi[i][j], part_prod_x[i][j], part_prod_y[i][j], part_prod_z[i][j])\n\n tmp_traj = {\n 'pos': part_traj,\n 'color': '0xff0000'\n }\n ph_trajs.append(tmp_traj)\n\n tracks['all_tracks'] = all_tracks\n tracks['el_tracks'] = el_tracks\n tracks['chhad_tracks'] = chhad_tracks\n tracks['mu_tracks'] = mu_tracks\n tracks['conv_el_tracks']= conv_el_tracks\n tracks['ph_trajs'] = ph_trajs\n\n event_dict['Tracks'] = tracks\n \n \n # \n # jets\n #------------------ \n\n jets = {}\n\n n_true_jets = true_jet_pt[i].shape[0]\n true_jets = []\n\n for j in range(n_true_jets):\n jet_p = true_jet_pt[i][j] * np.cosh(true_jet_eta[i][j]) \n jet_e = np.sqrt(true_jet_m[i][j] **2 + jet_p **2)\n \n tmp_jet = {\n 'eta': true_jet_eta[i][j].item(),\n 'phi': true_jet_phi[i][j].item(),\n 'coneR': 0.4,\n 'energy': get_scaled_jet_e(jet_e.item()),\n# 'color': '0x72fcfc'\n }\n true_jets.append(tmp_jet)\n jets['truth jets'] = true_jets\n \n \n event_dict['Jets'] = jets\n \n \n \n # \n # calclusters\n #------------------\n \n calo_clusters = {}\n calo_clusters['topoclusters'] = get_topoclusters(cell_topo_idx[i], cell_eta[i], cell_phi[i], cell_e[i]) \n event_dict['CaloClusters'] = calo_clusters\n\n #\n # hits\n #------------------\n \n cells = {}\n cells['hits'] = get_cells(cell_x[i],cell_y[i],cell_z[i],cell_e[i])\n event_dict['Hits'] = cells\n \n #\n # cells\n #------------------\n \n cells = {}\n cells['centres'] = 
get_vertices(cell_x[i],cell_y[i],cell_z[i],cell_l[i],cell_e[i],cell_topo_idx[i],cell_df,dump_centres=True)\n event_dict['Hits'] = cells\n\n \n #\n # COCOACaloCells\n #------------------\n \n scc = {}\n scc['vertices'] = get_vertices(cell_x[i],cell_y[i],cell_z[i],cell_l[i],cell_e[i],cell_topo_idx[i],cell_df)\n event_dict['IrregularCaloCells'] = scc\n \n \n # event\n event_data[f'event_num_{i}'] = event_dict\n \n json_object = json.dumps(event_data, indent=2)\n with open(output_path, \"w\") as outfile:\n outfile.write(json_object)\n\n print(\"Results written to \",output_path)\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\",\"--input\", dest=\"input\", type=str, help=\"path to input ROOT file\", required=True)\n parser.add_argument(\"-o\",\"--output\", dest=\"output\", type=str, help=\"path to output json file\", default=\"events.json\")\n parser.add_argument(\"-c\",\"--cells\", dest=\"cells\", type=str, help=\"path to cell geometry lookup table (pkl)\", default=\"cells_cocoa_default.pkl\")\n parser.add_argument(\"-n\",\"--nevents\", dest=\"nevents\", type=int, help=\"number of events to parse\", default=1)\n args = parser.parse_args()\n\n cocoa_to_phoenix(args.input, args.cells, args.output, args.nevents)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"cocoa-hep/cocoa-hep","sub_path":"COCOA/phoenix/event/dump_phoenix_eventdata.py","file_name":"dump_phoenix_eventdata.py","file_ext":"py","file_size_in_byte":21795,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"23564533461","text":"tc = int(input())\ndef check(n):\n for i in range(1,len(str(n))):\n if int(n[i-1]) > int(n[i]):\n return False\n return True\n\nfor i in range(tc):\n lp = int(input())\n for j in range(lp,-1,-1):\n if check(str(j)):\n print(\"Case #{}: {}\".format(i+1,j))\n break\n ","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/5426.py","file_name":"5426.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71271585796","text":"\"\"\" views.py \"\"\"\nimport json\nfrom datetime import datetime, date, timedelta\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.views import View\nfrom django.views.generic.edit import UpdateView, CreateView, FormView\nfrom django.views.generic.detail import DetailView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import F, Sum, Avg\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse_lazy, reverse\nfrom userPortal.models import Child, UpdateEvent, DailyRequirementsReport\nfrom userPortal.forms import UpdateBlocksForm, ScreentimePrereqsForm\nfrom userPortal.forms import UpdateDollarsForm\nfrom userPortal.signals.signals import child_logged_out, child_logged_in\nfrom adminPortal.forms import StoreItemRequestForm\nfrom adminPortal.models import Store_item\n\ndef logout(request):\n \"\"\" Send logout signal to userPortal.signals.handlers \"\"\"\n child_logged_out.send(sender=request.user)\n return HttpResponseRedirect(reverse('userPortal:logout'))\n\ndef login(request):\n \"\"\" Send login signal to userPortal.signals.handlers \"\"\"\n child_logged_in.send(sender=request.user)\n return HttpResponseRedirect(reverse('userPortal:users home'))\n\nclass Index(LoginRequiredMixin, View):\n template_name = 'userPortal/index.html' \n\n 
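# Renders the child's dashboard; add_block_value is the current block\n # count plus one, modulo five (presumably for the template's next-block preview).\n 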
def get(self, request):\n this_user = request.user\n this_child = this_user.child\n add_block_value = (this_child.blocks + 1) % 5\n \n return render(request, self.template_name,\n context={'add_block_value': add_block_value})\n\nclass UpdateBlocksView(View):\n \"\"\" Handle Child blocks update \"\"\"\n model = Child\n success_url = '/'\n\n def post(self, request, **kwargs):\n \"\"\" Handle blocks update \"\"\"\n this_child = Child.objects.get(pk=kwargs['pk'])\n blocks_amount = int(self.request.POST.get('blocks', ''))\n update_event = UpdateEvent(\n user=request.user, \n type=0, \n amount=blocks_amount, \n reason=self.request.POST.get('reason', 'because'))\n if blocks_amount:\n coin_amount = 0\n current_blocks_amount = this_child.blocks + blocks_amount\n if current_blocks_amount > 4:\n coin_amount = int(current_blocks_amount / 4)\n current_blocks_amount = current_blocks_amount % 5\n elif current_blocks_amount < 0:\n current_blocks_amount = 0\n this_child.blocks = current_blocks_amount\n if coin_amount > 0:\n this_child.coins = F('coins') + coin_amount\n this_child.save()\n update_event.save()\n return HttpResponseRedirect('/')\n\nclass UpdateDollarsView(UpdateView):\n \"\"\" Handle Child dollars update \"\"\"\n \n def post(self, request, **kwargs):\n \"\"\" Handle dollars update \"\"\"\n this_child = Child.objects.get(pk=kwargs['pk'])\n current_dollars_amount = float(this_child.dollars)\n dollars_amount = float(self.request.POST.get('dollars',''))\n result_amount = current_dollars_amount + dollars_amount\n \n # Save UpdateEvent\n update_event = UpdateEvent(\n user=request.user, \n type=1, \n amount=int(dollars_amount), \n reason=self.request.POST.get('reason', 'because'))\n if result_amount > 0:\n this_child.dollars = F('dollars') + dollars_amount\n this_child.save()\n update_event.save()\n return HttpResponseRedirect('/')\n\nclass UpdateMinutesView(UpdateView):\n \"\"\" Handle Child minutes_left update \"\"\"\n\n def post(self, request, **kwargs):\n \"\"\" Handle minutes_left update \"\"\"\n\n this_child = Child.objects.get(pk=kwargs['pk'])\n new_minutes = int(self.request.POST.get('minutes_left', '-1'))\n screentime_is_on = False\n if self.request.POST.get('screentime_is_on', '') == \"True\":\n screentime_is_on = True\n current_minutes = this_child.minutes_left\n \n if new_minutes > 0:\n \n if new_minutes > current_minutes:\n current_minutes = current_minutes - 1\n else:\n current_minutes = new_minutes\n\n # Create UpdateEvent every 10 minutes of screentime\n if current_minutes % 10 == 0:\n # Save UpdateEvent\n update_event = UpdateEvent(\n user=request.user, \n type=2, \n amount=-10, \n reason='Screentime timer running')\n update_event.save()\n \n this_child.minutes_left = current_minutes\n this_child.screentime_is_on = screentime_is_on;\n this_child.save()\n return HttpResponse(json.dumps(\n {\n 'minutes_left': current_minutes,\n 'screentime_is_on': screentime_is_on,\n }))\n\nclass BuyMinutesView(UpdateView):\n \"\"\" Handle Child minutes purchase \"\"\"\n \n MINUTES_RATE = 30\n\n def post(self, request, **kwargs):\n \"\"\" Handle minutes purchase request \"\"\"\n\n this_child = Child.objects.get(pk=kwargs['pk'])\n current_minutes = this_child.minutes_left\n current_dollars = this_child.dollars\n if self.request.POST.get('buy', '') == \"True\"\\\n and current_dollars >= 1.00:\n current_minutes += self.MINUTES_RATE\n current_dollars = current_dollars - 1\n\n # Save UpdateEvent\n update_event = UpdateEvent(\n user=request.user, \n type=6, \n amount=1.00, \n reason='Buy Screentime 
button')\n update_event.save()\n\n this_child.minutes_left = current_minutes\n this_child.dollars = current_dollars;\n this_child.save()\n return HttpResponse(json.dumps(\n {\n 'minutes_left': current_minutes,\n 'dollars': float(current_dollars)\n }))\n\nclass ChildDetailView(DetailView):\n \"\"\" Display Child Details \"\"\"\n \n model = Child\n\nclass DailyReportDetailView(DetailView):\n \"\"\" Daily Report Details \"\"\"\n \n model = DailyRequirementsReport\n \nclass ScreentimePrereqsView(CreateView):\n \"\"\" Display ScreentimePrereqsForm \"\"\"\n form_class = ScreentimePrereqsForm\n template_name = 'screentimePrereqs.html'\n \n def form_valid(self, form):\n new_report = form.save(commit=False)\n this_user = self.request.user\n this_child = this_user.child\n\n new_report.user = this_user\n new_report.save()\n this_child.most_recent_screentime_ready = new_report.timestamp\n this_child.is_ready_for_screens = True\n this_child.save()\n return HttpResponseRedirect('/')\n\nclass IXLReportView(View):\n \"\"\" \n Displays Child IXL statistics from their submitted\n DailyRequirementsReports\n \"\"\"\n \n template_name = 'userPortal/ixl_report.html'\n \n def get(self, request, *args, **kwargs):\n \"\"\" Display report \"\"\"\n\n this_child = Child.objects.get(pk=kwargs['pk'])\n this_user = this_child.user\n start_date = this_user.date_joined.date()\n one_year_ago = date.today() - timedelta(days=365)\n one_month_ago = date.today() - timedelta(days=30)\n one_week_ago = date.today() - timedelta(days=6)\n this_sunday = date.today() - timedelta(days=(date.today().weekday() + 1) % 8)\n print('this_sunday date: {}'.format(this_sunday))\n print('one_week_ago date: {}'.format(one_week_ago))\n reports_year = DailyRequirementsReport.objects.filter(user=this_user).\\\n filter(timestamp__range=(one_year_ago, date.today())).\\\n order_by('timestamp').only('ixl_math_completed', 'ixl_language_arts_completed', 'timestamp')\n reports_month = reports_year.\\\n filter(timestamp__range=(one_month_ago, date.today())).\\\n order_by('timestamp')\n reports_week = reports_month.\\\n filter(timestamp__range=(one_week_ago, date.today())).\\\n order_by('timestamp')\n reports_since_sunday = reports_week.\\\n filter(timestamp__range=(this_sunday, date.today())).\\\n order_by('timestamp')\n\n # Yearly ixl data\n if start_date < one_year_ago:\n start_date = one_year_ago\n reports_this_year = reports_year.count()\n date_of_first_yearly_report = reports_year.earliest('timestamp').timestamp\n yearly_data = reports_year.\\\n aggregate(num_math = Sum('ixl_math_completed'), \n num_la = Sum('ixl_language_arts_completed'))\n \n math_this_year = yearly_data['num_math']\n la_this_year = yearly_data['num_la']\n math_avg_this_year = math_this_year / ((date.today() - start_date).days + 1)\n la_avg_this_year = la_this_year / ((date.today() - start_date).days + 1)\n \n # Monthly ixl data\n if start_date < one_month_ago:\n start_date = one_month_ago\n reports_this_month = reports_month.count()\n date_of_first_monthly_report = reports_month.earliest('timestamp').timestamp\n monthly_data = reports_month.\\\n aggregate(num_math = Sum('ixl_math_completed'),\n num_la = Sum('ixl_language_arts_completed'))\n \n math_this_month = monthly_data['num_math']\n la_this_month = monthly_data['num_la']\n math_avg_this_month = math_this_month / ((date.today() - start_date).days + 1)\n la_avg_this_month = la_this_month / ((date.today() - start_date).days + 1)\n \n # Weekly ixl data\n if start_date < one_week_ago:\n start_date = one_week_ago\n reports_this_week 
= reports_week.count()\n        date_of_first_weekly_report = reports_week.earliest('timestamp').timestamp\n        weekly_data = reports_week.\\\n            aggregate(num_math = Sum('ixl_math_completed'),\n            num_la = Sum('ixl_language_arts_completed'))\n        math_this_week = weekly_data['num_math']\n        la_this_week = weekly_data['num_la']\n        math_avg_this_week = math_this_week / ((date.today() - start_date).days + 1)\n        la_avg_this_week = la_this_week / ((date.today() - start_date).days + 1)\n        print('number of days this week: {}'.format(str((date.today() - start_date).days)))\n\n        # Since Sunday ixl data\n        if start_date < this_sunday:\n            start_date = this_sunday\n            print('start date: {}'.format(this_sunday))\n        reports_this_since_sunday = reports_since_sunday.count()\n        date_of_first_report_since_sunday = reports_since_sunday.earliest('timestamp').timestamp\n        since_sunday_data = reports_since_sunday.\\\n            aggregate(num_math = Sum('ixl_math_completed'),\n            num_la = Sum('ixl_language_arts_completed'))\n        \n        math_since_sunday = since_sunday_data['num_math']\n        la_since_sunday = since_sunday_data['num_la']\n        math_avg_since_sunday = math_since_sunday / ((date.today() - start_date).days + 1)\n        la_avg_since_sunday = la_since_sunday / ((date.today() - start_date).days + 1)\n        context = {\n            'yearly_count': reports_this_year,\n            'math_this_year': math_this_year,\n            'la_this_year': la_this_year,\n            'math_avg_this_year': round(math_avg_this_year, 2),\n            'la_avg_this_year': round(la_avg_this_year, 2),\n            'monthly_count': reports_this_month,\n            'math_this_month': math_this_month,\n            'la_this_month': la_this_month,\n            'math_avg_this_month': round(math_avg_this_month, 2),\n            'la_avg_this_month': round(la_avg_this_month, 2),\n            'weekly_count': reports_this_week,\n            'math_this_week': math_this_week,\n            'la_this_week': la_this_week,\n            'math_avg_this_week': round(math_avg_this_week, 2),\n            'la_avg_this_week': round(la_avg_this_week, 2),\n            'since_sunday_count': reports_this_since_sunday,\n            'math_since_sunday': math_since_sunday,\n            'la_since_sunday': la_since_sunday,\n            'math_avg_since_sunday': round(math_avg_since_sunday, 2),\n            'la_avg_since_sunday': round(la_avg_since_sunday, 2),\n        }\n        return render(request, template_name=self.template_name, context=context)\n\nclass CoinStoreView(LoginRequiredMixin, View):\n    template_name = 'userPortal/coin_store.html'\n    \n    def get(self, request, *args, **kwargs):\n        store_items = Store_item.objects.all()\n        context = {'store_items': store_items }\n        \n        return render(request, template_name=self.template_name, context=context)\n    \nclass CoinItemRequestView(LoginRequiredMixin, CreateView):\n    \"\"\" Coin Item Request Form \"\"\"\n    template_name = 'storeItemRequestForm.html'\n    form_class = StoreItemRequestForm\n    success_url = '/'\n    def form_valid(self, form):\n        this_request = form.save(commit=False)\n        this_request.child = self.request.user\n        return super(CoinItemRequestView, self).form_valid(form)\n    ","repo_name":"danielsbonnin/b_and_d","sub_path":"userPortal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36982525155","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom mosaicode.GUI.fieldtypes import *\nfrom mosaicode.model.blockmodel import BlockModel\n\nclass MIDIToFloat(BlockModel):\n\n    # -------------------------------------------------------------------------\n    def __init__(self):\n        BlockModel.__init__(self)\n\n        
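# Block metadata read by the Mosaicode GUI: code language, extension pack, help text, label, and palette color.\n        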
self.language = \"c\"\n self.extension = \"sound\"\n self.help = \"MIDI to float\"\n self.label = \"MIDI_to_float\"\n self.color = \"140:114:114:150\"\n self.ports = [{\"type\":\"mosaicode_lib_c_sound.extensions.ports.midi\",\n \"name\":\"input\",\n \"conn_type\":\"Input\",\n \"label\":\"MIDI message\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"type\",\n \"conn_type\":\"Output\",\n \"label\":\"Message type\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"channel\",\n \"conn_type\":\"Output\",\n \"label\":\"MIDI channel\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"note\",\n \"conn_type\":\"Output\",\n \"label\":\"MIDI note\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"velocity\",\n \"conn_type\":\"Output\",\n \"label\":\"MIDI velocity (Note)\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"param\",\n \"conn_type\":\"Output\",\n \"label\":\"Param (Control)\"},\n {\"type\":\"mosaicode_lib_c_base.extensions.ports.float\",\n \"name\":\"control_value\",\n \"conn_type\":\"Output\",\n \"label\":\"Value (Control)\"}]\n\n self.group = \"Conversion\"\n self.codes[\"declaration\"] = \\\n\"\"\"\nfloat_callback *$port[type]$;\nint $port[type]$_size = 0;\nfloat_callback *$port[channel]$;\nint $port[channel]$_size = 0;\nfloat_callback *$port[note]$;\nint $port[note]$_size = 0;\nfloat_callback *$port[velocity]$;\nint $port[velocity]$_size = 0;\nfloat_callback *$port[param]$;\nint $port[param]$_size = 0;\nfloat_callback *$port[control_value]$;\nint $port[control_value]$_size = 0;\n\nvoid $port[input]$(snd_seq_event_t *ev){\n for(int i=0 ; i < $port[type]$_size ; i++){\n // Call the stored functions\n (*($port[type]$[i]))(ev->type);\n }\n\n for(int i=0 ; i < $port[channel]$_size ; i++){\n // Call the stored functions\n (*($port[channel]$[i]))(ev->data.note.channel);\n }\n\n for(int i=0 ; i < $port[note]$_size ; i++){\n // Call the stored functions\n (*($port[note]$[i]))(ev->data.note.note);\n }\n\n for(int i=0 ; i < $port[velocity]$_size ; i++){\n // Call the stored functions\n (*($port[velocity]$[i]))(ev->data.note.velocity);\n }\n\n for(int i=0 ; i < $port[param]$_size ; i++){\n // Call the stored functions\n (*($port[param]$[i]))(ev->data.control.param);\n }\n\n for(int i=0 ; i < $port[control_value]$_size ; i++){\n // Call the stored functions\n (*($port[control_value]$[i]))(ev->data.control.value);\n }\n}\n\"\"\"\n","repo_name":"Alice-ArtsLab/mosaicode-c-sound","sub_path":"mosaicode_lib_c_sound/extensions/blocks/Conversion/miditofloat.py","file_name":"miditofloat.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32273306176","text":"import os\nimport unittest\n\nif __name__ == \"__main__\":\n import utils\n utils.import_depends()\n\nfrom brokertest import TestBrokerCommand\n\n\nclass TestAddSandbox(TestBrokerCommand):\n\n def testaddutsandbox(self):\n self.noouttest([\"add\", \"sandbox\", \"--sandbox\", \"utsandbox\",\n \"--comments\", \"Sandbox used for aqd unit tests\",\n \"--noget\", \"--start=prod\"])\n sandboxdir = os.path.join(self.sandboxdir, \"utsandbox\")\n self.failIf(os.path.exists(sandboxdir),\n \"Did not expect directory '%s' to exist\" % sandboxdir)\n\n def testverifyaddunittestsandbox(self):\n kingdir = self.config.get(\"broker\", \"kingdir\")\n (out, err) = self.gitcommand([\"show-ref\", \"--hash\", \"refs/heads/prod\"],\n 
cwd=kingdir)\n head = out.strip()\n\n command = \"show sandbox --sandbox utsandbox\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out, \"Sandbox: utsandbox\", command)\n self.matchoutput(out, \"Comments: Sandbox used for aqd unit tests\",\n command)\n self.matchoutput(out, \"Base Commit: %s\" % head, command)\n self.matchclean(out, \"Path\", command)\n\n def testverifyshowpath(self):\n user = self.config.get(\"broker\", \"user\")\n command = \"show sandbox --sandbox %s/utsandbox --pathonly\" % user\n out = self.commandtest(command.split(\" \"))\n sandboxdir = os.path.join(self.sandboxdir, \"utsandbox\")\n self.matchoutput(out, sandboxdir, command)\n\n def testverifynoauthor(self):\n command = \"show sandbox --sandbox utsandbox --pathonly\"\n out = self.badrequesttest(command.split(\" \"))\n self.matchoutput(out,\n \"Must specify sandbox as author/branch \"\n \"when using --pathonly\",\n command)\n\n def testaddchangetest1sandbox(self):\n user = self.config.get(\"unittest\", \"user\")\n command = [\"add\", \"sandbox\", \"--sandbox\", \"%s/changetest1\" % user]\n (out, err) = self.successtest(command)\n self.matchoutput(err, \"creating %s\" % self.sandboxdir, command)\n sandboxdir = os.path.join(self.sandboxdir, \"changetest1\")\n self.matchoutput(out, \"Created sandbox: %s\" % sandboxdir, command)\n self.failUnless(os.path.exists(sandboxdir),\n \"Expected directory '%s' to exist\" % sandboxdir)\n\n def testverifyaddchangetest1sandbox(self):\n user = self.config.get(\"unittest\", \"user\")\n sandboxdir = os.path.join(self.sandboxdir, \"changetest1\")\n command = \"show sandbox --sandbox %s/changetest1\" % user\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out, \"Sandbox: changetest1\", command)\n self.matchoutput(out, \"Path: %s\" % sandboxdir, command)\n self.matchclean(out, \"Comments\", command)\n\n def testaddchangetest2sandbox(self):\n command = [\"add\", \"sandbox\", \"--sandbox\", \"changetest2\"]\n # Progress report may be displayed on stderr, ignore it\n out, err = self.successtest(command)\n sandboxdir = os.path.join(self.sandboxdir, \"changetest2\")\n self.matchoutput(out, \"Created sandbox: %s\" % sandboxdir, command)\n self.failUnless(os.path.exists(sandboxdir),\n \"Expected directory '%s' to exist\" % sandboxdir)\n\n def testverifyaddchangetest2sandbox(self):\n command = \"show sandbox --sandbox changetest2\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out, \"Sandbox: changetest2\", command)\n\n def testuppercase1(self):\n # For testing mixed-case add.\n command = [\"add\", \"sandbox\", \"--sandbox\", \"CamelCaseTest1\"]\n # Progress report may be displayed on stderr, ignore it\n out, err = self.successtest(command)\n sandboxdir = os.path.join(self.sandboxdir, \"camelcasetest1\")\n self.matchoutput(out, \"Created sandbox: %s\" % sandboxdir, command)\n self.failUnless(os.path.exists(sandboxdir),\n \"Expected directory '%s' to exist\" % sandboxdir)\n\n def testuppercase2(self):\n # For testing deletion of a sandbox added with mixed case.\n command = [\"add\", \"sandbox\", \"--sandbox\", \"CamelCaseTest2\"]\n # Progress report may be displayed on stderr, ignore it\n out, err = self.successtest(command)\n sandboxdir = os.path.join(self.sandboxdir, \"camelcasetest2\")\n self.matchoutput(out, \"Created sandbox: %s\" % sandboxdir, command)\n self.failUnless(os.path.exists(sandboxdir),\n \"Expected directory '%s' to exist\" % sandboxdir)\n\n def testverifylowerbranchname(self):\n command = ['branch', '-r']\n sandboxdir = 
os.path.join(self.sandboxdir, \"camelcasetest1\")\n        (out, err) = self.gitcommand(command, cwd=sandboxdir)\n        self.matchoutput(out, \"origin/camelcasetest1\", command)\n\n    def testverifyshowmixedcase(self):\n        command = \"show sandbox --sandbox CamelCaseTest1\"\n        out = self.commandtest(command.split(\" \"))\n        self.matchoutput(out, \"Sandbox: camelcasetest1\", command)\n\n    def testverifyshowlowercase(self):\n        command = \"show sandbox --sandbox camelcasetest1\"\n        out = self.commandtest(command.split(\" \"))\n        self.matchoutput(out, \"Sandbox: camelcasetest1\", command)\n\n    def testaddbaduser(self):\n        command = [\"add\", \"sandbox\",\n                   \"--sandbox\", \"user-does-not-exist/badbranch\"]\n        err = self.badrequesttest(command)\n        user = self.config.get(\"unittest\", \"user\")\n        self.matchoutput(err,\n                         \"User '%s' cannot add or get a sandbox on \"\n                         \"behalf of 'user-does-not-exist'.\" % user,\n                         command)\n\n    def testverifyall(self):\n        command = \"show sandbox --all\"\n        out = self.commandtest(command.split(\" \"))\n        self.matchoutput(out, \"Sandbox: utsandbox\", command)\n        self.matchoutput(out, \"Sandbox: changetest1\", command)\n        self.matchoutput(out, \"Sandbox: changetest2\", command)\n\n    def testfailinvalid(self):\n        command = \"add sandbox --sandbox bad:characters!\"\n        out = self.badrequesttest(command.split(\" \"))\n        self.matchoutput(out, \"sandbox name 'bad:characters!' is not valid\",\n                         command)\n\n    def testslashinuserid(self):\n        test_user = \"user1\" + '/' + \"test\"\n        command = \"add sandbox --sandbox '%s/nevermade'\" % test_user\n        err = self.unauthorizedtest(command.split(\" \"))\n        self.matchoutput(err, \"Unauthorized anonymous access attempt\"\n                         \" to add_sandbox on /sandbox/command/add\", command)\n\n    def testverifysearch(self):\n        user = self.config.get(\"unittest\", \"user\")\n        command = [\"search\", \"sandbox\", \"--owner\", user]\n        out = self.commandtest(command)\n        self.matchoutput(out, \"utsandbox\", command)\n\n\nif __name__ == '__main__':\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestAddSandbox)\n    unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"gombasg/aquilon","sub_path":"tests/broker/test_add_sandbox.py","file_name":"test_add_sandbox.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"11396471671","text":"import random\nHANGMAN_PICS = ['''\n  +---+\n      |\n      |\n      |\n     ===''', '''\n  +---+\n  0   |\n      |\n      |\n     ===''', '''\n  +---+\n  0   |\n  |   |\n      |\n     ===''', '''\n  +---+\n  0   |\n /|   |\n      |\n     ===''', '''\n  +---+\n  0   |\n /|\\  |\n      |\n     ===''', '''\n  +---+\n  0   |\n /|\\  |\n /    |\n     ===''', '''\n  +---+\n  0   |\n /|\\  |\n / \\  |\n     ===''', '''\n  +---+\n [0   |\n /|\\  |\n / \\  |\n     ===''', '''\n  +---+\n [0]  |\n /|\\  |\n / \\  |\n     ===''']\nwords = {'Животные':'аист акула бабуин баран барсук бобр бык верблюд волк воробей выдра ворон сова тигр окунь хорек черепаха ястреб ящерица'.split(),\n         'Фигуры':'ромб квадрат прямоугольник треугольник круг пятиугольник шестиугольник эллипс трапеция параллелограмм'.split()}\n\ndef getRandomWord(wordDict):\n    #wordIndex = random.randint(0, len(wordList) - 1)\n    #return wordList[wordIndex]\n    wordKey = random.choice(list(wordDict.keys()))\n    wordIndex = random.randint(0, len(wordDict[wordKey]) -1)\n    return [wordDict[wordKey][wordIndex], wordKey]\n\ndef displayBoard(missedLetters, correctLetters, secretWord):\n    print(HANGMAN_PICS[len(missedLetters)])\n    print()\n\n    print('Ошибочные буквы:', end=' ')\n    for letter in 
missedLetters:\n print(letter, end=' ')\n print()\n\n blanks = '_' * len(secretWord)\n\n for i in range(len(secretWord)):\n if secretWord[i] in correctLetters:\n blanks = blanks[:i] + secretWord[i] + blanks[i+1:]\n\n for letter in blanks:\n print(letter, end=' ')\n print()\n\ndef getGuess(alreadyGuessed):\n\n while True:\n print('Введите букву.')\n guess = input()\n guess = guess.lower()\n if len(guess) != 1:\n print('Пожалуйста, введите одну букву.')\n elif guess in alreadyGuessed:\n print('Вы уже называли эту букву. Назовите другую.')\n elif guess not in 'абвгдежзийклмнопрстуфхцчшщъыьэюя':\n print('Пожалуйста, введите БУКВУ.')\n else:\n return guess\n\ndef playAgain():\n print('Хотите сыграть еще?(да или нет)')\n return input().lower().startswith('д')\n\n\n\nprint('В И С Е Л И Ц А')\n\ndifficulty = 'x'\nwhile difficulty not in 'ЛСТ':\n print('Выберите уровень сложности: Л - Легкий, С - Средний, Т - Тяжелый')\n difficulty = input().upper()\nif difficulty == 'С':\n del HANGMAN_PICS[8]\n del HANGMAN_PICS[7]\nif difficulty == 'Т':\n del HANGMAN_PICS[8]\n del HANGMAN_PICS[7]\n del HANGMAN_PICS[5]\n del HANGMAN_PICS[3]\n\nmissedLetters = ''\ncorrectLetters = ''\nsecretWord, secretSet = getRandomWord(words)\ngameIsDone = False\n\nwhile True:\n print('Секретное слово из набора: ' + secretSet)\n displayBoard(missedLetters, correctLetters, secretWord)\n\n\n guess = getGuess(missedLetters + correctLetters)\n\n if guess in secretWord:\n correctLetters = correctLetters + guess\n \n\n foundAllLetters = True\n for i in range(len(secretWord)):\n if secretWord[i] not in correctLetters:\n foundAllLetters = False\n break\n if foundAllLetters:\n print('ДА! Секретное слово - \"' + secretWord + '\"! Вы угадали!')\n gameIsDone = True\n else:\n missedLetters = missedLetters + guess\n\n\n if len(missedLetters) == len(HANGMAN_PICS) - 1:\n displayBoard(missedLetters, correctLetters, secretWord)\n print('Вы исчерпали все попытки!\\nНеугадано букв:' + str(len(missedLetters)) + 'и угадано букв:' + str(len(correctLetters)) + '. 
Было загадано слово\"' + secretWord + '\".')\n gameIsDone = True\n if gameIsDone:\n if playAgain():\n missedLetters = ''\n correctLetters = ''\n gameIsDone = False\n secretWord, secretSet = getRandomWord(words)\n else:\n break\n \n \n","repo_name":"Lemonochel/python_example","sub_path":"visilitsa_test2.py","file_name":"visilitsa_test2.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27806273284","text":"from pxr import Usd, Sdf, UsdGeom, UsdShade, UsdLux\n\n# Create new USD file\nstage = Usd.Stage.CreateNew(\"shaping_light.usda\")\n\n# Add assets into stage\nscene = stage.DefinePrim(\"/Scene\")\nscene.GetReferences().AddReference(\"./monkey_set.usda\")\n\n# Light\nsphereLight = UsdLux.SphereLight.Define(stage, '/Scene/Light/DiskLight')\nsphereLight.AddTranslateOp().Set(value=(0, 4, 0))\nsphereLight.AddRotateYOp().Set(value=(90))\nsphereLight.AddRotateXOp().Set(value=(-90))\nsphereLight.CreateIntensityAttr().Set(5)\n\napi = UsdLux.ShapingAPI(sphereLight)\napi.CreateShapingConeAngleAttr().Set(30)\n\n# Save as USD file\nstage.Save()","repo_name":"kat0c0tak/cedec2021","sub_path":"lighting/05_shaping_light.py","file_name":"05_shaping_light.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"29136658514","text":"import csv\n\n# with open(\"28_names.csv\", \"r\") as csv_file:\n# print(new_file.read())\t\t# to read the names.csv file\n# csv_reader = csv.reader(csv_file)\n# print(csv_reader)\n# next(csv_reader) # for eliminate the heading in result for example 1 in the sense last name not able to print on result k got it\n\n# # create a new file and paste the same conent in the new file as in the 28_names.csv\n# with open(\"28.2_copy.csv\", \"w\") as copy_file:\n# csv_writer = csv.writer(copy_file, delimiter=\"\\t\") \t\t# \\t in the sense tab by tab we devide the sentence like delimiter ok\n# for line in csv_reader:\n# # print(line)\n# # print(line[0]) # 0 first name 1 last name 2 email\n# csv_writer.writerow(line)\n\n\n# instead of working with read and write we work on csv data with dictionary reador and dictionary writer ok corey prefer methods best methods.\n# now dictionary Read\n# with open(\"28_names.csv\", \"r\") as csv_file:\n# csv_reader = csv.DictReader(csv_file)\n# for line in csv_reader:\n# print(line['first_name'])\n# print(line['last_name'])\n# print(line[\"email\"])\n\n\n# now dictionary write\n# with open(\"28_names.csv\", \"r\") as csv_file:\n# csv_reader = csv.DictReader(csv_file)\n\n# with open(\"28.2_copy.csv\", \"w\") as copy_file:\n# fieldnames = [\"first_name\", \"last_name\", \"email\"]\n\n# csv_writer = csv.DictWriter(copy_file, fieldnames=fieldnames, delimiter=\"\\t\") \t\t# \\t in the sense tab by tab we devide the sentence like delimiter ok\n# csv_writer.writeheader()\n# for line in csv_reader:\n# csv_writer.writerow(line)\n\n# to get only first name and last name without email in the 28.2 copy file taht is eeasy with dictionary read and write methods got it\nwith open(\"28_names.csv\", \"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n with open(\"28.2_copy.csv\", \"w\") as copy_file:\n fieldnames = [\"first_name\", \"last_name\"]\n\n csv_writer = csv.DictWriter(copy_file, fieldnames=fieldnames, delimiter=\"\\t\") \t\t# \\t in the sense tab by tab we devide the sentence like delimiter ok\n csv_writer.writeheader()\n for line in csv_reader:\n del 
line[\"email\"]\n csv_writer.writerow(line)\n","repo_name":"rajeshsvv/Lenovo_Back","sub_path":"1 PYTHON/2 COREY SCHAFER/PART 1/28.1_parse_csv.py","file_name":"28.1_parse_csv.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35705210810","text":"import requests\nfrom bs4 import BeautifulSoup\n\n# initialize arrays\nsummaries = []\ntitles = []\nlocations = []\n\n# loop through the first 1000 search results pages\nfor i in range(1,1000):\n\t\n\t# each search page url has a multiple of 10 at the end\n\tindex = str(10*i)\n\t\n\t# request the search results page \n\tr = requests.get('https://www.indeed.com/jobs?q=&l=United+States&start='+index)\n\tsoup = BeautifulSoup(r.text, 'html.parser')\n\t\n\t# loop through each search result (job post) \n\tresults = soup.find_all('a', attrs={'class':'turnstileLink', 'data-tn-element':'jobTitle'})\n\tfor link in results:\n\t\treq = requests.get('https://www.indeed.com/'+link['href'])\n\t\tsoup = BeautifulSoup(req.text, 'html.parser')\n\t\t\n\t\t# find the job title\n\t\tt = soup.find_all('b', attrs={'class':'jobtitle'})\n\t\ttitles.append(t[0].text)\n\t\t\n\t\t# find the location\n\t\tl = soup.find_all('span', attrs={'class':'location'})\n\t\tlocations.append(l[0].text)\n\t\t\n\t\t# find the job summary\n\t\ts = soup.find_all('span', attrs={'class':'summary'})\n\t\tsummaries.append(s[0].text)\n\nprint(len(summaries))\n\n# write the job postings to a text file\nf = open('alljobs.txt', 'w')\nfor j in range(1,len(summaries)):\n\tf.write(str(titles[j])+'\\n')\n\tf.write(str(locations[j]+'\\n'))\n\tf.write(str(summaries[j])+'\\n\\n')\nf.close()\n","repo_name":"NolanJMcCafferty/jobs-research","sub_path":"scrape_and_prep/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32706953075","text":"# coding:utf-8\n\nimport HTMLTestRunner\nimport unittest\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef all_case():\n # 待执行用例目录\n case_dir = r\"C:\\Users\\Administrator\\apitest\\case\"\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_dir, pattern='test_teacher.py', top_level_dir=None)\n # discover筛选出来的用例,循环添加到测试套件中去\n # for test_suit in discover:\n # for test_case in test_suit:\n # testcase.addTest(test_case)\n testcase.addTests(discover)\n print (testcase)\n return testcase\n\nif __name__==\"__main__\":\n # runner = unittest.TextTestRunner()\n report_path = r\"C:\\Users\\Administrator\\apitest\\case\\report\\result.html\"\n fp = open(report_path, \"wb\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'接口测试报告', description=u'用例执行情况:')\n runner.run(all_case())\n fp.close()","repo_name":"hikaruwin/hikaru","sub_path":"apitest/case/run_all_case.py","file_name":"run_all_case.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38090027619","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Impact of Covid-19 on Salaries & Employment\n\n# The aim of this analysis is to highlight potential impacts that the covid-19 pandemic may have had on the average salary/bonus/total yearly compensation, and also its effects on employment rates.\n\n# ## Importing Relevant Packages and Datasets\n\n# In[1]:\n\n\nimport findspark\nfindspark.init()\n\n\n# In[2]:\n\n\nimport numpy as 
np\nimport pyspark\nfrom pyspark.sql import SparkSession, Row\nfrom pyspark.sql.types import *\nimport pyspark.sql.functions as F\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pyspark.sql.functions import col, to_timestamp, unix_timestamp, to_date, to_date, col, year, month\n\n\n# In[3]:\n\n\nspark = pyspark.sql.SparkSession.builder.master(\"local\").getOrCreate()\n\n\n# In[4]:\n\n\n# Let's define our schema\nschema = StructType([ StructField(\"date\", DateType(), True), StructField(\"time\", StringType(), True), StructField(\"company\", StringType(), True), StructField(\"level\", StringType(), True), StructField(\"title\", StringType(), True), StructField(\"totalyearlycompensation\", IntegerType(), False), StructField(\"location\", StringType(), True), StructField(\"yearsofexperience\", FloatType(), False), StructField(\"yearsatcompany\", FloatType(), False), StructField(\"tag\", StringType(), True), StructField(\"basesalary\", IntegerType(), False), StructField(\"stockgrantvalue\", IntegerType(), False), StructField(\"bonus\", IntegerType(), False), StructField(\"gender\", StringType(), True), StructField(\"cityid\", StringType(), True), StructField(\"dmaid\", StringType(), True), StructField(\"race\", StringType(), True), StructField(\"education\", StringType(), True)])\n\n# Load and parse the data file, converting it to a DataFrame.\ndata = spark.read.format(\"csv\") .option(\"header\", \"false\") .option(\"delimiter\", \"\\t\") .schema(schema) .load(\"../data/seperated_time_data/cleaned.txt\")\n\n\n# ## Creating new Column for Month/Year to allow for Grouping\n\n# In[5]:\n\n\nadjusted_data_1 = data.withColumn(\"date\", to_date(col(\"date\"), \"yyyy-MM-dd\")) .withColumn('month', month(\"date\"))\n\nadjusted_data_2 = adjusted_data_1.withColumn(\"date\", to_date(col(\"date\"), \"yyyy-MM-dd\")) .withColumn('year', year(\"date\"))\n\n\n# In[6]:\n\n\n# Combining the columns 'month' and 'year' to a column of DateType()\nnew_data = adjusted_data_2.withColumn(\n \"date\",\n F.date_format(F.expr(\"make_date(year, month, 1)\"), \"MM/dd/yyyy\")\n)\n\n\n# ## Impact of Covid-19 on all Earnings\n\n# In[7]:\n\n\n# Selecting only the columns we need for our analysis\nselected_data = new_data.select(to_date(col(\"date\"),\"MM/dd/yyyy\").alias(\"date\"), 'totalyearlycompensation', 'basesalary', 'bonus')\n\n\n# In[8]:\n\n\n# Grouping data by month and year, and calculating the average values per month\ngrouped_data = selected_data.groupby('date').mean('totalyearlycompensation', 'basesalary', 'bonus')\n\n\n# In[9]:\n\n\n# Rounding the data to the nearest 2 decimals\nrounded_data1 = grouped_data.withColumn(\"avg(basesalary)\", F.round(grouped_data[\"avg(basesalary)\"], 2))\nrounded_data2 = rounded_data1.withColumn(\"avg(totalyearlycompensation)\", F.round(rounded_data1[\"avg(totalyearlycompensation)\"], 2))\nrounded_data3 = rounded_data2.withColumn(\"avg(bonus)\", F.round(rounded_data2[\"avg(bonus)\"], 2))\n\n\n# In[10]:\n\n\n# Separating the different types of earnings into different datasets and converting to Pandas\ntyc_data = rounded_data3.select('date', 'avg(totalyearlycompensation)').toPandas()\nsalary_data = rounded_data3.select('date', 'avg(basesalary)').toPandas()\nbonus_data = rounded_data3.select('date', 'avg(bonus)').toPandas()\n\n\n# In[11]:\n\n\n# Plotting each earning type on a line chart\nfig, ax = plt.subplots(figsize=(20, 7))\ntyc_data.plot(x=\"date\", ax=ax)\nsalary_data.plot(x=\"date\", ax=ax)\nbonus_data.plot(x=\"date\", ax=ax)\nplt.title(\"Earnings in STEM from 2017 to 
2021\", fontsize=26)\nplt.xlabel(\"Date\", fontsize=20)\nplt.ylabel(\"Earnings\", fontsize=20)\nplt.legend(labels=[\"Total Yearly Compensation\", \"Base Salary\", \"Bonus\"], loc='best')\nplt.show()\n\n\n# The chart above shows that there was virtually no effect on average earnings related to Covid-19. The pandemic began in early 2020, and the chart does not show any noticeable increase/decrease in earnings, rather the earnings just continue to decline slightly from 2018. \n\n# # Identifying the number of Records created Each Month\n\n# In[12]:\n\n\n# Selecting only the columns we need for our analysis\nselected_data = new_data.select(to_date(col(\"date\"),\"MM/dd/yyyy\").alias(\"date\"))\n\n\n# In[13]:\n\n\n# Grouping data by month and year, and calculating the number of entries for each month\ngrouped_data = selected_data.groupby('date').count().toPandas()\n\n\n# In[14]:\n\n\n# Plotting the number of entries recorded each month\nfig, ax = plt.subplots(figsize=(20, 7))\ngrouped_data.plot(x=\"date\", ax=ax)\nplt.title(\"Number of Entries Recorded in Dataset\", fontsize=26)\nplt.xlabel(\"Date\", fontsize=16)\nplt.ylabel(\"Number of Entries\", fontsize=16)\nplt.show()\n\n\n# There is a similar finding in this chart also, the pandemic does not seem to have an effect on the number of entries recorded each month. We expect that the employment rates are relative to the number of entries recorded, and so we can deduce that the employment rates in the STEM sectors have been steadily increasing since mid-2018.\n","repo_name":"scummins00/ca4022_assignment_3","sub_path":"book/_build/jupyter_execute/Impact of Covid-19 on Salaries & Employment.py","file_name":"Impact of Covid-19 on Salaries & Employment.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27522811843","text":"from query_expansion import QE\nfrom preprocessing import Prapengolahan\nfrom vector_space_model import VSM\nfrom docs import Dokumen\nfrom term_weighting import PembobotanKata\nimport sys\nimport re\nfrom PyQt5.uic import loadUi\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QTableWidget, QTableWidgetItem\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import pyqtSlot, Qt\nfrom deep_translator import GoogleTranslator\nimport subprocess\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n loadUi(\"CLIR-QE.ui\", self)\n pixmap = QPixmap('images/AIR&D.jpg')\n self.label.setPixmap(pixmap)\n pixmap = QPixmap('images/logo-unsri.png')\n self.label_2.setPixmap(pixmap)\n self.tabWidget.removeTab(1)\n self.tableWidget.setColumnWidth(1, 610)\n self.parm = 0\n self.tableWidget.move(0, 0)\n self.tableWidget.setEditTriggers(QTableWidget.NoEditTriggers)\n self.tabWidget.tabCloseRequested.connect(lambda index: self.tabWidget.removeTab(index))\n self.tableWidget.doubleClicked.connect(self.read_more)\n self.searchBtn.clicked.connect(self.src)\n self.qeBtn.clicked.connect(self.search)\n self.resetBtn.clicked.connect(self.reset)\n self.manualBtn.clicked.connect(self.manual)\n self.exitBtn.clicked.connect(self.exit)\n\n def reset(self):\n print(\"Reset..\")\n if len(self.tabWidget) > 1:\n for tab in range(len(self.tabWidget)-1):\n self.tabWidget.removeTab(1)\n self.qeSystem.setChecked(True)\n self.allres = []\n self.qe0 = []\n self.qe1 = []\n self.qe2 = []\n self.qe3 = []\n self.bool = False\n self.inside_qe = False\n self.allQuery.setChecked(True)\n 
self.inputQuery.setText(\"\")\n self.sysResult.setPlainText(f\"Kueri awal: -\\n\\nHasil kueri ekspansi teratas;\\n1. -\\n2. -\\n3. -\")\n self.label_1.setText(\"Hasil Pencarian dengan Kueri - : x artikel\")\n self.tableWidget.setRowCount(0)\n\n def manual(self):\n print(\"Membuka pdf manual..\")\n path = 'manual.pdf'\n subprocess.Popen([path], shell=True)\n\n def exit(self):\n print(\"Exiting..\")\n sys.exit()\n\n def src(self):\n self.allres = []\n self.qe0 = []\n self.qe1 = []\n self.qe2 = []\n self.qe3 = []\n self.bool = False\n self.inside_qe = False\n if self.parm == 0:\n self.inv_idx = Dokumen().inverted_index()\n self.tf_idf_doc = PembobotanKata().create_tf_idf()\n self.parm += 1\n self.allQuery.setChecked(True)\n self.search()\n\n def search(self):\n query = \"\"\n if self.allQuery.isChecked():\n query = self.inputQuery.text()\n self.bool = True\n else:\n print(\"Memilih Kueri Ekspansi\")\n self.inside_qe = True\n qe = self.sysResult.toPlainText()\n qe = qe.split(\"\\n\")\n new_qe = []\n if self.ogQuery.isChecked():\n self.bool = True\n que = qe[0]\n query = que[12:]\n\n for qe in qe[3:]:\n new_qe.append(qe[3:])\n\n if self.qeRes1.isChecked():\n query = new_qe[0]\n if len(self.qe1) != 0:\n self.bool = True\n else:\n self.bool = False\n if new_qe[0] == \"-\":\n query = \"-\"\n\n elif self.qeRes2.isChecked():\n query = new_qe[1]\n if len(self.qe2) != 0:\n self.bool = True\n else:\n self.bool = False\n if new_qe[1] == \"-\":\n query = \"-\"\n\n elif self.qeRes3.isChecked():\n query = new_qe[2]\n if len(self.qe3) != 0:\n self.bool = True\n else:\n self.bool = False\n if new_qe[2] == \"-\":\n query = \"-\"\n\n if query == \"\":\n msg = QtWidgets.QMessageBox()\n msg.setText(\"Kueri belum dimasukkan!\")\n msg.setIcon(QtWidgets.QMessageBox.Critical)\n msg.exec_()\n elif query == \"-\":\n msg = QtWidgets.QMessageBox()\n msg.setText(\"Kueri ekspansi tidak ada! 
Cek kueri pilihanmu\")\n msg.setIcon(QtWidgets.QMessageBox.Critical)\n msg.exec_()\n else:\n translated_query = GoogleTranslator(source='id', target='en').translate(query)\n print(translated_query)\n temp = False\n check_res = []\n if self.qeSystem.isChecked() & self.allQuery.isChecked() and len(self.allres) == 0:\n temp = True\n exp_query, rule = QE().expanding_query(translated_query)\n # exp_query;\n # [winter, wintertime, storm, violent_storm]\n # [pollution, befoulment, defilement, contamination]\n allterm = []\n for term in exp_query:\n if \"_\" in term:\n output = re.sub(r'_', ' ', term)\n term = output\n allterm.append(term)\n\n allquery = []\n arr1 = []\n arr2 = []\n arr3 = []\n k = 0\n if rule == 1:\n altres = []\n for item in allterm:\n allquery.append(item)\n q_terms = Prapengolahan().tokenize_and_extract(item)\n print()\n print(q_terms)\n vec_space_model = VSM(self.inv_idx, self.tf_idf_doc)\n res = vec_space_model.cos_sim(q_terms)\n\n if res != -999:\n\n for item in res:\n if k == 0:\n self.qe0.append(item)\n elif k == 1:\n self.qe1.append(item)\n elif k == 2:\n self.qe2.append(item)\n elif k == 3:\n self.qe3.append(item)\n\n if item[0] not in altres:\n altres.append(item[0])\n self.allres.append(item)\n k += 1\n check_res = self.allres\n elif rule == 0:\n arr1 = [allterm[0]]\n arr2 = [allterm[1]]\n elif rule == 2:\n arr1 = allterm[0:2]\n arr2 = allterm[2:4]\n elif rule == 3:\n arr1 = [allterm[0]]\n arr2 = allterm[1:3]\n elif rule == 4:\n arr1 = allterm[0:2]\n arr2 = [allterm[2]]\n elif rule == 5:\n arr1 = allterm[0:2]\n arr2 = allterm[2:4]\n arr3 = allterm[4:6]\n elif rule == 6:\n arr1 = allterm[0:2]\n arr2 = [allterm[2]]\n arr3 = [allterm[3]]\n elif rule == 7:\n arr1 = [allterm[0]]\n arr2 = allterm[1:3]\n arr3 = [allterm[3]]\n elif rule == 8:\n arr1 = [allterm[0]]\n arr2 = [allterm[1]]\n arr3 = allterm[2:4]\n elif rule == 9:\n arr1 = [allterm[0]]\n arr2 = allterm[1:3]\n arr3 = allterm[3:5]\n elif rule == 10:\n arr1 = allterm[0:2]\n arr2 = [allterm[2]]\n arr3 = allterm[3:5]\n elif rule == 11:\n arr1 = allterm[0:2]\n arr2 = allterm[2:4]\n arr3 = [allterm[4]]\n elif rule == 12:\n arr1 = [allterm[0]]\n arr2 = [allterm[1]]\n arr3 = [allterm[2]]\n if rule == 2 or rule == 3 or rule == 4:\n altres = []\n for item1 in arr1:\n for item2 in arr2:\n # \"winter storm\", \"winter violent storm\", \"wintertime storm\", \"wintertime violent storm\"\n que = item1 + \" \" + item2\n allquery.append(que)\n q_terms = Prapengolahan().tokenize_and_extract(que)\n print()\n print(q_terms)\n # [winter storm], [winter violent storm], [wintertime storm], [wintertime violent storm]\n vec_space_model = VSM(self.inv_idx, self.tf_idf_doc)\n res = vec_space_model.cos_sim(q_terms)\n # [23: 0.82, 45: 0.67] atau -999\n if res != -999:\n for item in res:\n if k == 0:\n self.qe0.append(item)\n elif k == 1:\n self.qe1.append(item)\n elif k == 2:\n self.qe2.append(item)\n elif k == 3:\n self.qe3.append(item)\n\n if item[0] not in altres:\n altres.append(item[0])\n self.allres.append(item)\n k += 1\n check_res = self.allres\n elif rule >= 5:\n j = 0\n altres = []\n for item1 in arr1:\n for item2 in arr2:\n for item3 in arr3:\n if j < 4:\n que = item1 + \" \" + item2 + \" \" + item3\n allquery.append(que)\n q_terms = Prapengolahan().tokenize_and_extract(que)\n print()\n print(q_terms)\n vec_space_model = VSM(self.inv_idx, self.tf_idf_doc)\n res = vec_space_model.cos_sim(q_terms)\n if res != -999:\n for item in res:\n if k == 0:\n self.qe0.append(item)\n elif k == 1:\n self.qe1.append(item)\n elif k == 2:\n 
self.qe2.append(item)\n elif k == 3:\n self.qe3.append(item)\n\n if item[0] not in altres:\n altres.append(item[0])\n self.allres.append(item)\n j += 1\n k += 1\n check_res = self.allres\n if len(allquery) == 4:\n self.sysResult.setPlainText(\n f\"Kueri awal: {allquery[0]}\\n\\nHasil kueri ekspansi teratas;\\n1. {allquery[1]}\\n2. {allquery[2]}\\n3. {allquery[3]}\")\n elif len(allquery) == 3:\n self.sysResult.setPlainText(\n f\"Kueri awal: {allquery[0]}\\n\\nHasil kueri ekspansi teratas;\\n1. {allquery[1]}\\n2. {allquery[2]}\\n3. -\")\n elif len(allquery) == 2:\n self.sysResult.setPlainText(\n f\"Kueri awal: {allquery[0]}\\n\\nHasil kueri ekspansi teratas;\\n1. {allquery[1]}\\n2. -\\n3. -\")\n else:\n self.sysResult.setPlainText(\n f\"Kueri awal: {allquery[0]}\\n\\nHasil kueri ekspansi teratas;\\n1. -\\n2. -\\n3. -\")\n\n elif not self.qeSystem.isChecked() and len(self.allres) == 0: # elif self.radioButton_2.isChecked():\n query_exp = Prapengolahan().tokenize_and_extract(translated_query)\n print()\n print(query_exp)\n vec_space_model = VSM(self.inv_idx, self.tf_idf_doc)\n res = vec_space_model.cos_sim(query_exp)\n # [('4304', 1.0000000000000002), ('378', 1.0000000000000002), ('4935', 0.6283072553180628)]\n self.allres = res\n check_res = res\n self.qe0 = self.allres\n\n if res == -999:\n check_res = []\n\n if not self.qeSystem.isChecked():\n self.sysResult.setPlainText(\n f\"Kueri awal: {query}\\n\\nHasil kueri ekspansi teratas;\\n1. -\\n2. -\\n3. -\")\n elif self.bool:\n check_res = self.allres\n else:\n check_res = []\n\n if len(check_res) == 0:\n if self.allQuery.isChecked():\n self.label_1.setText(\n f\"Hasil Pencarian dengan gabungan semua kueri: 0 artikel\") # docs_len / len(res)\n else:\n self.label_1.setText(\n f\"Hasil Pencarian dengan Kueri ’{query}’: 0 artikel\") # docs_len / len(res)\n self.tableWidget.setRowCount(0)\n msg = QtWidgets.QMessageBox()\n msg.setText(\"Artikel Tidak Ditemukan!\")\n msg.setIcon(QtWidgets.QMessageBox.Warning)\n msg.exec_()\n else:\n docs_len = 0\n results = []\n qeres = self.allres\n if self.ogQuery.isChecked():\n qeres = self.qe0\n elif self.qeRes1.isChecked():\n qeres = self.qe1\n elif self.qeRes2.isChecked():\n qeres = self.qe2\n elif self.qeRes3.isChecked():\n qeres = self.qe3\n\n for item in qeres:\n if item[1] >= 0.851:\n results.append(item)\n docs_len += 1\n if self.allQuery.isChecked():\n self.label_1.setText(\n f\"Hasil Pencarian dengan gabungan semua kueri: {docs_len} artikel\") # docs_len / len(res)\n else:\n self.label_1.setText(f\"Hasil Pencarian dengan Kueri ’{query}’: {docs_len} artikel\") # docs_len / len(res)\n\n self.tableWidget.setRowCount(docs_len)\n\n corpus = Dokumen().document_retr()\n allcossim = []\n idx = 0\n if temp:\n results = sorted(results, key=lambda x: x[1], reverse=True)\n\n self.id_doc = []\n self.allnews = []\n self.alltitle = []\n doc_list = []\n\n for item in results:\n self.id_doc.append(item[0])\n cossim = \"%.2f\" % item[1]\n allcossim.append(cossim)\n doc = corpus[str(item[0])]\n j = 0\n k = 0\n n = 4000\n doc_news = doc[1]\n for i in range(0, len(doc_news), n):\n if len(doc_news) > n and i + n < len(doc_news):\n while doc_news[i + n + k] != \" \" and doc_news[i+n+k] != \".\" and doc_news[i+n+k] != \"”\" and doc_news[i+n+k] != \"\\\"\": #\n k += 1\n doc_list.append(doc_news[i + j:i + n + k])\n j = k\n\n news = \"\"\n for text in doc_list:\n translated = GoogleTranslator(source='en', target='id').translate(text)\n news += translated + \" \"\n\n self.allnews.append(news) # news\n\n doc_title = 
GoogleTranslator(source='en', target='id').translate(doc[0])\n self.alltitle.append(doc_title)\n\n doc_list = []\n idx += 1\n\n idx = 0\n for id in self.id_doc:\n item = QTableWidgetItem(str(id))\n item.setTextAlignment(Qt.AlignCenter)\n self.tableWidget.setItem(idx, 0, item)\n idx += 1\n idx = 0\n for news in self.allnews:\n self.tableWidget.setItem(idx, 1, QTableWidgetItem(news))\n idx += 1\n idx = 0\n for cossim in allcossim:\n cos = str(cossim)\n item = QTableWidgetItem(cos[:6])\n item.setTextAlignment(Qt.AlignCenter)\n self.tableWidget.setItem(idx, 2, item)\n idx += 1\n print(\"Pencarian selesai..\")\n\n @pyqtSlot()\n def read_more(self):\n self.new_tab = QtWidgets.QWidget()\n title = \"\"\n text = \"\"\n title_tab = \"\"\n for currentQTableWidgetItem in self.tableWidget.selectedItems():\n title = self.alltitle[currentQTableWidgetItem.row()]\n text = self.allnews[currentQTableWidgetItem.row()]\n title_tab = self.id_doc[currentQTableWidgetItem.row()]\n\n self.tabWidget.addTab(self.new_tab, f\"Dok {title_tab}\")\n self.new_layout = QtWidgets.QVBoxLayout(self.new_tab)\n self.new_label = QtWidgets.QLabel(self.new_tab)\n # title = \"Kemungkinan penyebab di balik pemboman St. Petersburg\"\n self.new_label.setText(title)\n self.new_label.setAlignment(Qt.AlignCenter)\n font = QtGui.QFont()\n font.setPointSize(12)\n font.underline()\n self.new_label.setFont(font)\n self.new_textBrowser = QtWidgets.QTextBrowser(self.new_tab)\n self.textBrowser.setFrameShape(QtWidgets.QFrame.NoFrame)\n self.textBrowser.setReadOnly(True)\n font2 = QtGui.QFont()\n font2.setPointSize(10)\n self.new_textBrowser.setFont(font2)\n self.new_textBrowser.setAlignment(Qt.AlignBaseline)\n self.new_textBrowser.setText(text)\n self.new_layout.addWidget(self.new_label)\n self.new_layout.addWidget(self.new_textBrowser)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n mainWindow = MainWindow()\n widget = QtWidgets.QStackedWidget()\n widget.addWidget(mainWindow)\n widget.setFixedWidth(985)\n widget.setFixedHeight(770)\n widget.show()\n\n try:\n sys.exit(app.exec_())\n except:\n print(\"Exiting..\")","repo_name":"ErwinSputra/CLIR-QE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28330718438","text":"\"\"\"app entry point\"\"\"\r\nfrom flask import Flask, Blueprint, render_template\r\nfrom model import Actor, Director, Movie, Genre, Review, User\r\nfrom memory_repo import abstract\r\n \r\napp = Flask(__name__)\r\n\r\ndata_file,genre_file, director_file,actor_file, = abstract()\r\n\r\nhome_blueprint = Blueprint(\r\n 'home_bp', __name__)\r\n\r\n\r\n@app.route(\"/\")\r\n@home_blueprint.route('/', methods=['GET'])\r\ndef home():\r\n return render_template(\r\n 'home.html',\r\n movie_file = data_file\r\n )\r\n\r\nbrowse_blueprint = Blueprint(\r\n 'browse_bp', __name__)\r\n\r\n\r\n@app.route(\"/browse\")\r\n@browse_blueprint.route('browse', methods=['GET'])\r\ndef browse():\r\n return render_template(\r\n 'browse.html',\r\n movie_file = data_file\r\n )\r\n\r\ngenre_blueprint = Blueprint(\r\n 'genre_bp', __name__)\r\n\r\n@app.route(\"/genre\")\r\n@genre_blueprint.route('genre', methods=['GET'])\r\ndef genre():\r\n return render_template(\r\n 'genre.html',\r\n movie_file = data_file,\r\n genres = genre_file\r\n )\r\ndirector_blueprint = Blueprint(\r\n 'director_bp', __name__)\r\n\r\n@app.route(\"/director\")\r\n@director_blueprint.route('director', methods=['GET'])\r\ndef 
director():\r\n return render_template(\r\n 'director.html',\r\n movie_file = data_file,\r\n directors = director_file\r\n )\r\n\r\nactor_blueprint = Blueprint(\r\n 'actor_bp', __name__)\r\n\r\n@app.route(\"/actor\")\r\n@actor_blueprint.route('actor', methods=['GET'])\r\ndef actor():\r\n return render_template(\r\n 'actor.html',\r\n movie_file = data_file,\r\n actors = actor_file\r\n )\r\n\r\nsearch_blueprint = Blueprint(\r\n 'search_bp', __name__)\r\n\r\n@app.route(\"/search\")\r\n@search_blueprint.route('search', methods=['GET', 'POST'])\r\ndef search():\r\n return render_template(\r\n 'search.html',\r\n movie_file = data_file,\r\n actors = actor_file,\r\n directors = director_file,\r\n genres = genre_file\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n app.run()","repo_name":"Tintock/rmil247_235A2","sub_path":"wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38208083649","text":"from textblob import TextBlob\nfrom textblob_fr import PatternTagger, PatternAnalyzer\nfrom newspaper import Article\n\nurl = ''\narticle = Article(url)\n\narticle.download()\narticle.parse()\narticle.nlp()\n\ntext = article.text\nprint(text)\n\nblob = TextBlob(text)\n# French version\n# blob = TextBlob(text, pos_tagger=PatternTagger(), analyzer=PatternAnalyzer())\nsentiment = blob.sentiment.polarity # -1 to 1\nprint(sentiment)\n","repo_name":"KevinD-UP/TextSentimentAnalysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20700573855","text":"import utils.loc as loc\nimport os\nimport pickle\ndef pkl_to_tsv(gantype, version, p):\n if gantype == \"wgan\":\n dir = loc.cvaewgan_sample_dir\n else:\n dir = loc.cvaegan_sample_dir\n\n file = os.path.join(dir, \"sample_version_{}_p_{}.pkl\".format(version, p))\n f = open(file, \"rb\")\n pic = pickle.load(f)\n\n feature_tsv = open(os.path.join(dir, \"sample_version_{}_p_{}.tsv\".format(version, p)), \"w\")\n label_tsv = open(os.path.join(dir, \"sample_version_{}_p_{}_label.tsv\".format(version, p)), \"w\")\n\n for loader_type in pic.keys():\n for clazz in pic[loader_type].keys():\n if clazz != \"loader\":\n for pic_num in pic[loader_type][clazz]:\n if pic_num.startswith(\"pic\"):\n # \"feat\" \"recon\" \"sample\"\n block = pic[loader_type][clazz][pic_num]\n for type in [\"feat\", \"recon\", \"sample\"]:\n for i, feat in enumerate(block[type]):\n print(feat.shape)\n feature_tsv.write('\\t'.join(list(feat.astype(\"str\"))) + \"\\n\")\n label_tsv.write(\"{}_{}_{}_{}_{}\\n\".format(loader_type, clazz, pic_num, type, i))\n\nif __name__ == '__main__':\n pkl_to_tsv(\"gan\", 0, 1)\n","repo_name":"Lectures2Code/slml","sub_path":"utils/pkl_to_tsv.py","file_name":"pkl_to_tsv.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15332746039","text":"from homeassistant.components.climate.const import (\r\n SWING_OFF,\r\n SWING_VERTICAL,\r\n ClimateEntityFeature,\r\n HVACMode,\r\n)\r\nfrom homeassistant.const import UnitOfTemperature\r\n\r\nfrom ..const import EBERG_COOLY_C35HD_PAYLOAD\r\nfrom ..helpers import assert_device_properties_set\r\nfrom ..mixins.climate import TargetTemperatureTests\r\nfrom ..mixins.select import BasicSelectTests\r\nfrom .base_device_tests import TuyaDeviceTestCase\r\n\r\nPOWER_DPS = 
\"1\"\r\nUNKNOWN4_DPS = \"4\"\r\nHVACMODE_DPS = \"5\"\r\nTEMPERATURE_DPS = \"6\"\r\nFAN_DPS = \"8\"\r\nUNIT_DPS = \"10\"\r\nUNKNOWN13_DPS = \"13\"\r\nUNKNOWN14_DPS = \"14\"\r\nUNKNOWN15_DPS = \"15\"\r\nSWING_DPS = \"16\"\r\nUNKNOWN17_DPS = \"17\"\r\nTEMPF_DPS = \"18\"\r\nUNKNOWN19_DPS = \"19\"\r\n\r\n\r\nclass TestEbergCoolyC35HDHeatpump(\r\n BasicSelectTests, TargetTemperatureTests, TuyaDeviceTestCase\r\n):\r\n __test__ = True\r\n\r\n def setUp(self):\r\n self.setUpForConfig(\r\n \"eberg_cooly_c35hd.yaml\",\r\n EBERG_COOLY_C35HD_PAYLOAD,\r\n )\r\n self.subject = self.entities.get(\"climate\")\r\n self.setUpTargetTemperature(\r\n TEMPERATURE_DPS,\r\n self.subject,\r\n min=13,\r\n max=32,\r\n )\r\n self.setUpBasicSelect(\r\n UNIT_DPS,\r\n self.entities.get(\"select_temperature_unit\"),\r\n {\r\n True: \"Fahrenheit\",\r\n False: \"Celsius\",\r\n },\r\n )\r\n self.mark_secondary([\"select_temperature_unit\"])\r\n\r\n def test_supported_features(self):\r\n self.assertEqual(\r\n self.subject.supported_features,\r\n (\r\n ClimateEntityFeature.TARGET_TEMPERATURE\r\n | ClimateEntityFeature.FAN_MODE\r\n | ClimateEntityFeature.SWING_MODE\r\n ),\r\n )\r\n\r\n def test_icon(self):\r\n self.dps[POWER_DPS] = True\r\n self.dps[HVACMODE_DPS] = \"1\"\r\n self.assertEqual(self.subject.icon, \"mdi:fire\")\r\n self.dps[HVACMODE_DPS] = \"2\"\r\n self.assertEqual(self.subject.icon, \"mdi:water\")\r\n self.dps[HVACMODE_DPS] = \"3\"\r\n self.assertEqual(self.subject.icon, \"mdi:snowflake\")\r\n self.dps[HVACMODE_DPS] = \"4\"\r\n self.assertEqual(self.subject.icon, \"mdi:fan\")\r\n self.dps[POWER_DPS] = False\r\n self.assertEqual(self.subject.icon, \"mdi:hvac-off\")\r\n\r\n def test_temperature_unit(self):\r\n self.dps[UNIT_DPS] = False\r\n self.assertEqual(self.subject.temperature_unit, UnitOfTemperature.CELSIUS)\r\n self.dps[UNIT_DPS] = True\r\n self.assertEqual(self.subject.temperature_unit, UnitOfTemperature.FAHRENHEIT)\r\n\r\n def test_minimum_target_temperature_f(self):\r\n self.dps[UNIT_DPS] = True\r\n self.assertEqual(self.subject.min_temp, 55)\r\n\r\n def test_maximum_target_temperature_f(self):\r\n self.dps[UNIT_DPS] = True\r\n self.assertEqual(self.subject.max_temp, 90)\r\n\r\n def test_temperature_redirects_f(self):\r\n self.dps[UNIT_DPS] = True\r\n self.dps[TEMPERATURE_DPS] = 20\r\n self.dps[TEMPF_DPS] = 90\r\n self.assertEqual(self.subject.target_temperature, 90)\r\n\r\n async def test_set_temperature_redirects_f(self):\r\n self.dps[UNIT_DPS] = True\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {TEMPF_DPS: 85},\r\n ):\r\n await self.subject.async_set_target_temperature(85)\r\n\r\n def test_hvac_mode(self):\r\n self.dps[POWER_DPS] = True\r\n self.dps[HVACMODE_DPS] = \"1\"\r\n self.assertEqual(self.subject.hvac_mode, HVACMode.HEAT)\r\n self.dps[HVACMODE_DPS] = \"2\"\r\n self.assertEqual(self.subject.hvac_mode, HVACMode.DRY)\r\n self.dps[HVACMODE_DPS] = \"3\"\r\n self.assertEqual(self.subject.hvac_mode, HVACMode.COOL)\r\n self.dps[HVACMODE_DPS] = \"4\"\r\n self.assertEqual(self.subject.hvac_mode, HVACMode.FAN_ONLY)\r\n\r\n self.dps[HVACMODE_DPS] = \"3\"\r\n self.dps[POWER_DPS] = False\r\n self.assertEqual(self.subject.hvac_mode, HVACMode.OFF)\r\n\r\n def test_hvac_modes(self):\r\n self.assertCountEqual(\r\n self.subject.hvac_modes,\r\n [\r\n HVACMode.OFF,\r\n HVACMode.COOL,\r\n HVACMode.DRY,\r\n HVACMode.FAN_ONLY,\r\n HVACMode.HEAT,\r\n ],\r\n )\r\n\r\n async def test_set_hvac_mode_to_heat(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, 
{POWER_DPS: True, HVACMODE_DPS: \"1\"}\r\n ):\r\n await self.subject.async_set_hvac_mode(HVACMode.HEAT)\r\n\r\n async def test_set_hvac_mode_to_dry(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {POWER_DPS: True, HVACMODE_DPS: \"2\"}\r\n ):\r\n await self.subject.async_set_hvac_mode(HVACMode.DRY)\r\n\r\n async def test_set_hvac_mode_to_cool(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {POWER_DPS: True, HVACMODE_DPS: \"3\"}\r\n ):\r\n await self.subject.async_set_hvac_mode(HVACMode.COOL)\r\n\r\n async def test_set_hvac_mode_to_fan(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {POWER_DPS: True, HVACMODE_DPS: \"4\"}\r\n ):\r\n await self.subject.async_set_hvac_mode(HVACMode.FAN_ONLY)\r\n\r\n async def test_turn_off(self):\r\n async with assert_device_properties_set(\r\n self.subject._device, {POWER_DPS: False}\r\n ):\r\n await self.subject.async_set_hvac_mode(HVACMode.OFF)\r\n\r\n def test_fan_mode(self):\r\n self.dps[FAN_DPS] = \"1\"\r\n self.assertEqual(self.subject.fan_mode, \"low\")\r\n self.dps[FAN_DPS] = \"2\"\r\n self.assertEqual(self.subject.fan_mode, \"medium\")\r\n self.dps[FAN_DPS] = \"3\"\r\n self.assertEqual(self.subject.fan_mode, \"high\")\r\n self.dps[FAN_DPS] = \"0\"\r\n self.assertEqual(self.subject.fan_mode, \"auto\")\r\n\r\n def test_fan_modes(self):\r\n self.assertCountEqual(\r\n self.subject.fan_modes,\r\n [\r\n \"auto\",\r\n \"low\",\r\n \"medium\",\r\n \"high\",\r\n ],\r\n )\r\n\r\n async def test_set_fan_mode_to_low(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {FAN_DPS: \"1\"},\r\n ):\r\n await self.subject.async_set_fan_mode(\"low\")\r\n\r\n async def test_set_fan_mode_to_medium(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {FAN_DPS: \"2\"},\r\n ):\r\n await self.subject.async_set_fan_mode(\"medium\")\r\n\r\n async def test_set_fan_mode_to_high(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {FAN_DPS: \"3\"},\r\n ):\r\n await self.subject.async_set_fan_mode(\"high\")\r\n\r\n async def test_set_fan_mode_to_auto(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {FAN_DPS: \"0\"},\r\n ):\r\n await self.subject.async_set_fan_mode(\"auto\")\r\n\r\n def test_swing_mode(self):\r\n self.dps[SWING_DPS] = True\r\n self.assertEqual(self.subject.swing_mode, SWING_VERTICAL)\r\n self.dps[SWING_DPS] = False\r\n self.assertEqual(self.subject.swing_mode, SWING_OFF)\r\n\r\n def test_swing_modes(self):\r\n self.assertCountEqual(\r\n self.subject.swing_modes,\r\n [\r\n SWING_VERTICAL,\r\n SWING_OFF,\r\n ],\r\n )\r\n\r\n async def test_set_swing_mode_to_vertical(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {SWING_DPS: True},\r\n ):\r\n await self.subject.async_set_swing_mode(SWING_VERTICAL)\r\n\r\n async def test_set_swing_mode_to_off(self):\r\n async with assert_device_properties_set(\r\n self.subject._device,\r\n {SWING_DPS: False},\r\n ):\r\n await self.subject.async_set_swing_mode(SWING_OFF)\r\n\r\n def test_extra_state_attributes(self):\r\n self.dps[UNKNOWN4_DPS] = 4\r\n self.dps[UNKNOWN13_DPS] = 13\r\n self.dps[UNKNOWN14_DPS] = 14\r\n self.dps[UNKNOWN15_DPS] = 15\r\n self.dps[UNKNOWN17_DPS] = True\r\n self.dps[UNKNOWN19_DPS] = False\r\n\r\n self.assertDictEqual(\r\n self.subject.extra_state_attributes,\r\n {\r\n \"unknown_4\": 4,\r\n \"unknown_13\": 13,\r\n \"unknown_14\": 14,\r\n \"unknown_15\": 15,\r\n 
\"unknown_17\": True,\r\n \"unknown_19\": False,\r\n },\r\n )\r\n","repo_name":"make-all/tuya-local","sub_path":"tests/devices/test_eberg_cooly_c35hd.py","file_name":"test_eberg_cooly_c35hd.py","file_ext":"py","file_size_in_byte":8687,"program_lang":"python","lang":"en","doc_type":"code","stars":613,"dataset":"github-code","pt":"61"} +{"seq_id":"39202966695","text":"#!/usr/share/env python3\n\nimport os, sys\n# Get the file current run path\nrunpath = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(runpath,'..'))\napproot = os.path.abspath(os.path.join(runpath, os.pardir))\nimport lib.tcp_server as server\n\ndef sc(clienthandler):\n print(clienthandler.name)\n\ns = server.MultiTCPServer(connections=2)\ns.session_created +=sc\ns.run()\n\nprint(f\"Connections: {s.connected_clinets}\")\n","repo_name":"IsaPeter/PythonProjects","sub_path":"blackclaw/lib/teszt.py","file_name":"teszt.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42714145746","text":"import csv\nimport json\nimport sys\n\n\ndef csv_change(fp_in, delimiter=',', quotechar='\"', remove_empty=False, \n custom_headers=None, **kwargs):\n r = csv.DictReader(fp_in, delimiter=delimiter, quotechar=quotechar, \n fieldnames=custom_headers)\n rows = [row_dct for row_dct in r]\n if remove_empty:\n rows = [dict([(k, item) for k, item in row.items() if item]) for row in rows]\n return rows\n\n\ndef return_json(data, pretty_spaces=4, sort_keys=False, **kwargs):\n json_value= json.dumps(data, indent=pretty_spaces, sort_keys=sort_keys)\n return(json_value)\n\ndef change(csv,**kwargs):\n csv_local= None\n try:\n if csv == '-' or csv is None:\n csv = sys.stdin\n elif isinstance(csv, str):\n csv = csv_local = open(csv, 'r')\n\n data = csv_change(csv, **kwargs)\n return return_json(data)\n finally:\n if csv_local is not None:\n csv_local.close()\n","repo_name":"SJ029626/csv_too_json","sub_path":"csv__too__json/csv__too__json/csv__too__json.py","file_name":"csv__too__json.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8215310075","text":"import os, sys, shutil\nimport h5py\nfrom datetime import datetime\nimport pytz\nfrom tzlocal import get_localzone\nlocal_tz = get_localzone()\n# c51 imports\nsys.path.append(os.path.join(os.path.dirname(__file__)))\nsys.path.append(os.path.join(os.path.dirname(__file__),'area51_files'))\nimport c51_mdwf_hisq as c51\n# Evan's tape hpss module\nimport hpss.hsi as hsi\n\nfrom typing import Union, List, Dict, Optional, TypeVar\n\nfrom nucleon_elastic_ff.data.h5io import get_dsets\nfrom nucleon_elastic_ff.data.scripts.h5migrate import dset_migrate as h5_dset_migrate\nfrom lattedb.project.formfac.models import (\n TSlicedSAveragedFormFactor4DFile,\n DiskTSlicedSAveragedFormFactor4DFile,\n TapeTSlicedSAveragedFormFactor4DFile,\n TSlicedFormFactor4DFile,\n DiskTSlicedFormFactor4DFile,\n FormFactor4DFile,\n DiskFormFactor4DFile,\n TSlicedSAveragedSpectrum4DFile,\n TapeTSlicedSAveragedSpectrum4DFile,\n DiskTSlicedSAveragedSpectrum4DFile,\n TSlicedSpectrum4DFile,\n DiskTSlicedSpectrum4DFile,\n)\nfrom lattedb.project.formfac.models.data.correlator import (\n CorrelatorMeta,\n DiskCorrelatorH5Dset,\n TapeCorrelatorH5Dset\n)\n\ndef corr_disk_tape_update(corr_updates,dt='disk',debug=False):\n dt_push = []\n for ff,dd in corr_updates:\n if dt == 'disk':\n d = ff.disk\n elif dt == 'tape':\n d = 
ff.tape\n for k,v in dd.items():\n if debug:\n if k == 'exists':\n print('DEBUG:',dt,ff.correlator,ff.source,'BEFORE UPDATE: exists',getattr(d,k),'AFTER UPDATE: exists',v)\n setattr(d,k,v)\n dt_push.append(d)\n #if debug:\n # print('DEBUG:',dt,ff.correlator,ff.source,{k:getattr(d,k) for k,v in dd.items()})\n if dt == 'disk':\n DiskCorrelatorH5Dset.objects.bulk_update(dt_push, fields=list(dd.keys()))\n elif dt == 'tape':\n TapeCorrelatorH5Dset.objects.bulk_update(dt_push, fields=list(dd.keys()))\n else:\n sys.exit('unrecognized dt type',dt)\n\ndef check_tape(t_path,t_file):\n t_dict = dict()\n t_dict['machine'] = c51.machine\n check = os.popen('hsi -P ls -l -D %s' %(t_path+'/'+t_file)).read().split('\\n')\n #On Summit, the first line from hsi returns the directory one is looking at\n # the \"-D\" option in ls gives the full date/time information\n if check[0] == t_path+':':\n t_dict['path'] = t_path\n t_dict['exists'] = True\n t_dict['size'] = int(check[1].split()[4])\n local_time = datetime.strptime(\" \".join(check[1].split()[5:10]),\"%a %b %d %H:%M:%S %Y\")\n if c51.machine == 'summit':\n timezone = pytz.timezone(\"US/Eastern\")\n elif c51.machine == 'lassen':\n timezone = pytz.timezone(\"US/Pacific\")\n else:\n sys.exit('ADD TIME ZONE FOR YOUR MACHINE!')\n t_dict['date_modified'] = timezone.localize(local_time)\n else:\n t_dict['exists'] = False\n return t_dict\n\ndef check_disk(d_path,d_file):\n d_dict = dict()\n d_dict['path'] = d_path\n d_dict['machine'] = c51.machine\n if os.path.exists(d_path+'/'+d_file):\n d_dict['exists'] = True\n d_dict['size'] = os.path.getsize(d_path+'/'+d_file)\n utc = datetime.utcfromtimestamp(os.path.getmtime(d_path+'/'+d_file)).replace(microsecond=0)\n local_time = utc.replace(tzinfo=pytz.utc).astimezone(local_tz)\n d_dict['date_modified'] = local_time\n else:\n d_dict['exists'] = False\n d_dict['size'] = None\n d_dict['date_modified'] = None\n return d_dict\n\ndef sync_h5_data_files(tape_file, data_file, tmp_file, atol=0.0, rtol=1e-10, verbose=False):\n ''' Our data collection utilizes three files\n - tape_file - long term storage of all temporal correlators\n - data_file - volatile disk copy of tape_file\n - tmp_file - temporary copy of data_file\n\n We only want to collect data we have not already collected. We also do not want\n to lose any collected data. To proceed, we first ensure that the contents of the\n data_file matches the tape_file, it if exists. 
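    [Editor's note] A condensed, illustrative outline of the comparison this
    function performs, using the check_tape/check_disk helpers defined above
    (an assumed sketch, not a verbatim excerpt):

        t = check_tape(tape_path, name)   # -> {'exists', 'size', 'date_modified', ...}
        d = check_disk(disk_path, name)
        in_sync = (t['exists'] and d['exists']
                   and t['size'] == d['size']
                   and t['date_modified'] == d['date_modified'])

    Only when the two copies disagree are datasets migrated aside and the
    tape copy re-pulled.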
The data_file and tmp_file do not\n have to be synced, as the the collection step will examine both files for content\n prior to collecting.\n '''\n\n t_dict = check_tape('/'.join(tape_file.split('/')[:-1]) , tape_file.split('/')[-1])\n d_dict = check_disk('/'.join(data_file.split('/')[:-1]) , data_file.split('/')[-1])\n\n if t_dict['exists']:\n if d_dict['exists']:\n # do time stamps match and file sizes match\n if (t_dict['date_modified'] != d_dict['date_modified']) or (t_dict['size'] != d_dict['size']):\n print('tape and disk times or sizes do not match - sync disk to tmp, then pull')\n if verbose:\n print('TAPE date',t_dict['date_modified'], 'size',t_dict['size'])\n print('DISK date',d_dict['date_modified'], 'size',d_dict['size'])\n # first copy d_file to tmp_file\n if d_dict['size'] > 0:\n h5_dset_migrate(data_file, tmp_file, atol=atol, rtol=rtol)\n else:\n os.remove(data_file)\n # pull file from tape, overwriting data_file\n hsi.get('/'.join(data_file.split('/')[:-1]), tape_file)\n else:\n hsi.cget('/'.join(data_file.split('/')[:-1]), tape_file)\n\n\ndef corr_compare_tape_disk(h5_file, tape_dir, disk_dir, tmp_disk_dir, d_sets, atol, rtol, tape_push=False):\n ''' We want to compare h5 data files on tape and disk to decide if we should update\n input: \n h5_file name of h5 file\n tape_dir location on tape\n disk_dir location of disk dir where files are pushed/pulled from tape\n tmp_disk_dir location of temporary disk_dir where new data is collected\n d_sets list of data sets to inquire of existence in file\n tape_push push disk file to tape\n\n t_e = exists on tape or not\n d_e = exists on disk or not\n\n t_e d_e date action\n --------------------------------------------------\n n n -- collect = true\n n y -- if data in disk: push to tape, collect = false\n else: push to tape, collect = true\n y n -- pull, if data in file: collect = false\n else: collect = true\n y y t=d if data in disk: collect = true\n else: collect = false\n y y t!=d pull, h5_compare d_sets, equilibrate or raise exception\n '''\n t_dict = check_tape(tape_dir, h5_file)\n d_dict = check_disk(disk_dir, h5_file)\n tmp_d_dict = check_disk(tmp_disk_dir, h5_file)\n d_file = disk_dir +'/'+ h5_file\n tmp_d_file = tmp_disk_dir +'/'+ h5_file\n have_dsets = True\n\n # do we need to synchronize disk and tmp_disk?\n if d_dict['exists'] or tmp_d_dict['exists']:\n print('\\nSYNCING DISK AND TMP DISK FILES') \n sync_disk_files(tmp_disk_dir, h5_file, disk_dir, h5_file, atol, rtol)\n d_dict = check_disk(disk_dir, h5_file)\n if not t_dict['exists'] and tape_push:\n hsi.cput(d_file, tape_dir+'/'+h5_file)\n\n # if tape DOES NOT exist, see if disk files exist\n if not t_dict['exists']:\n if d_dict['exists']:\n query_disk_file = True\n else:\n query_disk_file = False\n have_dsets = False\n # if tape DOES exist, we have to synchronize with disk files or pull\n else:\n query_disk_file = True\n if t_dict['exists']:\n print('\\nSYNCING TAPE AND DISK FILES')\n sync_tape_disk(t_dict, h5_file, d_dict, h5_file, tmp_d_dict, atol, rtol, tape_push)\n else:\n print('\\nPULLING FROM TAPE: %s' %h5_file)\n hsi.cget(disk_dir, tape_dir +'/'+ h5_file)\n\n # check disk_dir/h5_file for data\n if query_disk_file:\n print('\\nCHECKING DISK FILE FOR FF D_SETS: %s' %h5_file)\n with h5py.File(disk_dir+'/'+h5_file,'r') as f5_tmp:\n dsets_tmp = get_dsets(f5_tmp, load_dsets=False)\n for dset in d_sets:\n if dset not in dsets_tmp:\n have_dsets = False\n print(dset)\n\n return have_dsets\n\ndef sync_disk_files(path_src,file_src,path_dest,file_dest, atol, rtol):\n 
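    # [Editor's note, illustrative only] The timestamp comparison in this
    # function relies on modification times surviving a copy, which is why
    # the code below uses shutil.copy2 rather than shutil.copy:
    #
    #     import os, shutil
    #     shutil.copy2(src, dest)   # copies data *and* metadata (mtime etc.)
    #     assert int(os.path.getmtime(src)) == int(os.path.getmtime(dest))
    #
    # (src/dest are placeholders.)  With plain shutil.copy the timestamps
    # would differ and every later sync would look out of date.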
src_dict = check_disk(path_src, file_src)\n dest_dict = check_disk(path_dest, file_dest)\n if src_dict['exists'] and dest_dict['exists']:\n if src_dict['date_modified'] != dest_dict['date_modified']:\n print('DATES DO NOT MATCH - comparing d_sets')\n h5_dset_migrate(path_src+'/'+file_src, path_dest+'/'+file_dest, atol=atol, rtol=rtol)\n if not dest_dict['exists'] and src_dict['exists']:\n # copy2 preserves metadata, such as timestamp\n shutil.copy2(path_src+'/'+file_src, path_dest+'/'+file_dest)\n\ndef sync_tape_disk(tape_dict, t_file_name, disk_dict, d_file_name, tmp_disk_dict, atol, rtol, tape_push):\n d_file = disk_dict['path'] +'/'+ d_file_name\n tmp_d_file = tmp_disk_dict['path'] +'/'+ d_file_name\n t_file = tape_dict['path'] +'/'+ t_file_name\n if disk_dict['date_modified'] != tape_dict['date_modified']:\n print('TAPE AND DISK TIME do not match - pulling from tape to compare')\n # move file to temp location and pull from tape\n shutil.move(d_file, tmp_d_file)\n hsi.cget(disk_dict['path'], t_file)\n # then check if any data in tmp_disk is not in disk\n with h5py.File(d_file,'r') as f5_disk:\n dsets_disk = get_dsets(f5_disk, load_dsets=False)\n with h5py.File(tmp_d_file,'r') as f5_tmp_disk:\n dsets_tmp_disk = get_dsets(f5_tmp_disk, load_dsets=False)\n new_data = [dset for dset in dsets_tmp_disk if dset not in dsets_disk]\n # an empty list is False\n if new_data:\n try:\n h5_dset_migrate(tmp_d_file, d_file, atol=atol, rtol=rtol)\n if tape_push:\n hsi.cput(d_file, t_file)\n except Exception as e:\n print(e)\n else:\n print('TAPE AND DISK TIMES MATCH - no need to pull')\n\ndef collect_ff_4D_tslice_src_avg(params, db_entries):\n f_type = 'formfac_4D_tslice_src_avg'\n disk_dir = params[f_type]\n f_dict = dict()\n f_dict['ensemble'] = params['ENS_S'].split('_')[0]\n f_dict['stream'] = params['STREAM']\n f_dict['configuration'] = int(params['CFG'])\n f_dict['source_set'] = params['SRC_SET']\n f_dict['t_separation'] = params['T_SEP']\n f_name = (c51.names[f_type]+'.h5') % params\n f_dict['name'] = f_name\n # filter db for unique entry\n entry = db_entries.filter(**f_dict).first()\n if not entry:\n print('something went wrong - this entry should have been created already')\n print(f_dict)\n sys.exit('exiting')\n t_exists = entry.tape.exists\n d_exists = entry.disk.exists\n # the disk and tape entries will have been created already, unless something went wrong\n if not t_exists and (params['UPDATE'] or params['TAPE_UPDATE']):\n t_dict = check_tape(entry.tape.path, entry.name)\n if t_dict['exists'] != t_exists:\n for k,v in t_dict.items():\n setattr(entry.tape, k, v)\n entry.tape.save()\n \n if not d_exists and t_exists and params['TAPE_GET']:\n print('retrieving from tape: %s' %entry.name)\n hsi.cget(entry.disk.path, entry.tape.path+'/'+entry.name)\n\n # if data does not exists on disk or tape, try src avg\n if not d_exists and not t_exists:\n d_dict = check_disk(entry.disk.path, entry.name)\n if not d_dict['exists']:\n os.system(c51.python+' %s/avg_4D.py formfac --cfgs %s -t %s --src_set %s %s %s %s' \\\n %(c51.script_dir, params['CFG'], params['T_SEP'], params['si'],params['sf'],params['ds'], params['bad_size']))\n d_dict = check_disk(entry.disk.path, entry.name)\n if d_dict['exists']:\n # save to tape\n hsi.cput(entry.disk.path+'/'+entry.name, entry.tape.path+'/'+entry.name)\n t_dict = check_tape(entry.tape.path, entry.name)\n # update DB\n print('updating lattedb')\n for k,v in d_dict.items():\n setattr(entry.disk, k, v)\n entry.disk.save()\n for k,v in t_dict.items():\n setattr(entry.tape, 
k, v)\n entry.tape.save()\n\ndef collect_spec_ff_4D_tslice_src_avg(fs_type, params, db_entries):\n if fs_type == 'spec':\n f_type = 'spec_4D_tslice_avg'\n params['SRC'] = 'src_avg'+params['SRC_SET']\n elif fs_type == 'formfac':\n f_type = 'formfac_4D_tslice_src_avg'\n params['SRC'] = 'src_avg'\n disk_dir = params[f_type]\n f_dict = dict()\n f_dict['ensemble'] = params['ENS_S'].split('_')[0]\n f_dict['stream'] = params['STREAM']\n f_dict['configuration'] = int(params['CFG'])\n f_dict['source_set'] = params['SRC_SET']\n if fs_type == 'formfac':\n f_dict['t_separation'] = params['T_SEP']\n f_dict['name'] = (c51.names[f_type]+'.h5') % params\n # filter db for unique entry\n entry = db_entries.filter(**f_dict).first()\n if not entry:\n print('something went wront - - this entry should have been created already')\n print(f_dict)\n sys.exit('exiting')\n t_exists = entry.tape.exists\n d_exists = entry.disk.exists\n # if tape exists but not disk and we want to retrieve\n if not d_exists and t_exists and params['TAPE_GET']:\n print('retrieving from tape: %s' %entry.name)\n hsi.cget(entry.disk.path, entry.tape.path+'/'+entry.name)\n # check tape, try and collect\n if not t_exists:\n # check disk, try and collect if missing\n print(entry.disk.path,entry.name)\n d_dict = check_disk(entry.disk.path, entry.name)\n print(d_dict)\n if not d_dict['exists']:\n if fs_type == 'spec':\n tsep_args = ''\n else:\n tsep_args = '-t '+params['T_SEP']\n os.system(c51.python+' %s/avg_4D.py %s --cfgs %s %s --src_set %s %s %s %s' \\\n %(c51.script_dir, fs_type, params['CFG'], tsep_args, params['si'],params['sf'],params['ds'], params['bad_size']))\n d_dict = check_disk(entry.disk.path, entry.name)\n if d_dict['exists']: \n # save to tape\n hsi.cput(entry.disk.path+'/'+entry.name, entry.tape.path+'/'+entry.name)\n t_dict = check_tape(entry.tape.path, entry.name)\n # update DB\n print('updating lattedb')\n for k,v in d_dict.items():\n setattr(entry.disk, k, v)\n entry.disk.save()\n for k,v in t_dict.items():\n setattr(entry.tape, k, v)\n entry.tape.save()\n if fs_type == 'spec':# we are saving individual spec_4D_tslice files for now as well\n for s0 in params['SOURCES']:\n params['SRC'] = s0\n spec_4D_tslice_file = (c51.names['spec_4D_tslice']+'.h5') % params\n d_path = params['prod']+'/spec_4D_tslice/'+params['CFG']\n t_path = c51.tape+'/'+params['ENS_S']+'/spec_4D_tslice/'+params['CFG']\n hsi.cput(d_path+'/'+spec_4D_tslice_file, t_path+'/'+spec_4D_tslice_file)\n\n'''\n FF 4D get or create db entrie functions\n'''\ndef get_or_create_ff4D_tsliced_savg(\n params: dict,\n configuration_range: List[int],\n ensemble: str,\n stream: str,\n source_set: str,\n ) -> List[TSlicedSAveragedFormFactor4DFile]:\n \"\"\"Returns queryset of TSlicedSAveragedFormFactor4DFile entries for given input\n \n Creates entries in bulk if they do not exist.\n \"\"\"\n f_type = 'formfac_4D_tslice_src_avg'\n params['SRC'] = 'src_avg'\n t_seps = params['t_seps']\n # Pull all relevant meta entries to local python script\n meta_entries = TSlicedSAveragedFormFactor4DFile.objects.filter(\n configuration__in = configuration_range,\n ensemble = ensemble,\n stream = stream,\n source_set = source_set,\n t_separation__in = t_seps,\n )\n\n kwargs = {\n \"ensemble\": ensemble,\n \"stream\": stream,\n \"source_set\": source_set,\n }\n\n # Check if all entries are present\n new_entries = []\n for cfg in configuration_range:\n params['CFG'] = str(cfg)\n for tsep in t_seps:\n params['T_SEP'] = str(tsep)\n meta_data = kwargs.copy()\n f_name = (c51.names[f_type]+'.h5') 
% params\n #print(f_name)\n #sys.exit()\n meta_data[\"name\"] = f_name\n meta_data[\"configuration\"] = cfg\n meta_data[\"t_separation\"] = tsep\n\n if not meta_entries.filter(**meta_data).first():\n new_entries.append(TSlicedSAveragedFormFactor4DFile(**meta_data))\n\n # Create entries if not present\n if new_entries:\n created_entries = TSlicedSAveragedFormFactor4DFile.objects.bulk_create(new_entries)\n print(f\"Created {len(created_entries)} FF_4D_Tslice_Savg entries\")\n meta_entries = TSlicedSAveragedFormFactor4DFile.objects.filter(\n configuration__in=configuration_range,\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n t_separation__in=t_seps,\n )\n\n # Return all entries\n return meta_entries.prefetch_related('disk','tape')\n\ndef get_or_create_spec4D_tsliced_savg(\n params: dict,\n configuration_range: List[int],\n ensemble: str,\n stream: str,\n source_set: str,\n ) -> List[TSlicedSAveragedSpectrum4DFile]:\n \"\"\"Returns queryset of TSlicedSAveragedSpectrum4DFile entries for given input\n\n Creates entries in bulk if they do not exist.\n \"\"\"\n f_type = 'spec_4D_tslice_avg'\n params['SRC'] = 'src_avg'+params['SRC_SET']\n meta_entries = TSlicedSAveragedSpectrum4DFile.objects.filter(\n configuration__in = configuration_range,\n ensemble = ensemble,\n stream = stream,\n source_set = source_set,\n )\n\n kwargs = {\n \"ensemble\": ensemble,\n \"stream\": stream,\n \"source_set\": source_set,\n }\n\n # Check if all entries are present\n new_entries = []\n for cfg in configuration_range:\n params['CFG'] = str(cfg)\n meta_data = kwargs.copy()\n f_name = (c51.names[f_type]+'.h5') % params\n meta_data[\"name\"] = f_name\n meta_data[\"configuration\"] = cfg\n \n if not meta_entries.filter(**meta_data).first():\n new_entries.append(TSlicedSAveragedSpectrum4DFile(**meta_data))\n\n # Create entries if not present\n if new_entries:\n created_entries = TSlicedSAveragedSpectrum4DFile.objects.bulk_create(new_entries)\n print(f\"Created {len(created_entries)} SPEC_4D_Tslice_Savg entries\")\n meta_entries = TSlicedSAveragedSpectrum4DFile.objects.filter(\n configuration__in=configuration_range,\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n )\n\n # Return all entries\n return meta_entries.prefetch_related('disk','tape')\n\n''' \n 2pt Correlator DB entry creation function\n'''\ndef get_or_create_meta_entries(\n correlator: str,\n configuration_range: List[int],\n ensemble: str,\n stream: str,\n source_set: str,\n sources: Dict[int,List[str]],\n ) -> List[CorrelatorMeta]:\n \"\"\"Returns queryset of CorrelatorMeta entries for given input\n \n Creates entries in bulk if they do not exist.\n \"\"\"\n # Pull all relevant meta entries to local python script\n meta_entries = CorrelatorMeta.objects.filter(\n correlator=correlator,\n configuration__in=configuration_range,\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n )\n\n kwargs = {\n \"correlator\": correlator,\n \"ensemble\": ensemble,\n \"stream\": stream,\n \"source_set\": source_set,\n }\n\n # Check if all entries are present\n entries_to_create = []\n for cfg in configuration_range:\n for src in sources[cfg]:\n meta_data = kwargs.copy()\n meta_data[\"source\"] = src\n meta_data[\"configuration\"] = cfg\n\n if not meta_entries.filter(**meta_data).first():\n entries_to_create.append(CorrelatorMeta(**meta_data))\n\n # Create entries if not present\n if entries_to_create:\n created_entries = CorrelatorMeta.objects.bulk_create(entries_to_create)\n print(f\"Created {len(created_entries)} entries\")\n 
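        # [Editor's note, sketch] bulk_create() returns the new rows but does
        # not refresh the queryset captured earlier, so the filter below is
        # re-run to obtain one queryset holding both old and new entries.  The
        # generic bulk get-or-create pattern used throughout this module is
        # roughly (Model/scope/wanted are placeholders, not names from this
        # file):
        #
        #     existing = Model.objects.filter(**scope)
        #     missing = [Model(**row) for row in wanted
        #                if not existing.filter(**row).first()]
        #     if missing:
        #         Model.objects.bulk_create(missing)
        #         existing = Model.objects.filter(**scope)  # re-query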
meta_entries = CorrelatorMeta.objects.filter(\n correlator=correlator,\n configuration__in=configuration_range,\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n )\n\n # Return all entries\n return meta_entries.prefetch_related('disk','tape')\n\n''' Get or create DISK and TAPE functions that work with both 4D and CORR data files\n\n'''\nDiskEntries = TypeVar(\"DiskEntries\")\ndef get_or_create_disk_entries(meta_entries: List, disk_entries: DiskEntries, path: str, machine: str, name: Optional[str] = None,\n )-> List[DiskEntries]:\n \"\"\"Returns queryset of DiskCorrelatorH5Dset entries for given CorrelatorMeta entries\n \n Creates entries in bulk with status does not exist if they do not exist in DB.\n \"\"\"\n if disk_entries == DiskCorrelatorH5Dset:\n file_entries = disk_entries.objects.filter(meta__in=meta_entries)\n elif disk_entries in [DiskTSlicedSAveragedFormFactor4DFile, DiskTSlicedSAveragedSpectrum4DFile]:\n file_entries = disk_entries.objects.filter(file__in=meta_entries)\n else:\n print('Disk Entry Error: we dont know what this is',disk_entries)\n sys.exit() \n\n # Create entries if not present\n kwargs = {\n \"path\" : path,\n \"machine\" : machine,\n \"exists\" : False,\n }\n \n if not file_entries.count() == meta_entries.count():\n entries_to_create = []\n for meta in meta_entries:\n if not hasattr(meta, 'disk'):\n data = kwargs.copy()\n if disk_entries == DiskCorrelatorH5Dset:\n data[\"name\"] = name %{'CFG':meta.configuration}\n data[\"dset\"] = f\"DUMMY_PLACE_HOLDER_H5_PATH\"\n data[\"meta\"] = meta\n else:# disk_entries in [DiskTSlicedSAveragedFormFactor4DFile, DiskTSlicedSAveragedSpectrum4DFile]:\n data[\"path\"] = data[\"path\"] %{'ENS_S':meta.ensemble+'_'+meta.stream, 'CFG':str(meta.configuration)}\n data[\"file\"] = meta\n d_dict = check_disk(data['path'], meta.name)\n for k in d_dict:\n data[k] = d_dict[k]\n entries_to_create.append(disk_entries(**data))\n \n if entries_to_create:\n created_entries = disk_entries.objects.bulk_create(entries_to_create)\n print(f\"Created {len(created_entries)} DISK entries\")\n if disk_entries == DiskCorrelatorH5Dset:\n file_entries = disk_entries.objects.filter(meta__in=meta_entries)\n else:\n file_entries = disk_entries.objects.filter(file__in=meta_entries)\n\n return file_entries\n\nTapeEntries = TypeVar(\"TapeEntries\")\ndef get_or_create_tape_entries(meta_entries: List, tape_entries: TapeEntries, path: str, machine: str, name: Optional[str] = None, dbl_check=False\n ) -> List[TapeEntries]:\n \"\"\"Returns queryset of TapeEntries entries for given CorrelatorMeta entries\n \n Creates entries in bulk with status does not exist if they do not exist in DB.\n \"\"\"\n if tape_entries == TapeCorrelatorH5Dset:\n file_entries = tape_entries.objects.filter(meta__in=meta_entries)\n elif tape_entries in [TapeTSlicedSAveragedFormFactor4DFile, TapeTSlicedSAveragedSpectrum4DFile]:\n file_entries = tape_entries.objects.filter(file__in=meta_entries)\n else:\n print('Tape Entry Error: we dont know what this is',tape_entries)\n sys.exit() \n \n # Create entries if not present\n kwargs = {\n \"path\" : path,\n \"machine\" : machine,\n \"exists\" : False,\n }\n create_entries=False\n if file_entries.count() != meta_entries.count():\n create_entries=True\n elif file_entries.count() == meta_entries.count() and dbl_check:\n create_entries=True\n if create_entries:\n entries_to_create = []\n for meta in meta_entries:\n if not hasattr(meta, 'tape'):\n data = kwargs.copy()\n if tape_entries == TapeCorrelatorH5Dset:\n data[\"name\"] = name 
%{'CFG':meta.configuration}\n data[\"dset\"] = f\"DUMMY_PLACE_HOLDER_H5_PATH\"\n data[\"meta\"] = meta\n else:# tape_entries in [TapeTSlicedSAveragedFormFactor4DFile, TapeTSlicedSAveragedSpectrum4DFile]:\n data[\"path\"] = data[\"path\"] %{'ENS_S':meta.ensemble+'_'+meta.stream, 'CFG':str(meta.configuration)}\n data[\"file\"] = meta\n t_dict = check_tape(data['path'], meta.name)\n for k in t_dict:\n data[k] = t_dict[k]\n entries_to_create.append(tape_entries(**data))\n \n if entries_to_create:\n created_entries = tape_entries.objects.bulk_create(entries_to_create)\n print(f\"Created {len(created_entries)} TAPE entries\")\n if tape_entries == TapeCorrelatorH5Dset:\n file_entries = tape_entries.objects.filter(meta__in=meta_entries)\n else:\n file_entries = tape_entries.objects.filter(file__in=meta_entries)\n\n return file_entries\n\n\ndef querry_corr_disk_tape(meta_entries,corr,db_filter,dt='tape',debug=False):\n has_dt = True\n db_filter_copy = dict(db_filter)\n db_filter_copy.update({'correlator':corr})\n for entry in meta_entries[corr].filter(**db_filter):\n if dt == 'tape':\n if hasattr(entry, 'tape'):\n if debug:\n print(corr,'tape exists',entry.tape.exists)\n if not entry.tape.exists:\n has_dt = False\n else:\n has_dt = False\n elif dt == 'disk':\n if hasattr(entry, 'disk'):\n if debug:\n print(corr,'disk exists',entry.tape.exists)\n if not entry.disk.exists:\n has_dt = False\n else:\n has_dt = False\n else:\n sys.exit('unrecognized disk/tape options: %s' %dt)\n return has_dt\n\ndef del_corr_entries(\n correlator: str,\n configuration_range: List[int],\n ensemble: str,\n stream: str,\n source_set: str,\n sources: Dict[int,List[str]],\n ) -> List[CorrelatorMeta]:\n \"\"\" Delete entries of type CorrelatorMeta\n \"\"\"\n # Pull all relevant meta entries to local python script\n meta_entries = CorrelatorMeta.objects.filter(\n correlator=correlator,\n configuration__in=configuration_range,\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n )\n for de in meta_entries:\n de.delete()\n\ndef delete_ff4D_avg(\n ensemble : str,\n stream : str,\n source_set : str,\n t_seps : List[int],\n configuration_range : List[int],\n ) -> List[TSlicedSAveragedFormFactor4DFile]:\n \"\"\"Delete entries of TSlicedSAveragedFormFactor4DFile type\n \"\"\"\n meta_entries = CorrelatorMeta.objects.filter(\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n configuration__in=configuration_range,\n t_separation__in=t_seps,\n )\n for de in meta_entries:\n de.delete()\n\ndef delete_spec4D_avg(\n ensemble : str,\n stream : str,\n source_set : str,\n configuration_range : List[int],\n ) -> List[TSlicedSAveragedFormFactor4DFile]:\n \"\"\"Delete entries of TSlicedSAveragedFormFactor4DFile type\n \"\"\"\n meta_entries = CorrelatorMeta.objects.filter(\n ensemble=ensemble,\n stream=stream,\n source_set=source_set,\n configuration__in=configuration_range,\n )\n for de in meta_entries:\n de.delete()\n\n","repo_name":"callat-qcd/nucleon_elastic_FF","sub_path":"scripts/lattedb_ff_disk_tape_functions.py","file_name":"lattedb_ff_disk_tape_functions.py","file_ext":"py","file_size_in_byte":28897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23632342981","text":"#!/usr/bin/env python\r\n#-*- coding: ISO-8859-1 -*-\r\n\r\nimport sys\r\n\r\nfile_in = 'A-small.in'\r\nfile_out = 'A-small.out'\r\n# 'abcdefghijklmnopqrstuvwxyz'\r\nfrom_to = 'yhesocvxduiglbkrztnwjpfmaq'\r\n\r\ndef solve(G):\r\n result = ''\r\n \r\n for l in G:\r\n b = ord(l)\r\n if b == 
32:\r\n result += ' '\r\n else:\r\n result += from_to[b - 97]\r\n \r\n return result\r\n\r\nif __name__ == '__main__':\r\n i = open(file_in, 'r')\r\n o = open(file_out, 'w')\r\n T = int(i.readline().strip())\r\n for case in xrange(T):\r\n G = i.readline().strip()\r\n result = solve(G)\r\n o.write('Case #%d: %s\\n' % (case + 1, result))\r\n o.close()\r\n i.close()\r\n sys.exit(0)\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1992.py","file_name":"1992.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13037866689","text":"import random\nfrom src.routing_algorithms.BASE_routing import BASE_routing\nfrom src.utilities import utilities as util\nfrom src.utilities.policies import *\nimport numpy as np\nimport math\n\nclass QMAR(BASE_routing):\n\n def __init__(self, drone, simulator):\n BASE_routing.__init__(self, drone, simulator)\n self.maxReward = 5\n self.minReward = -5\n self.w = 0.7\n\n\n def feedback(self, outcome, id_j, Q_value_best_action):\n \"\"\"\n Feedback returned when the packet arrives at the depot or\n \"\"\"\n alpha = self.drone.neighbor_table[id_j, 10]\n gamma = self.drone.neighbor_table[id_j, 7]\n Q_value_i_j = self.drone.neighbor_table[id_j, 9]\n\n #gives the max reward when the packet arrives to the depot\n if (outcome == 1):\n self.drone.neighbor_table[id_j, 9] = Q_value_i_j + alpha * (self.maxReward)\n\n #the packet is arrived to the node j, but it isn't the depot\n elif (outcome == 0):\n delay = self.drone.neighbor_table[id_j, 8] + self.drone.neighbor_table[id_j, 11]\n reward = self.computeReward(outcome, delay)\n #Update Q table\n self.drone.neighbor_table[id_j, 9] = Q_value_i_j + alpha * (reward + gamma * Q_value_best_action - Q_value_i_j)\n\n else:\n self.drone.neighbor_table[id_j, 9] = Q_value_i_j + alpha * (self.minReward + gamma * Q_value_best_action - Q_value_i_j)\n\n\n def computeReward(self, outcome, delay):\n return self.w * math.exp(delay) + (1 - self.w) * (self.drone.residual_energy/self.drone.initial_energy)\n\n\n def relay_selection(self, opt_neighbors, data):\n\n packet = data[0]\n candidates = []\n candidates2 = []\n for node_j in self.simulator.drones:\n if (node_j in opt_neighbors):\n j = node_j.identifier\n #first of all we need to compute the requested velocity not to expire the packet\n deadline = 2001 - (self.simulator.cur_step - packet.time_step_creation)\n if (deadline == 0):\n print()\n distance_i = util.euclidean_distance(self.drone.coords, self.simulator.depot_coordinates) #distance from node i to depot\n req_v = distance_i / deadline\n\n #we compute the actual velocity from node i to node j\n actual_v, distance_i_j = self.computeActualVel(j, node_j, distance_i)\n\n if (actual_v >= req_v):\n #node_j is a possible candidate!!! 
Now we need to compute the weight k\n LQ = self.drone.neighbor_table[j, 12]\n\n #Computing relationship coefficient\n R = self.drone.communication_range\n\n if (distance_i_j > R):\n M = 0\n else:\n M = 1 - (distance_i_j/R)\n\n k = M * LQ\n candidates.append((node_j, k))\n\n elif(actual_v > 0):\n '''\n we append in the secondary array of candidates the neighbors\n whose actual velocities are greater than 0, so the neighbor associated\n with the maximum actual velocity will be selected as the next hop\n '''\n candidates2.append((node_j, actual_v))\n\n if len(candidates) == 0:\n maxx = -3414212\n if (len(candidates2) > 0):\n for i in range(len(candidates2)):\n if (candidates2[i][1] > maxx):\n maxx = candidates2[i][1]\n chosen = candidates2[i][0]\n else:\n #we've encountered the routing hole problem so we give to the previous hop node 𝑖 the minimum reward\n return \"RHP\"\n\n\n else:\n #Choosing the next hop with weighted Q_value max\n maxx = -100000\n chosen = None\n for i in range(len(candidates)):\n candidate = candidates[i][0]\n k = candidates[i][1]\n Q_val = self.drone.neighbor_table[candidate.identifier, 9]\n if (Q_val * k > maxx):\n chosen = candidate\n maxx = Q_val * k\n\n #Select the id of the chosen drone\n if (chosen == None):\n id = self.drone.identifier\n else:\n id = chosen.identifier\n\n return chosen\n\n def computeActualVel(self, j, node_j, distance_i):\n\n #we try to estimate the position of node j at time t3, so when the packet should arrive\n x2 = self.drone.neighbor_table[j, 4]\n y2 = self.drone.neighbor_table[j, 5]\n x1 = self.drone.neighbor_table[j, 0]\n y1 = self.drone.neighbor_table[j, 1]\n\n\n if (x2 - x1 != 0):\n angle_j = math.atan((y2 - y1) / (x2 - x1))\n else:\n #it's possible that the hello packet from node_j isn't arrived yet\n angle_j = math.atan(0)\n\n delay = self.drone.neighbor_table[j, 8] + self.drone.neighbor_table[j, 11]\n if (delay == 0):\n delay = 0.01\n t1 = self.drone.neighbor_table[j, 6] #timestamp of the last update of the node j in the neighbor table of node i (=self.drone)\n t3 = self.simulator.cur_step + delay\n x = x1 + node_j.speed * math.cos(angle_j) * (t3 - t1)\n y = node_j.coords[1] + node_j.speed * math.sin(angle_j) * (t3 - t1)\n distance_j = util.euclidean_distance((x, y), self.simulator.depot_coordinates)\n distance_i_j = util.euclidean_distance(self.drone.coords, (x, y))\n return (distance_i - distance_j) / delay, distance_i_j\n","repo_name":"LeonardoBerti00/QMR-Q-learning-based-Multi-objective-optimization-Routing-protocol","sub_path":"src/routing_algorithms/q_learning_routing.py","file_name":"q_learning_routing.py","file_ext":"py","file_size_in_byte":5696,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"14064306126","text":"#DEADPOOL\r\nfrom turtle import *\r\nimport time\r\ncolor('red')\r\npenup()\r\ngoto(0, -200)\r\npendown()\r\nbegin_fill() \r\ncircle(200) \r\nend_fill()\r\ndef black_circle():\r\n begin_fill()\r\n color('black')\r\n circle(160, -160)\r\n end_fill()\r\n\r\ngoto(0, -160)\r\ncircle(160, -10)\r\nblack_circle()\r\n\r\ncolor('red')\r\n\r\ngoto(25, 160)\r\nrt(20)\r\nblack_circle()\r\ndef eye(a):\r\n begin_fill()\r\n goto(a * 40, 0)\r\n color('white')\r\n pendown()\r\n goto(a * 140, 45)\r\n goto(a * 120, 10)\r\n goto(a * 40, 0)\r\n end_fill()\r\n penup()\r\n\r\neye(1) \r\neye(-1) 
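# [Editor's note] eye() above draws one eye and mirrors it for the other side:
# every x-coordinate is multiplied by the sign argument a (+1 for right, -1 for
# left), so goto(a * 140, 45) lands at (140, 45) or (-140, 45).  A minimal
# standalone version of the same idea:
#
#     def mirror_x(points, a):
#         # Reflect points across the y-axis when a == -1.
#         return [(a * x, y) for (x, y) in points]
#
#     assert mirror_x([(40, 0), (140, 45)], -1) == [(-40, 0), (-140, 45)]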
\r\n\r\nhideturtle()\r\ntime.sleep(8)\r\n\r\n\r\n\r\n","repo_name":"Uday-23/GRAPHICS-","sub_path":"DEADPOOL.py","file_name":"DEADPOOL.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18899045754","text":"from evennia import DefaultScript\nfrom .objects import DBVERSION, SPACEDB, SpaceInstance, NAME, VERSION, HSOT\nfrom world.utils import cemit\nfrom .objects import HSObject\nimport pickle\nimport os.path\n\n# Statee data\nSTATE = None\n\n\nclass SpaceScript(DefaultScript):\n\n def add_object(self, sobj):\n # If the object is not scheduled for deletion...\n if sobj in self.deletions:\n return\n self.insertions.append(sobj)\n cemit(\"space\", f\"{sobj.name} scheduled for insertion.\")\n\n def del_object(self, sobj):\n if sobj in self.insertions:\n self.insertions.remove(sobj)\n cemit(\"space\", f\"Deleted object {sobj.name} removed from insertions list\")\n else:\n self.deletions.append(sobj) \n cemit(\"space\", f\"{sobj.name} scheduled for deletion.\")\n\n \"\"\" Timer and data store \"\"\"\n def at_script_creation(self) -> None:\n \"\"\"Called once, when script is first created\"\"\"\n self.key = \"space_script\"\n self.desc = \"Space script\"\n self.interval = 2 # 2 sec tick\n self.has_state = False\n\n def load_state(self):\n global STATE\n\n with open(SPACEDB, \"rb\") as f:\n STATE = pickle.load(f)\n self.has_state = True\n\n def save_state(self):\n with open(SPACEDB, \"wb\") as f:\n pickle.dump(STATE, f)\n\n def get_status(self, player):\n tmp = f'{NAME} version {VERSION} Stats:'\n player.msg(tmp)\n player.msg(\"-\" * len(tmp))\n player.msg(f'STATE: {STATE}')\n player.msg(f' # Ships: {len(STATE.ship_list)}')\n player.msg(f'# Objects: {len(STATE.object_list)}')\n\n def at_server_reload(self):\n cemit(\"space\", \"State saved.\")\n self.save_state()\n\n\n def at_server_start(self):\n global STATE\n \n self.deletions = set()\n self.insertions = set()\n cemit(\"space\", f\"Initializing {NAME} {VERSION}...\")\n self.has_state = False\n if os.path.isfile(SPACEDB):\n self.load_state()\n if not self.has_state:\n cemit(\"space\", \"|RFailed to initialize state handler.|n\")\n self.active = False\n return\n else:\n cemit(\"space\", \"State handler initialized.\")\n else:\n STATE = SpaceInstance()\n self.has_state = True\n self.save_state()\n\n self.active = False\n if self.has_state:\n cemit(\"space\", \"Initialization was |GSUCCESSFUL|n.\")\n cemit(\"space\", f\"Cycling is {'|GENABLED|n' if STATE.active else '|RDISABLED|n'}.\")\n else:\n cemit(\"space\", \"Initialization was not successful.\")\n return\n\n cemit(\"space\", \"Loading objects...\")\n count = 12\n cemit(\"space\", f\"Done loading {count} object(s).\")\n if not STATE.active:\n cemit(\"space\", \"Use \\'|Csdb/start|n\\' to enable cycling.\")\n\n def at_repeat(self):\n for x in self.insertions:\n try:\n if x not in STATE.ship_list:\n STATE.ship_list.append(x)\n self.insertions.remove(x)\n x.on_add()\n except Exception as e:\n cemit(\"space\", f\"Error in at_repeat(): {e}.\")\n\n #cemit(\"space\", \"PING\")\n if STATE.active or True:\n for x in STATE.ship_list or []:\n try:\n x.cycle()\n #cemit(\"space\", f\"Ship: {x.name}\")\n except Exception as e:\n cemit(\"space\", f\"Error in at_repeat(): {e}.\")\n continue\n\n def on_add(self, sobj: HSObject) -> None:\n cemit(\"space\", f\"{sobj} was added to the active list.\")\n sobj.activate()\n\n\n def on_del(self, sobj: HSObject) -> None:\n cemit(\"space\", f\"{sobj} was removed from 
the active list.\")\n sobj.deactivate()\n\n\n\ndef get_state():\n return STATE\n\n","repo_name":"Darren791/newgame","sub_path":"space/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22959439353","text":"__author__ = 'Volodymyr Varchuk'\n\nfrom PIL import Image\nfrom PIL import ImageFont\nfrom PIL import ImageDraw\nimport math\n\nimport cv2\nimport argparse\n\n\ndef is_point_inside_circle(center, point, radius):\n x0, y0 = center\n x, y = point\n if (x - x0)*(x - x0) + (y - y0)*(y - y0) <= radius * radius:\n return True\n else:\n False\n\n\ndef is_point_acceptable(point, img_grayscale, treshold, radius):\n max_width, max_height = img_grayscale.shape\n max_i, max_j, max_val = point\n wrong_point = False\n sq_radius = int(round(radius, 0))\n sq_radius2 = sq_radius * 5\n for i in range (max(0,max_i - sq_radius), min(max_width, max_i + sq_radius)):\n for j in range (max(0, max_j - sq_radius), min(max_height, max_j + sq_radius)):\n if is_point_inside_circle((max_i, max_j), (i, j), radius):\n if img_grayscale[i,j] < int(max_val - max_val * treshold):\n wrong_point = True\n if wrong_point:\n return False\n else:\n wrong_point2 = False\n for i in range (max(0, max_i - sq_radius2), min(max_width, max_i + sq_radius2)):\n for j in range (max(0, max_j - sq_radius2), min(max_height, max_j + sq_radius2)):\n if is_point_inside_circle((max_i, max_j), (i, j), sq_radius2):\n if img_grayscale[i,j] < int(max_val - max_val * treshold):\n return True\n if not wrong_point2:\n return False\n\n\ndef get_max_point(img_grayscale, point_radius):\n max_i = 100\n max_j = 100\n max_val = 220\n indent = 150\n im_h, im_w = img_grayscale.shape\n max_points = []\n for i in range(indent, im_h-indent):\n for j in range(indent, im_w-indent):\n #print (img_grayscale[i,j], max_val)\n if img_grayscale[i,j] >= max_val:\n #print (\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n if is_point_acceptable ((i, j, img_grayscale[i,j]), img_grayscale, 0.1, point_radius):\n max_val = img_grayscale[i,j]\n max_i = i\n max_j = j\n\n return (max_i, max_j, max_val)\n\n\ndef open_img(image_path):\n gray_img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE) # grayscale\n return gray_img\n\n\ndef round_points(center, radius):\n X0, Y0, max_value = center\n coordinates = []\n cirlce_points = []\n cp_step = 360.0/1200.0\n for i in range(1200):\n cp_val = i*cp_step\n cirlce_points.append(cp_val)\n for i in cirlce_points:\n Y1 = math.cos(math.radians(i)) * radius + Y0\n X1 = math.sin(math.radians(i)) * radius + X0\n point = (int(Y1), int(X1))\n if not point in coordinates:\n coordinates.append(point)\n\n return coordinates\n\n\ndef find_chains(whites_point, image_gary, iterations, draw, step_circle):\n success_count = 0\n x0, y0, max_val = whites_point\n check_reverse = 0\n direction = 0 #0 - East(45-134), 1 - North(135-224), 2 - West(225-314), 3 - South(315-44)\n points = []\n angle = 0\n step_angle = 190/40\n for i in range(iterations):\n # print (i)\n # start from 0 degree to 360\n if check_reverse == 0:\n range_cust = xrange(0, 180, (190 / 40))\n else:\n range_cust = xrange(180, 360, (190 / 40))\n print (range_cust)\n find_in_circle = 0\n for deg in range_cust:\n x = int(math.cos(math.radians(deg)) * step_circle + x0)\n y = int(math.sin(math.radians(deg)) * step_circle + y0)\n point_to_check = (x,y)\n if draw is not None:\n draw.point((y,x), 128)\n points.append(point_to_check)\n if 
is_point_acceptable((x,y,max_val),image_gary,0.1,0.5):\n x0, y0 = point_to_check\n success_count = success_count + 1\n find_in_circle = 1\n # print (x0,y0)\n # angle = deg\n break\n if find_in_circle == 0:\n if check_reverse == 1:\n return False\n else:\n x0, y0, max_val = whites_point\n check_reverse = 1\n # angle = 180\n # print ('reverse')\n print (success_count, iterations)\n if success_count == iterations:\n return True\n else:\n return False\n\n#img_055_19541.jpg\n#img_105_67039.jpg\n\n#-f /media/sf_share_linux/video/2016-05-18/SJCM0005/af_jpg/img_017_08231.jpg\n\n#-f /media/sf_share_linux/video/2016-05-18/SJCM0004/af_jpg/img_080_45696.jpg\n#-f /media/sf_share_linux/video/2016-05-18/SJCM0004/af_jpg/img_105_67039.jpg\n#-f /media/sf_share_linux/video/img_055_19541.jpg\n# -f /tmp/SJCM0003/jpg/image007822.jpg\n\n\n\n# ap = argparse.ArgumentParser()\n# ap.add_argument(\"-f\", \"--file\", required=True, help=\"Path to the image file\")\n# args = vars(ap.parse_args())\n#\n# print('read_image')\n# gray_img = open_img(args['file'])\n# max_point = (0,0,0)\n# print('getting max point')\n# max_point = get_max_point(gray_img, 3)\n# print (max_point)\n#\n#\n# rrr = round_points(max_point, 4)\n# im = Image.open(args['file'])\n# draw = ImageDraw.Draw(im)\n#\n# for it in rrr:\n# draw.point(it, 128)\n#\n#\n# found = find_chains(max_point,gray_img,20,draw,10)\n# print (found)\n# # for it in points:\n# # draw.point(it, 128)\n# del draw\n#\n# # find_white_points_chain(max_point, 5, 5, gray_img, 255, draw)\n# im.save(\"output.png\")\n# im.show()\n\n#print (rrr)","repo_name":"VarchukVladimir/strange_repo","sub_path":"strikes_finder.py","file_name":"strikes_finder.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23568945061","text":"# A certain bathroom has N + 2 stalls in a single row; the stalls on the left and right ends are permanently occupied by the bathroom guards. The other N stalls are for users.\r\n#\r\n# Whenever someone enters the bathroom, they try to choose a stall that is as far from other people as possible.\r\n# To avoid confusion, they follow deterministic rules: For each empty stall S, they compute two values LS and RS,\r\n# each of which is the number of empty stalls between S and the closest occupied stall to the left or right, respectively.\r\n# Then they consider the set of stalls with the farthest closest neighbor, that is, those S for which min(LS, RS) is maximal.\r\n# If there is only one such stall, they choose it; otherwise, they choose the one among those where max(LS, RS) is maximal.\r\n# If there are still multiple tied stalls, they choose the leftmost stall among those.\r\n#\r\n# K people are about to enter the bathroom; each one will choose their stall before the next arrives. Nobody will ever leave.\r\n#\r\n# When the last person chooses their stall S, what will the values of max(LS, RS) and min(LS, RS) be?\r\n\r\n# input: T [test cases] followed by rows of \"N K\"\r\n# we return the max(LS, RS) and min(LS, RS) values for the last person we enter\r\n\r\n# brute force it?\r\n\r\nfrom collections import deque\r\nDEBUG = False\r\nfor T in range(1, int(input()) + 1):\r\n\r\n x = 0\r\n y = 0\r\n\r\n n, k = [int(x) for x in input().split()]\r\n config = deque('o')\r\n config.extend([ '.' for __ in range(n)])\r\n config.append('o')\r\n # n = 5 let's say and k=2\r\n # config = o . . . . . 
o\r\n mid = 0 # EDIT: don't initializelen(config) / 2 # rounds down so will get leftmost\r\n mids = [] # EDIT: don't initialize#[mid]\r\n for person in range(k):\r\n # find the greatest number of periods in our list?\r\n # brute force: simply do a linear search xD\r\n # correct: do a binary search, go through mids and divide it up\r\n # for now, linear search\r\n\r\n #\r\n #\r\n largestPeriods = 0\r\n startIndex = 0\r\n currentPeriods = 0\r\n currentStart = 0\r\n for i in range(len(config)):\r\n if config[i] == '.':\r\n currentPeriods += 1\r\n if currentPeriods > largestPeriods:\r\n startIndex = currentStart\r\n largestPeriods = currentPeriods\r\n else:\r\n currentStart = i+1\r\n currentPeriods = 0\r\n # ok, we got our startIndex and the number of periods in a row\r\n # this one will be the leftmost of those\r\n if largestPeriods % 2 == 0:\r\n mid = startIndex + int(largestPeriods / 2) - 1\r\n else:\r\n mid = startIndex + int(largestPeriods / 2)\r\n # mid = startIndex + int(largestPeriods / 2)\r\n mids.append(mid)\r\n config[mid] = 'o'\r\n if DEBUG:\r\n print(list(config))\r\n print(mid)\r\n if person == k - 1:\r\n # we need to calculate x & y\r\n # make list of .'s with length largestPeriods\r\n mList = ['.' for x in range(largestPeriods)]\r\n # middle of it is mid - startIndex\r\n middleOfmList = mid-startIndex\r\n if DEBUG:\r\n mList[middleOfmList] = 'o'\r\n print(mList)\r\n # number on left\r\n leftSide = len(mList[:middleOfmList])\r\n # number on right\r\n rightSide = len(mList[middleOfmList + 1:])\r\n x = max(leftSide, rightSide)\r\n y = min(leftSide, rightSide)\r\n\r\n if DEBUG:\r\n print (\"Right side: \" + str(rightSide))\r\n print(\"Case #\" + str(T) + \": \" + str(x) + \" \" + str(y))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/1974.py","file_name":"1974.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74715805955","text":"\"\"\"empty message\n\nRevision ID: 6850c5c8e7fb\nRevises: 1d5351822d50\nCreate Date: 2019-01-08 14:37:31.247705\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6850c5c8e7fb'\ndown_revision = '1d5351822d50'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n # op.add_column('admin', sa.Column('team', sa.String(length=100), nullable=True))\n op.add_column('messages', sa.Column('emp_type', sa.String(length=10), nullable=True))\n op.add_column('messages', sa.Column('location', sa.String(length=50), nullable=True))\n op.drop_column('messages', 'frequency')\n op.drop_column('messages', 'number_of_sends')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('messages', sa.Column('number_of_sends', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('messages', sa.Column('frequency', sa.VARCHAR(length=5), autoincrement=False, nullable=True))\n op.drop_column('messages', 'location')\n op.drop_column('messages', 'emp_type')\n op.drop_column('admin', 'team')\n # ### end Alembic commands ###\n","repo_name":"mozilla-it/newbie-bot","sub_path":"newbie/bot/migrations/versions/6850c5c8e7fb_.py","file_name":"6850c5c8e7fb_.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"34803966787","text":"#https://atcoder.jp/contests/abc081/tasks/abc081_b\n\nN=int(input())\nA_list = list(map(int, input().split()))\ncount = 0\nflag = True\n\nwhile flag:\n for i in range(N):\n if A_list[i] % 2 == 0: ##偶数\n A_list[i] = A_list[i]//2\n else: ##奇数\n flag = False\n break\n else:\n count += 1\n\nprint(count)\n","repo_name":"anderson-0000/sandbox","sub_path":"atcoder/B - Shift only.py","file_name":"B - Shift only.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40638288952","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass AdaMoCo(nn.Module):\n def __init__(self, src_model, momentum_model, features_length, num_classes, dataset_length, temporal_length):\n super(AdaMoCo, self).__init__()\n\n self.m = 0.999\n\n self.first_update = True\n\n self.src_model = src_model\n self.momentum_model = momentum_model\n\n self.momentum_model.requires_grad_(False)\n\n self.queue_ptr = 0\n self.mem_ptr = 0\n\n self.T_moco = 0.07\n\n #queue length\n self.K = min(16384, dataset_length)\n self.memory_length = temporal_length\n\n self.register_buffer(\"features\", torch.randn(features_length, self.K))\n self.register_buffer(\n \"labels\", torch.randint(0, num_classes, (self.K,))\n )\n self.register_buffer(\n \"idxs\", torch.randint(0, dataset_length, (self.K,))\n )\n self.register_buffer(\n \"mem_labels\", torch.randint(0, num_classes, (dataset_length, self.memory_length))\n )\n\n self.register_buffer(\n \"real_labels\", torch.randint(0, num_classes, (dataset_length,))\n )\n\n self.features = F.normalize(self.features, dim=0)\n\n self.features = self.features.cuda()\n self.labels = self.labels.cuda()\n self.mem_labels = self.mem_labels.cuda()\n self.real_labels = self.real_labels.cuda()\n self.idxs = self.idxs.cuda()\n\n @torch.no_grad()\n def _momentum_update_key_encoder(self):\n # encoder_q -> encoder_k\n for param_q, param_k in zip(\n self.src_model.parameters(), self.momentum_model.parameters()\n ):\n param_k.data = param_k.data * self.m + param_q.data * (1.0 - self.m)\n\n @torch.no_grad()\n def update_memory(self, epoch, idxs, keys, pseudo_labels, real_label):\n start = self.queue_ptr\n end = start + len(keys)\n idxs_replace = torch.arange(start, end).cuda() % self.K\n self.features[:, idxs_replace] = keys.T\n self.labels[idxs_replace] = pseudo_labels\n self.idxs[idxs_replace] = idxs\n self.real_labels[idxs_replace] = real_label\n self.queue_ptr = end % self.K\n\n self.mem_labels[idxs, self.mem_ptr] = pseudo_labels\n self.mem_ptr = epoch % self.memory_length\n\n @torch.no_grad()\n def get_memory(self):\n return self.features, self.labels\n\n def forward(self, im_q, im_k=None, cls_only=False):\n # compute query features\n feats_q, logits_q = self.src_model(im_q)\n\n if cls_only:\n return feats_q, logits_q\n\n q = 
F.normalize(feats_q, dim=1)\n\n # compute key features\n with torch.no_grad(): # no gradient to keys\n self._momentum_update_key_encoder() # update the key encoder\n\n k, _ = self.momentum_model(im_k)\n k = F.normalize(k, dim=1)\n\n # compute logits\n # Einstein sum is more intuitive\n # positive logits: Nx1\n l_pos = torch.einsum(\"nc,nc->n\", [q, k]).unsqueeze(-1)\n # negative logits: NxK\n l_neg = torch.einsum(\"nc,ck->nk\", [q, self.features.clone().detach()])\n\n # logits: Nx(1+K)\n logits_ins = torch.cat([l_pos, l_neg], dim=1)\n\n # apply temperature\n logits_ins /= self.T_moco\n\n # dequeue and enqueue will happen outside\n return feats_q, logits_q, logits_ins, k\n","repo_name":"MattiaLitrico/Guiding-Pseudo-labels-with-Uncertainty-Estimation-for-Source-free-Unsupervised-Domain-Adaptation","sub_path":"moco.py","file_name":"moco.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"61"} +{"seq_id":"17706399289","text":"from models import empty\n\nfrom flask import abort\n\nimport requests, os\n\n##s\n\ndef get_steam_user(steamid):\n resp = requests.get('http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key={0}&steamids={1}'.format(os.environ['steam_api_key'], steamid))\n\n if resp.status_code == 200:\n print('Succesful request')\n\n steam_user_obj = empty()\n\n player_summary = resp.json()['response']['players'][0]\n\n steam_user_obj.image = player_summary['avatarfull']\n steam_user_obj.name = player_summary['personaname']\n\n return steam_user_obj\n \n abort(401)\n \n \n\n ","repo_name":"lnfernal/vgo_gambling_website","sub_path":"utilities/steam_user.py","file_name":"steam_user.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30783484711","text":"from typing import Optional, Union, Tuple, List\nimport sys\nfrom sys import setrecursionlimit\nimport threading\nfrom io import IOBase, BytesIO\nfrom os import read, write, fstat\n#################################################################################\n\"\"\"Обертка для быстрого ввода/вывода\"\"\"\n\nBUFSIZE = 8192\n\n\nclass FastIO(IOBase):\n newlines = 0\n\n def __init__(self, file):\n self._fd = file.fileno()\n self.buffer = BytesIO()\n self.writable = \"x\" in file.mode or \"r\" not in file.mode\n self.write = self.buffer.write if self.writable else None\n\n def read(self):\n while True:\n b = read(self._fd, max(fstat(self._fd).st_size, BUFSIZE))\n if not b:\n break\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines = 0\n return self.buffer.read()\n\n def readline(self, size: int = ...):\n while self.newlines == 0:\n b = read(self._fd, max(fstat(self._fd).st_size, BUFSIZE))\n self.newlines = b.count(b\"\\n\") + (not b)\n ptr = self.buffer.tell()\n self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)\n self.newlines -= 1\n return self.buffer.readline()\n\n def flush(self):\n if self.writable:\n write(self._fd, self.buffer.getvalue())\n self.buffer.truncate(0), self.buffer.seek(0)\n\n\nclass IOWrapper(IOBase):\n def __init__(self, file):\n self.buffer = FastIO(file)\n self.flush = self.buffer.flush\n self.writable = self.buffer.writable\n self.write = lambda s: self.buffer.write(s.encode(\"ascii\"))\n self.read = lambda: self.buffer.read().decode(\"ascii\")\n self.readline = lambda: self.buffer.readline().decode(\"ascii\")\n\n\nstdin, stdout = 
IOWrapper(sys.stdin), IOWrapper(sys.stdout)\n\n\ndef split_input():\n return stdin.readline().split()\n\n\ndef _input():\n return stdin.readline()\n\n\ndef fast_print(*args, sep=\" \", end=\"\\n\"):\n for a in args:\n stdout.write(f\"{a}{sep}\")\n stdout.write(f\"{end}\")\n#################################################################################\n\n\nclass Graph:\n \"\"\"Граф\"\"\"\n def __init__(self, number_of_vertices: int) -> None:\n if number_of_vertices <= 0:\n raise ValueError(\"number of vertices should be greater than 0\")\n self.edges = [[] for _ in range(number_of_vertices)]\n self.number_of_vertices = number_of_vertices\n\n def add_edge(self, a: int, b: int) -> None:\n \"\"\"Добавление ребра\"\"\"\n a -= 1\n b -= 1\n if max(a, b) > self.number_of_vertices:\n raise ValueError(\"a and b should be less than or equal number of vertices\")\n self.edges[a].append(b)\n\n def __max_depth_helper(self, vertex: int, depth: int = 1, depths: Optional[List] = None) -> int:\n \"\"\"Поиск максимальной глубины\"\"\"\n if depths is None:\n depths = []\n if not self.edges[vertex]:\n depths.append(depth)\n for u in self.edges[vertex]:\n self.__max_depth_helper(u, depth + 1, depths)\n return max(depths)\n\n def max_depth(self, vertex: int) -> int:\n \"\"\"Поиск максимальной глубины\"\"\"\n return self.__max_depth_helper(vertex)\n\n\ndef main() -> None:\n \"\"\"Чтение, обработка, вывод\"\"\"\n number_of_reposts = int(_input())\n edges = []\n encoded_names = {}\n counter = 0\n for _ in range(number_of_reposts):\n name1, _, name2 = _input().lower().split()\n if name1 not in encoded_names:\n counter += 1\n encoded_names[name1] = counter\n if name2 not in encoded_names:\n counter += 1\n encoded_names[name2] = counter\n edges.append((encoded_names[name2], encoded_names[name1]))\n graph = Graph(counter)\n for edge in edges:\n graph.add_edge(*edge)\n res = graph.max_depth(1)\n stdout.write(f\"{res}\")\n\n\nif __name__ == \"__main__\":\n setrecursionlimit(10 ** 9)\n threading.stack_size(2 ** 26) # лучше использовать именно эту константу\n thread = threading.Thread(target=main)\n thread.start()\n","repo_name":"ZingyKizz/MADE","sub_path":"algo/10/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16191579648","text":"import json\nimport os\nimport sys\nfrom typing import Any, List, Union, cast\n\nimport click\nimport kachery_p2p as kp\n\n\n@click.group(help=\"Kachery peer-to-peer command-line client\")\ndef cli():\n pass\n\n@click.command(help=\"Show the channels that this node belongs to.\")\ndef get_channels():\n joined_channels = kp.get_channels()\n for a in joined_channels:\n url = a['channelConfigUrl']\n optstrings = []\n if a.get('isMessageProxy', False):\n optstrings.append('(message-proxy)')\n if a.get('isDataProxy', False):\n optstrings.append('(data-proxy)')\n if a.get('isPublic', False):\n optstrings.append('(public)')\n print(f'{url} {\" \".join(optstrings)}')\n\ndef _get_joined_channels_config() -> dict:\n c = kp.get('_joined_channels_config')\n if c is None:\n c = {\n 'joinedChannels': []\n }\n return cast(dict, c)\n\n# def _get_joined_channels_config_from_old_method() -> dict:\n# try:\n# f = kp.load_feed('_kachery_p2p_config', create=False)\n# except:\n# return {\n# 'joinedChannels': []\n# }\n# sf = f.get_subfeed('joined-channels')\n# num_messages = sf.get_num_local_messages()\n# if (num_messages > 0):\n# sf.set_position(num_messages - 1)\n# 
joined_channels_config = cast(dict, sf.get_next_message(wait_msec=100))\n# else:\n# joined_channels_config = {\n# 'joinedChannels': []\n# }\n# return joined_channels_config\n\ndef _set_joined_channels_config(joined_channels_config: dict):\n kp.set('_joined_channels_config', joined_channels_config)\n\n@click.command(help=\"Join a kachery-p2p channel\")\n@click.argument('channel_config_url')\n@click.option('--message-proxy', is_flag=True, help='Serve as a message proxy in this channel')\n@click.option('--data-proxy', is_flag=True, help='Serve as a data proxy in this channel')\n@click.option('--public', is_flag=True, help='Serve as a public node in this channel')\ndef join_channel(channel_config_url: str, message_proxy: bool, data_proxy: bool, public: bool):\n # handle this URL\n # https://gist.githubusercontent.com/magland/542b2ef7c268eb99d87d7b965567ece0/raw/0c1a9671b37ca117f7c0d4f2e6057a9a8eeb75b2/ccm-test-channel.yaml\n if channel_config_url.startswith('https://gist.githubusercontent.com/'):\n a = channel_config_url.split('/')\n if a[5] == 'raw':\n if len(a) == 8:\n print('WARNING: adjusting the URL of this gist in order to point to the latest snapshot')\n channel_config_url = '/'.join(a[:6]) + '/' + a[7]\n\n joined_channels_config = _get_joined_channels_config()\n joined_channels = joined_channels_config['joinedChannels']\n new_joined_channel_config = {\n 'channelConfigUrl': channel_config_url,\n 'isMessageProxy': message_proxy,\n 'isDataProxy': data_proxy,\n 'isPublic': public\n }\n new_joined_channels = []\n updated_existing = False\n for a in joined_channels:\n if a['channelConfigUrl'] == channel_config_url:\n updated_existing = True\n new_joined_channels.append(new_joined_channel_config)\n else:\n new_joined_channels.append(a)\n if not updated_existing:\n new_joined_channels.append(new_joined_channel_config)\n joined_channels_config['joinedChannels'] = new_joined_channels\n _set_joined_channels_config(joined_channels_config)\n if updated_existing:\n print('Updated existing channel configuration')\n else:\n print('Joined channel')\n\n@click.command(help=\"Leave a kachery-p2p channel\")\n@click.argument('channel_config_url')\ndef leave_channel(channel_config_url):\n joined_channels_config = _get_joined_channels_config()\n joined_channels = joined_channels_config['joinedChannels']\n if channel_config_url not in [c['channelConfigUrl'] for c in joined_channels]:\n print(f'Channel not found in joined channels: {channel_config_url}')\n else:\n joined_channels_config['joinedChannels'] = [c for c in joined_channels if c['channelConfigUrl'] != channel_config_url]\n _set_joined_channels_config(joined_channels_config)\n print(f'Left channel: {channel_config_url}')\n\n@click.command(help=\"Find a file.\")\n@click.argument('uri')\ndef find_file(uri):\n infos = kp.find_file(uri)\n for info in infos:\n print(json.dumps(info, indent=4))\n\n@click.command(help=\"Download a file.\")\n@click.argument('uri')\n@click.option('--dest', default=None, help='Optional local path of destination file.')\n@click.option('--from-node', default=None, help='Optionally specify the ID of the node to download from')\n@click.option('--from-channel', default=None, help='Optionally specify the name of the channel to download from')\n@click.option('--exp-nop2p', is_flag=True, help='Disable p2p')\n@click.option('--exp-file-server-url', multiple=True, help='Optional URLs of static file servers')\ndef load_file(uri, dest, from_node, from_channel, exp_nop2p, exp_file_server_url):\n kp._experimental_config(nop2p=exp_nop2p, 
file_server_urls=list(exp_file_server_url))\n x = kp.load_file(uri, dest=dest, from_node=from_node, from_channel=from_channel)\n print(x)\n\n@click.command(help=\"Store a file locally.\")\n@click.argument('path')\ndef store_file(path: str):\n x = kp.store_file(path)\n print(x)\n\n@click.command(help=\"Store a link to file locally.\")\n@click.argument('path')\ndef link_file(path: str):\n x = kp.link_file(path)\n print(x)\n\n@click.command(help=\"Download a file and write the content to stdout.\")\n@click.argument('uri')\n@click.option('--start', help='The start byte (optional)', default=None)\n@click.option('--end', help='The end byte non-inclusive (optional)', default=None)\n@click.option('--exp-nop2p', is_flag=True, help='Disable p2p')\n@click.option('--exp-file-server-url', multiple=True, help='Optional URLs of static file servers')\ndef cat_file(uri, start, end, exp_nop2p, exp_file_server_url):\n old_stdout = sys.stdout\n sys.stdout = sys.stderr\n\n kp._experimental_config(nop2p=exp_nop2p, file_server_urls=list(exp_file_server_url))\n\n if start is None and end is None:\n path1 = kp.load_file(uri)\n if not path1:\n raise Exception('Error loading file for cat.')\n sys.stdout = old_stdout\n with open(path1, 'rb') as f:\n while True:\n data = os.read(f.fileno(), 4096)\n if len(data) == 0:\n break\n os.write(sys.stdout.fileno(), data)\n else:\n assert start is not None and end is not None\n start = int(start)\n end = int(end)\n assert start <= end\n if start == end:\n return\n sys.stdout = old_stdout\n kp.load_bytes(uri=uri, start=start, end=end, write_to_stdout=True)\n\n@click.command(help=\"Print messages in a subfeed.\")\n@click.argument('uri')\ndef print_messages(uri):\n sf = kp.load_subfeed(uri)\n sf.print_messages()\n\n@click.command(help=\"Start the daemon.\")\n@click.option('--label', required=True, help='Label for this node')\n@click.option('--isbootstrap', is_flag=True, help='This is a bootstrap node')\n@click.option('--noudp', is_flag=True, help='Do not use a udp socket')\n@click.option('--nomulticast', is_flag=True, help='Do not use multicast udp')\n@click.option('--verbose', default=0, help='Verbosity level')\n@click.option('--method', default='npx', help='Method for starting daemon: npx (default) or dev')\n@click.option('--host', default='', help='IP for connecting to this daemon')\n@click.option('--public-url', default='', help='Base URL for public http access')\n@click.option('--port', default=0, help='Public http port to listen on')\n@click.option('--udp-port', default=None, help='Override the udp listen port (by default equals the http port)')\n@click.option('--websocket-port', default=0, help='Port for websocket server')\n@click.option('--static-config', default='', help='A URL or path to a configuration file for static configuration')\n@click.option('--node-arg', multiple=True, help='Additional arguments to send to node')\n@click.option('--install-only', is_flag=True, help='Only install the npm package (do not install)')\n@click.option('--auth-group', default='', help='The os group that has access to this daemon')\ndef start_daemon(label: str, method: str, verbose: int, host: str, public_url: str, port: int, udp_port: Union[int, None], websocket_port: int, isbootstrap: bool, noudp: bool, nomulticast: bool, static_config: str, node_arg: List[str], install_only: bool, auth_group: str):\n kp.start_daemon(\n label=label,\n method=method,\n verbose=verbose,\n host=host,\n public_url=public_url,\n port=port,\n udp_port=udp_port,\n websocket_port=websocket_port,\n 
isbootstrap=isbootstrap,\n noudp=noudp,\n nomulticast=nomulticast,\n static_config=static_config,\n node_arg=node_arg,\n install_only=install_only,\n auth_group=auth_group\n )\n\n@click.command(help=\"Stop the daemon.\")\ndef stop_daemon():\n kp.stop_daemon()\n\n@click.command(help=\"Print information about this node.\")\ndef node_info():\n node_id = kp.get_node_id()\n print(f'Node ID: {node_id}')\n\n@click.command(help=\"Display kachery_p2p version and exit.\")\ndef version():\n click.echo(f\"This is kachery_p2p version {kp.__version__} [protocol version {kp.__protocol_version__}].\")\n exit()\n\ncli.add_command(cat_file)\ncli.add_command(find_file)\ncli.add_command(get_channels)\ncli.add_command(join_channel)\ncli.add_command(leave_channel)\ncli.add_command(load_file)\ncli.add_command(store_file)\ncli.add_command(link_file)\ncli.add_command(node_info)\ncli.add_command(print_messages)\ncli.add_command(start_daemon)\ncli.add_command(stop_daemon)\ncli.add_command(version)","repo_name":"flatironinstitute/kachery-p2p","sub_path":"kachery_p2p/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"27507761422","text":"import boto3\n\ndef build_response(message):\n return {\n \"dialogAction\":{\n \"type\":\"Close\",\n \"fulfillmentState\":\"Fulfilled\",\n \"message\":{\n \"contentType\":\"PlainText\",\n \"content\":message\n }\n }\n }\n\n# DynamoDB table has two fields - question, answer\ndef lambda_handler(event, context):\n client = boto3.client('dynamodb')\n \n question = event['currentIntent']['slots']['slotOne']\n question = question.lower()\n\n response = client.get_item(TableName='bookname', Key={'book':{'S':str(question)}})\n \n #print(context)\n message =\"sorry book is not available \"\n #message = \"sorry book is not available \"\n \n if 'Item' in response.keys():\n message = \"Great !! book is available with us \"\n else:\n message=\"sorry book is not available \"\n \n return build_response(message)\n","repo_name":"mekarahul1/ml-advance","sub_path":"q3_chatbot/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73840374915","text":"\nimport time\nfrom selenium import webdriver\n\nbrowser = webdriver.Chrome()\n\nurl = \"https://www.google.com\"\nbrowser.get(url)\n\ntime.sleep(2)\nname = 'q'\nsearch_el = browser.find_element(\"name\",\"q\")\n#print(search_el)\nsearch_el.send_keys(\"selenium python\")\n\nsubmit_btn_ele = browser.find_element_by_css_selector(\"input[type='submit']\")\nprint(submit_btn_ele.get_attribute('name'))\ntime.sleep(2)\nsubmit_btn_ele.click()","repo_name":"fhraju/30_Days_Of_Python","sub_path":"Day_16/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39655812052","text":"import random\nfrom time import sleep\ns = 0\ntitulo = 'JOGO DE PAR OU IMPAR'\nwhile True:\n print(titulo.center(64))\n print('-' * 64)\n print('Máquina: par ou impar? ')\n x = str(input('Você: '))\n if x.upper() == 'PAR':\n print('Máquina: Impar')\n print('-' * 64)\n elif x.upper() == 'IMPAR':\n print('Máquina: Par')\n print('-' * 64)\n else:\n print('Máquina: Não entendi')\n break\n y = int(input('Digite 1 ou 2: '))\n if y != 1 and y != 2:\n print('Máquina: Não sabe jogar par ou impar? 
Só vale 1 e 2!')\n break\n z = int(random.randrange(1,3))\n print(f'Máquina: Coloco {z}')\n sleep(1)\n if x.upper() == 'PAR' and (z + y) % 2 == 1:\n print('Máquina: Ganhei, kkkkkk')\n break\n \n elif x.upper() == 'IMPAR' and (z + y) % 2 == 0:\n print(f'Máquina: Ganhei, kkkkkk')\n break\n else:\n s += 1\n print(f'Máquina: Parabéns, tu ganhou! Essa é tua {s}° vitória consecutiva!')\n print('-' * 64)","repo_name":"edufsi/CursoPython","sub_path":"Exercícios_Básicos_CursoEmVideo/Par_ou_ímpar.py","file_name":"Par_ou_ímpar.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23542537331","text":"from pdb import set_trace\n\ndef flip(row, k):\n res, i = 0, 0\n while i < len(row):\n if row[i] == '+':\n i += 1\n continue\n\n if i + k > len(row): return 'IMPOSSIBLE'\n row[i:i + k] = ''.join(map(lambda c: '-' if c == '+' else '+', row[i:i + k]))\n res += 1\n\n return res\n\ndef smart_flip(n, k, npancakes):\n r, nshift = 0, 0\n\n while nshift < npancakes:\n if n & 1:\n n >>= 1\n nshift += 1\n continue\n\n n ^= (1 << k) - 1\n r += 1\n\n if n > 0: return 'IMPOSSIBLE'\n return r\n\ndef as_number(string):\n return int(''.join(map(lambda c: '1' if c == '+' else '0', string)), 2)\n\nif __name__ == '__main__' :\n import sys\n T = int(sys.stdin.readline())\n for i in range(1, T + 1):\n row, k = sys.stdin.readline().split(' ')\n npancakes = len(row)\n k = int(k)\n print('Case #{}: {}'.format(i, smart_flip(as_number(row), k, npancakes)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/2135.py","file_name":"2135.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2001041542","text":"'''\ndel command:\nThis command is used to remove any specific element in the list or to remove entire list object\npermanently. 
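Deleting a single element leaves the list usable, but deleting the entire list object unbinds the name, so the final print(lst) below raises a NameError.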
\n'''\nlst=[10,20,'Python',30,True,'Narayana',10,3+4j]\ndel lst[2] \nprint(lst)\n\nlst=[10,20,'Python',30,True,'Narayana',10,3+4j]\ndel lst \nprint(lst)\n\n","repo_name":"Debasis72/Python-Pratice-file","sub_path":"List function/del command.py","file_name":"del command.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21055099651","text":"from flask import Blueprint, redirect, url_for, render_template\nfrom flask_login import current_user\nfrom sqlalchemy import and_\n\nfrom bluelog import User, db\nfrom bluelog.models import Post, Comment, UnreadMessage\n\nadmin_bp = Blueprint('admin', __name__)\n\n\n@admin_bp.route('/test')\ndef login():\n return \"this is admin's test\"\n\n\n@admin_bp.route('/index')\ndef index():\n if not current_user.is_authenticated:\n return redirect(url_for('auth.login'))\n else:\n recentPosts = Post.query.filter(Post.user_id == current_user.id).order_by(Post.postTime.desc()).limit(5)\n recentComments = Comment.query.filter(Comment.user_id == current_user.id) \\\n .order_by(Comment.timestamp.desc()).limit(5)\n return render_template('admin/index.html', posts=recentPosts, comments=recentComments)\n\n\n@admin_bp.route('/posts')\ndef managePosts():\n if not current_user.is_authenticated:\n return redirect(url_for('blog.index'))\n else:\n allPosts = Post.query.filter(Post.user_id == current_user.id).order_by(Post.postTime.desc()).all()\n return render_template('admin/managePosts.html', posts=allPosts)\n\n\n@admin_bp.route('/comments')\ndef manageComments():\n if not current_user.is_authenticated:\n return redirect(url_for('blog.index'))\n else:\n allComments = Comment.query.filter(Comment.user_id == current_user.id).order_by(Comment.timestamp.desc()).all()\n return render_template('admin/manageComments.html', comments=allComments)\n\n\n@admin_bp.route('/deletePost/')\ndef deletePost(post_id):\n post = Post.query.get(post_id)\n if post:\n posterId = post.user_id\n if current_user.is_authenticated and current_user.id == posterId:\n for comment in post.comments:\n unreadMessages = UnreadMessage.query.filter(UnreadMessage.comment_id == comment.id).all()\n for unreadMessage in unreadMessages:\n db.session.delete(unreadMessage)\n db.session.delete(comment)\n db.session.delete(post)\n db.session.commit()\n return redirect(url_for('admin.managePosts'))\n else:\n return redirect(url_for('blog.index'))\n else:\n return redirect(url_for('blog.index'))\n\n\n@admin_bp.route('/deleteComment/')\ndef deleteComment(comment_id):\n comment = Comment.query.get(comment_id)\n if comment:\n commenterId = comment.user_id\n if current_user.is_authenticated and current_user.id == commenterId:\n unreadMessages = UnreadMessage.query.filter(UnreadMessage.comment_id == comment.id).all()\n for unreadMessage in unreadMessages:\n db.session.delete(unreadMessage)\n db.session.delete(comment)\n db.session.commit()\n return redirect(url_for('admin.manageComments'))\n return redirect(url_for('blog.index'))\n\n\n@admin_bp.route('/unreadMessages')\ndef manageUnreadMessages():\n if not current_user.is_authenticated:\n return redirect(url_for('blog.index'))\n else:\n unreadMessages = UnreadMessage.query.filter(and_(UnreadMessage.user_id == current_user.id,\n UnreadMessage.haveRead == 0)).all()\n readMessages = UnreadMessage.query.filter(and_(UnreadMessage.user_id == current_user.id,\n UnreadMessage.haveRead == 1)).all()\n unreadMessages = Comment.query.filter(Comment.id.in_([unreadMessage.comment_id for 
unreadMessage in unreadMessages])).order_by(Comment.timestamp.desc()).all()\n readMessages = Comment.query.filter(Comment.id.in_([readMessage.comment_id for readMessage in readMessages])).order_by(Comment.timestamp.desc()).all()\n return render_template('admin/manageUnreadMessage.html', comments=unreadMessages, readComments=readMessages)\n\n\n@admin_bp.route('/readAll')\ndef readAll():\n if not current_user.is_authenticated:\n return redirect(url_for('blog.index'))\n else:\n unreadMessages = UnreadMessage.query.filter(and_(UnreadMessage.user_id == current_user.id,\n UnreadMessage.haveRead == 0)).all()\n for unreadMessage in unreadMessages:\n unreadMessage.haveRead = True\n db.session.commit()\n return redirect(url_for('admin.manageUnreadMessages'))\n\n\n","repo_name":"summerTony9/Bluelog","sub_path":"bluelog/blueprints/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20308624655","text":"def readfiles():\n with open(\"dane5-1.txt\",\"r\")as dane1:\n x1=[int(x.strip()) for x in dane1]\n with open(\"dane5-2.txt\",\"r\")as dane2:\n x2=[int(x.strip()) for x in dane2]\n with open(\"dane5-3.txt\",\"r\")as dane3:\n x3=[int(x.strip()) for x in dane3]\n return(x1,x2,x3)\ndata1,data2,data3=readfiles()\ndef getmaxsum(data):\n maxnum=0\n maxresult=0\n for num in data:\n maxnum+=num\n if maxnum>maxresult:\n maxresult=maxnum\n if maxnum<0:\n maxnum=0\n return maxresult\ndef zadB(d1,d2,d3):\n print(f\"dane1 {getmaxsum(d1)}\")\n print(f\"dane2 {getmaxsum(d2)}\")\n print(f\"dane3 {getmaxsum(d3)}\")\n return 0\nzadB(data1,data2,data3)","repo_name":"aloix123/Korepetycje","sub_path":"matura2005maj/zad5b.py","file_name":"zad5b.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18098889115","text":"from sqlalchemy.engine import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import date\n\n\nfrom models import Address_book, Address, Birthday, Phone, Email, Record\n\n\nengine = create_engine(\"sqlite:///helper.db\")\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nbd1 = Birthday(bd_date=date(day=1, month=12, year=1998))\nbd2 = Birthday(bd_date=date(day=14, month=8, year=2000))\n\nad1 = Address(address=\"Peremohy str\")\nad2 = Address(address=\"Bohunska str\")\nad3 = Address(address=\"Theatralna str\")\n\nemail1 = Email(email=\"u1@gmail.com\")\nemail2 = Email(email=\"u2@gmail.com\")\nemail3 = Email(email=\"u3@gmail.com\")\n\nphone1 = Phone(number=\"0948684832\")\nphone2 = Phone(number=\"04950684958\")\nphone3 = Phone(number=\"9847564234\")\n\nuser1 = Record(name=\"Daria\", birthday=bd1)\nuser2 = Record(name=\"Claire\", birthday=bd2)\n\nuser1.phones = [phone1, phone2]\nuser2.phones = [phone3]\n\nuser1.emails = [email1]\nuser2.emails = [email2, email3]\n\nuser1.addresses = [ad1, ad2]\nuser2.addresses = [ad3]\n\nbook = Address_book()\n\nbook.records = [user1, user2]\n\n\nsession.add(book)\nsession.commit()\nsession.close()","repo_name":"DariaKutkanych/go-it-web","sub_path":"lesson9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21320559610","text":"# Imports\nfrom pathlib import Path\nimport blobconverter\nimport numpy as np\nimport math\nimport cv2\nimport depthai as dai\nimport re\nimport time\n\n# Set 
BlobConverter Information\nopenvinoVersion = \"2021.4\"\np = dai.Pipeline()\np.setOpenVINOVersion(version=dai.OpenVINO.Version.VERSION_2021_4)\n\n# Download Model for use\nsize = (300,300)\n# size = (544,320)\n# nnPath = blobconverter.from_zoo(\"person-detection-retail-0013\", shaves = 6)\nnnPath = blobconverter.from_zoo(\"face-detection-retail-0004\", shaves=6)\n# Labels\nlabelMap = [\"background\", \"person\"]\n\n# Set Resolution of BW Cameras\nbw_resolution = dai.MonoCameraProperties.SensorResolution.THE_720_P\n\n# Set Resolution of Color Camera\ncolor_resolution = dai.ColorCameraProperties.SensorResolution.THE_1080_P\n\n# Create RGB Camera Node\nrgb_cam = p.create(dai.node.ColorCamera)\nrgb_cam.setPreviewSize(size[0], size[1])\nrgb_cam.setBoardSocket(dai.CameraBoardSocket.RGB)\nrgb_cam.setResolution(color_resolution)\nrgb_cam.setInterleaved(False)\nrgb_cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)\nrgb_cam.setPreviewKeepAspectRatio(False)\nrgb_cam.setFps(15)\n# Create L-MONO Node\nl_cam = p.create(dai.node.MonoCamera)\nl_cam.setBoardSocket(dai.CameraBoardSocket.LEFT)\nl_cam.setResolution(bw_resolution)\nl_cam.setFps(15)\n# Create R-MONO Node\nr_cam = p.create(dai.node.MonoCamera)\nr_cam.setBoardSocket(dai.CameraBoardSocket.RIGHT)\nr_cam.setResolution(bw_resolution)\nr_cam.setFps(15)\n# Create Depth Node\nstereo = p.create(dai.node.StereoDepth)\nstereo.setLeftRightCheck(False)\nstereo.setExtendedDisparity(False)\nstereo.setSubpixel(False)\nstereo.initialConfig.setConfidenceThreshold(255)\n# Link R-Mono to Stereo\nr_cam.out.link(stereo.right)\n\n# Link L-Mono to Stero\nl_cam.out.link(stereo.left)\n\n# Create NN Node\nnn = p.create(dai.node.MobileNetSpatialDetectionNetwork)\nnn.setBlobPath(str(Path(nnPath).resolve().absolute()))\n# ignore detections below 50%\nnn.setConfidenceThreshold(0.5)\nnn.input.setBlocking(True)\n\n# Link RGB_Cam to NN\nrgb_cam.preview.link(nn.input)\n\n# Link Stereo to NN\nstereo.depth.link(nn.inputDepth)\n\n# Create Blurring Node\nblur = p.create(dai.node.NeuralNetwork)\nblur.setBlobPath(str(Path(__file__).parent/'out'/'model.blob'))\nnn.passthrough.link(blur.input)\n\n# Create RGB Out Node\nxout_rgb = p.create(dai.node.XLinkOut)\nxout_rgb.setStreamName(\"rgb\")\n\n# Link NN.RGB Passthrough -> XLinkOut\nnn.passthrough.link(xout_rgb.input)\n# rgb_cam.video.link(xout_rgb.input)\n# blur.out.link(xout_rgb.input)\n\n# Create Depth-Output Node\nxout_depth = p.create(dai.node.XLinkOut)\nxout_depth.setStreamName(\"depth\")\n\n# Link NN.passthroughDepth -> XLinkOut\n# stereo.depth.link(xout_depth.input)\nnn.passthroughDepth.link(xout_depth.input)\n\n# Create NN-Out Node\nxout_nn = p.create(dai.node.XLinkOut)\nxout_nn.setStreamName(\"nn\")\n\n# Link NN.Out -> NNout (Information about detection)\nnn.out.link(xout_nn.input)\n\n# # Create BoundingBox Output Node\nxout_bb = p.create(dai.node.XLinkOut)\nxout_bb.setStreamName(\"bb\")\n\n# Create Left Camera Stream\nxout_left = p.create(dai.node.XLinkOut)\nxout_left.setStreamName(\"left\")\nl_cam.out.link(xout_left.input)\n# Create Right Camera Stream\nxout_right = p.create(dai.node.XLinkOut)\nxout_right.setStreamName(\"right\")\nr_cam.out.link(xout_right.input)\n# # Link NN.boundingBoxMapping -> bb_out\nnn.boundingBoxMapping.link(xout_bb.input)\n\ncv2.namedWindow(\"Depth\")\ncv2.namedWindow(\"Image\")\ncv2.namedWindow(\"Right\")\ncv2.namedWindow(\"Left\")\ncv2.moveWindow(\"Depth\", 0,0)\ncv2.moveWindow(\"Image\", 0, 513)\ncv2.moveWindow(\"Left\", 800, 0)\ncv2.moveWindow(\"Right\", 800, 513)\n# Connect to Device and 
Start Pipeline\nwhile True:\n try:\n with dai.Device(p) as dev:\n rgbQueue = dev.getOutputQueue(name = \"rgb\", maxSize = 4, blocking = False)\n leftQueue = dev.getOutputQueue(name = \"left\", maxSize = 4, blocking = False)\n rightQueue = dev.getOutputQueue(name = \"right\", maxSize = 4, blocking = False)\n depthQueue = dev.getOutputQueue(name=\"depth\", maxSize= 4, blocking=False)\n nnQueue = dev.getOutputQueue(name=\"nn\", maxSize=4, blocking=False)\n bbQueue = dev.getOutputQueue(name=\"bb\", maxSize=4, blocking=False)\n\n startTime = time.monotonic()\n counter = 0\n fps = 0\n\n while time.monotonic()-startTime <60*5:\n rgb_in = rgbQueue.get()\n left_in = leftQueue.get()\n right_in = rightQueue.get()\n depth_in = depthQueue.get()\n nn_out = nnQueue.get()\n\n counter += 1\n # current_time = time.monotonic()\n # if (current_time - startTime) > 1:\n # fps = counter/(current_time - startTime)\n # counter = 0\n # startTime = current_time\n # rgbFrame = rgb_in.getFirstLayerFp16()\n # rgbFrame = np.array(rgbFrame, dtype=np.uint8)\n # shape = (300, 300, 3)\n # rgbFrame = rgbFrame.reshape(shape)\n rgbFrame = rgb_in.getCvFrame()\n rgbFrame = cv2.resize(rgbFrame, (int(1600*0.45), int(900*0.45)))\n\n leftFrame = left_in.getCvFrame()\n leftFrame = cv2.resize(leftFrame, (int(1600*0.45), int(900*0.45)))\n\n rightFrame = right_in.getCvFrame()\n rightFrame = cv2.resize(rightFrame, (int(1600*0.45), int(900*0.45)))\n\n depthFrame = depth_in.getFrame()\n depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)\n depthFrameColor = cv2.equalizeHist(depthFrameColor)\n depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_TURBO)\n depthFrameColor = cv2.resize(depthFrameColor, (int(1600*0.45), int(900*0.45)))\n\n detections = nn_out.detections\n if len(detections) != 0:\n bbMapping = bbQueue.get()\n roiDatas = bbMapping.getConfigData()\n\n for roiData in roiDatas:\n roi = roiData.roi\n roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0])\n topLeft = roi.topLeft()\n bottomRight = roi.bottomRight()\n xmid = int((int(topLeft.x)-int(bottomRight.x))/2)\n xmin = int(topLeft.x)+xmid\n ymin = int(topLeft.y)\n xmax = int(bottomRight.x)+xmid\n ymax = int(bottomRight.y)\n\n cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), 255, 10)\n rgb_height = rgbFrame.shape[0]\n rgb_width = rgbFrame.shape[1]\n\n for detection in detections:\n rgb_x1 = int(detection.xmin*rgb_width)\n rgb_x2 = int(detection.xmax*rgb_width)\n rgb_y1 = int(detection.ymin*rgb_height)\n rgb_y2 = int(detection.ymax*rgb_height)\n\n cv2.rectangle(rgbFrame, (rgb_x1, rgb_y1), (rgb_x2, rgb_y2), 255, 10)\n cv2.putText(rgbFrame, f\"Z: {int(detection.spatialCoordinates.z)/1000} m\",\n (rgb_x1+int((rgb_x2-rgb_x1)/4), rgb_y1+int((rgb_y2-rgb_y1)/2)), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))\n\n # cv2.putText(rgbFrame, \"NN fps: {:.2f}\".format(fps), (2, rgbFrame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, (255, 255, 255))\n cv2.putText(rgbFrame, \"COLOR IMAGE\", (int(rgbFrame.shape[0]/2)+50, 50), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (255, 255, 255), 2)\n cv2.putText(depthFrameColor, \"DEPTH IMAGE\", (int(\n depthFrameColor.shape[0]/2)+50, 50), cv2.FONT_HERSHEY_TRIPLEX, 1.0, (0, 0, 0), 2)\n\n cv2.imshow(\"Depth\", depthFrameColor)\n cv2.imshow(\"Image\", rgbFrame)\n cv2.imshow(\"Left\", leftFrame)\n cv2.imshow(\"Right\", rightFrame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n except:\n 
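# NOTE (added comment): this bare 'except' swallows every error, including KeyboardInterrupt, before retrying; 'except Exception' would be safer here.\n        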
pass\n","repo_name":"cfarm6/crs_demo","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":8075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36976187388","text":"# to run this: python3 checkboxes.py (the script opens https://demoqa.com/checkbox)\n# playwright show-trace logs/trace.zip\nimport asyncio\nfrom playwright.async_api import async_playwright, expect\n\n\nasync def main():\n    async with async_playwright() as p:\n        browser = await p.chromium.launch(headless=False)\n        context = await browser.new_context()\n        await context.tracing.start(screenshots=True, snapshots=True, sources=True)\n        page = await context.new_page()\n\n        await page.set_viewport_size({\"width\": 1800, \"height\": 1200})\n        await page.goto(\"https://demoqa.com/checkbox\")\n        # -Actions\n        await page.check('[for=\"tree-node-home\"]')\n        await page.screenshot(path=\"screenshots/checkboxes.png\")\n        # -Assertions\n        assert await page.is_checked('[for=\"tree-node-home\"]') is True\n        await expect(page.locator(\"#result\")).to_have_text(\"You have selected :homedesktopnotescommandsdocumentsworkspacereactangularveuofficepublicprivateclassifiedgeneraldownloadswordFileexcelFile\")\n        # -Stopping Tracing\n        await context.tracing.stop(path=\"logs/trace.zip\")\n        # -Closing browser\n        await browser.close()\n\nasyncio.run(main())\n","repo_name":"iAntonova/pythonProjectTry1","sub_path":"inputs/checkboxes.py","file_name":"checkboxes.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71353882754","text":"#!/usr/bin/env python\n#! coding:utf-8\nimport numpy as np\nimport rospy\nimport tf\nfrom geometry_msgs.msg import Quaternion\nfrom std_msgs.msg import Int32MultiArray\nfrom visualization_msgs.msg import Marker, MarkerArray\n\nfrom amsl_navigation_msgs.msg import NodeEdgeMap\n\nclass GlobalPathViz:\n    def __init__(self):\n        rospy.init_node('global_path_viz')\n\n        #Publisher\n        self.global_path_marker_pub = rospy.Publisher('/global_path/path/viz', MarkerArray, queue_size=1, latch=True)\n\n        #Subscriber\n        self.node_edge_map_sub = rospy.Subscriber('/node_edge_map/map', NodeEdgeMap, self.node_edge_map_callback)\n        self.global_path_sub = rospy.Subscriber('/global_path/path', Int32MultiArray, self.global_path_callback)\n\n        self.global_path_marker = MarkerArray()\n        self.global_path = Int32MultiArray()\n        self.node_edge_map = NodeEdgeMap()\n        self.sub_map = False\n\n    def node_edge_map_callback(self, msg):\n        self.node_edge_map = msg\n        self.sub_map = True\n\n    def global_path_callback(self, msg):\n        self.global_path = msg\n\n    def process(self):\n        rate = rospy.Rate(10)\n\n        while not rospy.is_shutdown():\n            if self.sub_map:\n                self.make_global_path_marker()\n                self.global_path_marker_pub.publish(self.global_path_marker)\n\n            rate.sleep()\n\n    def make_global_path_marker(self):\n        global_path_marker = MarkerArray()\n        time = rospy.get_rostime()\n        for i in range(len(self.global_path.data)-1):\n            current_node_id = self.global_path.data[i]\n            next_node_id = self.global_path.data[i+1]\n            x = (self.node_edge_map.nodes[current_node_id].point.x + self.node_edge_map.nodes[next_node_id].point.x ) / 2.0\n            y = (self.node_edge_map.nodes[current_node_id].point.y + self.node_edge_map.nodes[next_node_id].point.y ) / 2.0\n            yaw = np.arctan2(self.node_edge_map.nodes[next_node_id].point.y - self.node_edge_map.nodes[current_node_id].point.y, self.node_edge_map.nodes[next_node_id].point.x - self.node_edge_map.nodes[current_node_id].point.x)\n            x += 
np.cos(np.pi*0.5-yaw)\n y -= np.sin(np.pi*0.5-yaw)\n length = 0.0\n for j in range(len(self.node_edge_map.edges)):\n if self.node_edge_map.edges[j].node0_id == current_node_id:\n if self.node_edge_map.edges[j].node1_id == next_node_id:\n length = self.node_edge_map.edges[j].distance*0.3\n\n n = Marker()\n n.ns = \"global_path\"\n n.header.frame_id = self.node_edge_map.header.frame_id\n n.header.stamp = time\n n.id = i\n n.action = Marker().ADD\n n.type = Marker().ARROW\n n.lifetime = rospy.Duration()\n self.set_marker_scale(n, length, 0.5, 0.1)\n self.set_marker_rgb(n, 1., 1., 1.)\n self.set_marker_position(n, x, y, 1.0)\n self.set_marker_orientation(n, 0, 0, yaw)\n global_path_marker.markers.append(n)\n self.global_path_marker = global_path_marker\n\n def set_marker_scale(self, marker, x, y, z):\n marker.scale.x = x\n marker.scale.y = y\n marker.scale.z = z\n\n def set_marker_position(self, marker, x, y, z):\n marker.pose.position.x = x\n marker.pose.position.y = y\n marker.pose.position.z = z\n\n def set_marker_rgb(self, marker, r, g, b, a=0.7):\n marker.color.r = r\n marker.color.g = g\n marker.color.b = b\n marker.color.a = a\n\n def set_marker_orientation(self, marker, r, p, y):\n q = tf.transformations.quaternion_from_euler(r, p, y)\n marker.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])\n\nif __name__=='__main__':\n global_path_viz = GlobalPathViz()\n global_path_viz.process()\n","repo_name":"amslabtech/dijkstra_global_planner","sub_path":"scripts/global_path_viz.py","file_name":"global_path_viz.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"73917628353","text":"import logging\nimport os\nfrom abc import ABC, abstractmethod\n\nfrom google.protobuf import json_format\nfrom ymir.protos import mir_controller_service_pb2 as mirsvrpb\nfrom ymir.protos import mir_common_pb2 as mir_common\n\nfrom controller.utils import code, checker\n\n\nclass BaseMirControllerInvoker(ABC):\n \"\"\"\n base class for all mir controller invokers \\n\n\n Attributes:\n sandbox_root: root of sandbox dir of all users\n user_name/user_path: user unique id and corresponding root path\n repo_name/repo_path: repo unique id and corresponding root path\n task_id: unique task id, check ymir_proto.util for more infos.\n _request: grpc request\n \"\"\"\n def __init__(self,\n sandbox_root: str,\n request: mirsvrpb.GeneralReq,\n assets_config: dict,\n async_mode: bool = False) -> None:\n super().__init__()\n\n # check sandbox_root\n if not os.path.isdir(sandbox_root):\n raise RuntimeError(f\"sandbox root {sandbox_root} not found, abort\")\n self._sandbox_root = sandbox_root\n\n ret = checker.check_request(request=request, prerequisites=[checker.Prerequisites.CHECK_TASK_ID])\n if (ret.code != code.ResCode.CTR_OK):\n raise RuntimeError(f\"task_id {request.task_id} error, abort\")\n self._task_id = request.task_id\n\n # check user_id\n user_id = request.user_id\n if user_id:\n self._user_id = user_id\n self._user_root = os.path.join(sandbox_root, user_id)\n\n # check repo_id\n repo_id = request.repo_id\n if user_id and repo_id:\n self._repo_id = repo_id\n self._repo_root = os.path.join(self._user_root, repo_id)\n\n self._request = request\n self._assets_config = assets_config\n self._async_mode = async_mode\n self._work_dir = self.prepare_work_dir()\n\n # functions about invoke and pre_invoke\n def server_invoke(self) -> mirsvrpb.GeneralResp:\n logging.info(str(self))\n\n response = self.pre_invoke()\n 
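# pre_invoke performs request validation; any non-OK code is returned to the caller before invoke() runs.\n        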
if response.code != code.ResCode.CTR_OK:\n            return response\n\n        return self.invoke()\n\n    @abstractmethod\n    def pre_invoke(self) -> mirsvrpb.GeneralResp:\n        pass\n\n    @abstractmethod\n    def invoke(self) -> mirsvrpb.GeneralResp:\n        pass\n\n    def prepare_work_dir(self) -> str:\n        # Prepare working dir.\n        if self._request.req_type == mirsvrpb.TASK_CREATE:\n            type_dir = mir_common.TaskType.Name(self._request.req_create_task.task_type)\n        else:\n            type_dir = mirsvrpb.RequestType.Name(self._request.req_type)\n\n        work_dir = os.path.join(self._sandbox_root, \"work_dir\", type_dir, self._request.task_id)\n        os.makedirs(os.path.join(work_dir, \"out\"), exist_ok=True)\n\n        return work_dir\n\n    def __repr__(self) -> str:\n        \"\"\"Show info about this invoker and the request\"\"\"\n        req_info = json_format.MessageToDict(self._request,\n                                             preserving_proto_field_name=True,\n                                             use_integers_for_enums=True)\n\n        return f\"{self._repr()} \\n request: \\n {req_info}\"\n\n    @abstractmethod\n    def _repr(self) -> str:\n        raise NotImplementedError()\n","repo_name":"IJtLJZ8Rm4Yr/ymir-backend","sub_path":"src/pymir-controller/controller/invoker/invoker_cmd_base.py","file_name":"invoker_cmd_base.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24489317745","text":"#code to reverse any string\ndef reverseString(text):\n    counter = len(text) -1\n    rev=\"\"\n    while counter >= 0:\n        rev = rev +text[counter]\n        counter = counter - 1\n    #this is the code that evaluates the string that was entered\n\n    return rev\nname = input(\"enter your name: \")\nprint(f'This is your name spelled backwards: {reverseString(name)}')\nif name==reverseString(name):\n    print('Your name is a palindrome')\nelse:#this code evaluates if the string entered was a palindrome or not\n    print('Nice Name')\n\n\n","repo_name":"shyamsiryes/pythonClass","sub_path":"reverseString.py","file_name":"reverseString.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38334094008","text":"# coding=utf-8\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport os\nimport random\nimport pandas as pd\nfrom PIL import Image\n# from sklearn.model_selection import train_test_split\n\ndef convert_label(data):\n    columns=['species','individual_id']\n    for f in columns:\n        data[f]=data[f].map(dict(zip(data[f].unique(),range(0,data[f].nunique()))))\n    return data\n\ndef file_split(data_path, csv_name, train_classes, species, special_id,percentage):\n    '''\n    special_id: number of new (held-out) classes\n    percentage: fraction of the data used for training\n    '''\n    data = pd.read_csv(os.path.join(data_path,csv_name))\n    data['species']=data['species'].map(dict(zip(data['species'].unique(),range(0,data['species'].nunique()))))\n    data_copy=data.drop_duplicates(subset=['individual_id'] , keep='first', inplace=False)\n    test_id=[]\n    # number of training classes + number of new classes\n    numbers=random.sample(range(train_classes+special_id),special_id)\n    for i in range(special_id):\n        item=str(data_copy.iloc[numbers[i],2])\n        test_id.append(item)\n    #print(test_id)\n    special=data[data['individual_id'].isin(test_id)]\n    other=data[~data['individual_id'].isin(test_id)]\n    other = convert_label(other)\n    # number of species in the training set; not used for now\n    special.reset_index(drop=True, inplace=True)\n    special['species'] =pd.Series((species)*np.ones(special.shape[0]))\n    # number of classes in the training set\n    special['individual_id'] =pd.Series((train_classes)*np.ones(special.shape[0]))\n\n    other = other.sample(frac=1.0) # shuffle everything\n    cut_idx = int(round(percentage* other.shape[0]))\n    other_all, special_all = other.iloc[:cut_idx], other.iloc[cut_idx:]\n    # for i in special_all.itertuples():\n    #     if i[3] ==358:\n    #         print(i)\n    # print('------')\n    # for i in other_all.itertuples():\n    #     if i[3] ==358:\n    #         print(i)\n    # special_all=special_all.append(special)\n    \n    return other_all,special_all\n\n#print(data)\nclass WhaleDataset(Dataset):\n    '''\n    Dataset with three features per sample:\n    image, piece, label\n    '''\n    def __init__(self, data, data_path, transform= None):\n        self.data_path = data_path\n\n        data = np.array(data).tolist()\n        self.data = []\n        for line in data:\n            # line = np.char.rsplit(np.char.strip(line), ' ')\n            self.data.append(line)\n        self.transform = transform\n        self.target_transform = None\n        print('init final!')\n        #print(self.data)\n    def __getitem__(self, index):\n        fn, piece, label = self.data[index]\n        # print(type(fn),type(piece),type(label))\n        fn, piece, label = fn, np.array(int(piece)), np.array(int(label))\n        image = Image.open(os.path.join(self.data_path,'train_images',fn)).convert('RGB')\n        w,h = image.size\n        if w>=h:\n            image=image.resize([224,int(224*h/w)])\n        else:\n            image=image.resize([int(224*w/h),224])\n        #image.show()\n        #image = np.array(image)\n        if self.transform is not None:\n            image = self.transform(image)\n        if self.target_transform is not None:\n            label = self.target_transform(label)\n\n        return {'image':image, 'species':torch.from_numpy(piece), 'label':torch.from_numpy(label)}\n    def __len__(self):\n        return len(self.data)\n\n#train_ds, val_ds = file_split('/home/public/happy-whale-and-dolphin', 'train.csv', 15287, 30, 300, 0.7)\n# train_transform = transforms.Compose([\n#     transforms.RandomCrop([224, 224],pad_if_needed=True,padding_mode=symmetric),\n#     transforms.RandomHorizontalFlip(p=0.5),\n#     transforms.Resize([224,224]),\n\n#     # normalization; mean and variance still need to be corrected\n#     transforms.ToTensor(),\n#     transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])\n#     # [0.656,0.487,0.411], [1., 1., 1.]\n#     ])\n\n# ds_train = WhaleDataset(train_ds, '/home/public/happy-whale-and-dolphin',transform=train_transform)\n# plt.figure(\"Image\") # image window title\n# for i in range(20):\n#     image = ds_train[i]['image']\n#     img = transforms.ToPILImage()(image)\n#     plt.imshow(img)\n#     plt.axis('on') # set to 'off' to hide the axes\n#     plt.title('img') # image title\n#     plt.show()","repo_name":"sweetieeWang/happy_whale","sub_path":"Renet50/whale_dataset.py","file_name":"whale_dataset.py","file_ext":"py","file_size_in_byte":4200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25074710094","text":"from __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sys\nimport os\n\nfrom aiida.engine import run\nfrom aiida.plugins.factories import DataFactory\nfrom aiida.orm import Group\nfrom aiida.orm import Code\n\nfrom wf_aiida_1_0 import TestWorkChain\n################################################################\n\nStructureData = DataFactory('structure')\n\ntry:\n    codename = sys.argv[1]\nexcept IndexError:\n    codename = None\n\nif not codename:\n    print (\"Please provide a valid codename\")\n    sys.exit(1)\ncode = Code.get_from_string(codename)\n\nalat = 4. 
# angstrom\ncell = [[alat, 0., 0., ],\n [0., alat, 0., ],\n [0., 0., alat, ],\n]\n\n# BaTiO3 cubic structure\ns = StructureData(cell=cell)\ns.append_atom(position=(0., 0., 0.), symbols=['Ba'])\ns.append_atom(position=(alat / 2., alat / 2., alat / 2.), symbols=['Ti'])\ns.append_atom(position=(alat / 2., alat / 2., 0.), symbols=['O'])\ns.append_atom(position=(alat / 2., 0., alat / 2.), symbols=['O'])\ns.append_atom(position=(0., alat / 2., alat / 2.), symbols=['O'])\n\ng = Group(name=\"input_group\").store()\ng.add_nodes(s.store())\n\nw = TestWorkChain\nrun(w, structure=s.store(), code=code)\n","repo_name":"aiidateam/aiida-export-migration-tests","sub_path":"aiida-export-migration-tests/.qm/run_wf_aiida_1_0.py","file_name":"run_wf_aiida_1_0.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30518829005","text":"import os\nimport torch\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom torchvision import transforms\nfrom models.data.data_utils.transforms import (ResizeAll, Save, Normalize,\n AddDepthMask, ToTensorAll)\n\nfrom sacred import Experiment\n\nnyuv2_labeled_ingredient = Experiment('data_config')\n\n@nyuv2_labeled_ingredient.config\ndef cfg():\n data_name = \"nyu_depth_v2_labeled\"\n root_dir = os.path.join(\"data\", \"nyu_depth_v2_labeled_numpy\")\n train_files = {\n \"rgb\": \"train_images.npy\",\n \"rgb_cropped\": \"train_images_cropped.npy\",\n \"depth\": \"train_depths.npy\",\n \"depth_cropped\": \"train_depths_cropped.npy\",\n \"rawdepth\": \"train_rawDepths.npy\",\n \"rawdepth_cropped\": \"train_rawDepths_cropped.npy\"\n }\n test_files = {\n \"rgb\": \"test_images.npy\",\n \"rgb_cropped\": \"test_images_cropped\",\n \"depth\": \"test_depths.npy\",\n \"depth_cropped\": \"test_depths_cropped.npy\",\n \"rawdepth\": \"test_rawDepths.npy\",\n \"rawdepth_cropped\": \"test_rawDepths_cropped.npy\"\n }\n crop_file = \"crop.npy\"\n\n # True for DORN\n bgr_mode = False\n\n # True for DORN\n channels_first=False\n\n min_depth = 0.\n max_depth = 10.\n\n\nclass NYUDepthv2LabeledDataset(Dataset):\n \"\"\"\n The official NYUv2 Labeled dataset, loaded from numpy files.\n Evaluation crop specified in a separate numpy file.\n \"\"\"\n def __init__(self, rootdir, rgb_file, depth_file, rawdepth_file, crop_file, transform=None,\n bgr_mode=False):\n \"\"\"\n\n :param rootdir:\n :param rgb_file:\n :param depth_file:\n :param rawdepth_file:\n :param crop_file:\n :param transform:\n :param bgr_mode:\n :param channels_first: True for tensorflow, False for pytorch\n \"\"\"\n self.rootdir = rootdir\n self.bgr_mode = bgr_mode # set to True if you're loading data for DORN.\n self.depth = np.load(os.path.join(rootdir, depth_file)) # H x W x N\n self.rawdepth = np.load(os.path.join(rootdir, rawdepth_file)) # H x W x N\n self.rgb = np.load(os.path.join(rootdir, rgb_file)) # H x W x C x N\n\n\n self.crop = np.load(os.path.join(rootdir, crop_file))\n self.depth_cropped = self.depth[self.crop[0]:self.crop[1],\n self.crop[2]:self.crop[3],\n :\n ]\n self.rawdepth_cropped = self.rawdepth[self.crop[0]:self.crop[1],\n self.crop[2]:self.crop[3],\n :\n ]\n self.rgb_cropped = self.rgb[self.crop[0]:self.crop[1],\n self.crop[2]:self.crop[3],\n :,\n :\n ]\n\n channel_axis = 2\n if bgr_mode:\n self.bgr = np.flip(self.rgb, axis=channel_axis).copy()\n self.bgr_cropped = np.flip(self.rgb_cropped, axis=channel_axis).copy()\n self.transform = transform\n\n def __len__(self):\n return self.depth.shape[-1]\n\n def 
__getitem__(self, i):\n # Convert to torch tensor\n sample = {\n \"depth_cropped\": self.depth_cropped[..., i],\n \"depth\": self.depth[..., i],\n \"rawdepth_cropped\": self.rawdepth_cropped[..., i],\n \"rawdepth\": self.rawdepth[..., i],\n \"crop\": self.crop,\n \"entry\": str(i)\n }\n if self.bgr_mode:\n sample.update({\n \"bgr\": self.bgr[..., i],\n \"bgr_cropped\": self.bgr_cropped[..., i]\n })\n else:\n sample.update({\n \"rgb\": self.rgb[..., i],\n \"rgb_cropped\": self.rgb_cropped[..., i]\n })\n if self.transform is not None:\n sample = self.transform(sample)\n return sample\n\n def get_item_by_id(self, entry):\n return self[int(entry)]\n\n\n@nyuv2_labeled_ingredient.capture\ndef load_data(root_dir, train_files, test_files, crop_file, min_depth, max_depth, dorn_mode):\n \"\"\"\n Wonka:\n Input: rgb\n Output size: Same as *_cropped\n Compare to: rawdepth_cropped, mask\n\n DORN:\n Input: Resized version of bgr_cropped\n Output size: Same as unresized bgr_cropped\n Comapre to: rawdepth_cropped, mask\n\n :param root_dir: The root directory from which to load the dataset\n :param use_dorn_normalization: Whether or not to normalize the rgb images according to DORN statistics.\n :return: test: a NYUDepthv2TestDataset object.\n \"\"\"\n\n train = NYUDepthv2LabeledDataset(root_dir, train_files[\"rgb\"],\n train_files[\"depth\"],\n train_files[\"rawdepth\"],\n crop_file,\n transform=None, bgr_mode=dorn_mode)\n test = NYUDepthv2LabeledDataset(root_dir, test_files[\"rgb\"],\n test_files[\"depth\"],\n test_files[\"rawdepth\"],\n crop_file,\n transform=None, bgr_mode=dorn_mode)\n transform_list = [\n AddDepthMask(min_depth, max_depth, \"rawdepth_cropped\", \"mask_cropped\"),\n AddDepthMask(min_depth, max_depth, \"rawdepth\", \"mask\")\n ]\n if dorn_mode:\n print(\"Using dataset in DORN mode.\")\n transform_mean = np.array([[[103.0626, 115.9029, 123.1516]]]).astype(np.float32)\n transform_var = np.ones((1, 1, 3)).astype(np.float32)\n transform_list += [\n Save([\"bgr\"], \"_orig\"),\n ResizeAll((353, 257), keys=[\"bgr\"]),\n Normalize(transform_mean, transform_var, key=\"bgr\"),\n ToTensorAll(keys=[\"bgr\", \"bgr_orig\", \"depth_cropped\"],\n channels_first=dorn_mode)\n ]\n else:\n print(\"Using dataset in Wonka mode.\")\n transform_list.append(ToTensorAll(keys=[\"rgb\", \"depth_cropped\"], channels_first=dorn_mode))\n train.transform = transforms.Compose(transform_list)\n test.transform = transforms.Compose(transform_list)\n return train, test\n\n\n###########\n# Testing #\n###########\n@nyuv2_labeled_ingredient.automain\ndef test_load_data(min_depth, max_depth):\n train, test = load_data(dorn_mode=False)\n\n sample = test[300]\n # print(sample[\"rgb\"])\n print([(key, sample[key].shape) for key in sample if isinstance(sample[key], torch.Tensor)])\n print(np.min(sample[\"depth\"]))\n print(np.max(sample[\"depth\"]))\n print(sample[\"rgb\"].shape)\n print(sample[\"rgb\"][30, 30, :]) # Channels should be last\n # cv2 imwrite presumes BGR order.\n # cv2.imwrite(\"flip.png\", sample[\"rgb\"].astype('uint8'))\n # cv2.imwrite(\"noflip.png\", sample[\"rgb\"][:,:,::-1].astype('uint8'))\n\n train, test = load_data(dorn_mode=True)\n sample = test[300]\n # print(sample[\"rgb\"])\n print([(key, sample[key].shape) for key in sample if isinstance(sample[key], torch.Tensor)])\n # print(torch.min(sample[\"depth\"]))\n # print(torch.max(sample[\"depth\"]))\n print(sample[\"bgr\"].shape)\n print(sample[\"bgr\"][:, 30, 30]) # Channels should be first\n\n # Find entries where even inpainted depth has invalid 
entries\n for i in range(len(test)):\n depth = test[i][\"depth_cropped\"]\n less_than_zero = torch.sum((depth < 0).float())\n greater_than_ten = torch.sum((depth > 10.).float())\n if less_than_zero + greater_than_ten > 0:\n print(\"invalid depth entries for image {}\".format(i))\n break\n\n\n\n\n\n\n","repo_name":"computational-imaging/spad_single","sub_path":"models/data/nyuv2_labeled_dataset.py","file_name":"nyuv2_labeled_dataset.py","file_ext":"py","file_size_in_byte":7729,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"12799675199","text":"import pdb\nfrom framework.BaseValidator import BaseValidator\n\n\ndef get_substring_after_delim(haystack, delimiter, end_delimiter='<'):\n start_index = haystack.find(delimiter) + len(delimiter)\n if start_index == -1:\n if not BaseValidator.force_matches:\n raise Exception('Could not find the delimiter: {} in {}'.format(delimiter, haystack))\n else:\n return 'test applicant'\n end_index = haystack[start_index:].index(end_delimiter) + start_index\n return haystack[start_index:end_index]\n\n\n# parse the thread created when a tenant submits their application \n# and return the tenant email address\ndef get_new_application_email(thread):\n decoded_html = thread.last_message_text()\n substring = 'Email:'\n return get_substring_after_delim(decoded_html, substring)\n\ndef get_new_application_name(thread, return_as_list=False):\n decoded_html = thread.last_message_text()\n substring = 'Applicant:'\n name = get_substring_after_delim(decoded_html, substring)\n if not return_as_list:\n return name\n else:\n return name.split()[:2]\n\ndef get_approved_application_name(thread, return_as_list=False):\n decoded_html = thread.last_message_text()\n substring = 'Applicant name:'\n name = get_substring_after_delim(decoded_html, substring)\n if not return_as_list:\n return name\n else:\n return name.split()[:2]\n\ndef get_approved_application_email(thread):\n decoded_html = thread.last_message_text()\n substring = 'Applicant email:'\n return get_substring_after_delim(decoded_html, substring)\n\n# Since this is an org specific implementation we can have a hardcoded name mapping\ndef signature(thread):\n my_message_count = thread.get_user_message_count()\n name = thread.get_user_name()\n first, last = '', ''\n if name == 'tyler':\n first = 'Tyler'\n last = 'Galdes'\n elif name == 'wyatt':\n first = 'Wyatt'\n last = 'Cornelius'\n elif name == 'apply':\n first = 'Tyler'\n last = 'Galdes'\n if my_message_count == 0:\n return 'Best,
{} {}
Clean Floors & Locking Doors Team
'.format(first, last)\n elif my_message_count == 1:\n return 'Best,
{}
CF&LD Team
'.format(first)\n return 'Best,
{}
'.format(first)\n\n# Retun a string representing the short name of the school\n# empty string if we can't find a label matching 'Schools/.*'\ndef short_name_from_thread(thread):\n delim = 'Schools/'\n for label_name in thread.labels():\n if label_name and label_name.find(delim) == 0:\n return label_name[len(delim):]\n return 'the campus'\n\n\n# When we send out a lease doc to an owner and a tenant, we want to return just the tenant email here for the redirect to search on\n# arg will look like 'j.stan.hill@gmail.com and ivan.gonzalez@cimat.mx'\n# or 'ivan.gonzalez@cimat.mx'\ndef get_lease_sent_out_email(email_list_from_subject, all_owner_emails):\n #all_owner_emails = lookup_info('lease_owners', 'lease_owners')\n emails = []\n for token in email_list_from_subject.split():\n if '@' in token and token not in all_owner_emails:\n emails.append(token)\n if len(emails) != 1:\n raise Exception('In get_lease_sent_out_email with arg: {} we found {} non owner emails when we expected 1: {}'.format(email_list_from_subject, len(emails), emails))\n return emails[0]\n\ndef get_signed_lease_email(thread):\n # Since Adobe doesn't know how to set the 'Reply-To' header in emails they send\n # out we added a hack that optionally allows you to work around that\n for email in thread.default_reply(force_all=True):\n if email.split('@')[1].split('.')[0] in ['cleanfloorslockingdoors', 'echosign']:\n continue\n return email\n\n \n\n","repo_name":"tgaldes/projects","sub_path":"cfld/gmail/orgs/cfld/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37067117460","text":"import json\nimport traceback\nimport datetime\nimport os\nimport glob\nimport ipaddress\nimport time\nimport typing\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\n\nfrom queue import Queue\nfrom uuid import uuid4\n\nfrom SWARMRDS.core.client import SWARMClient\nfrom SWARMRDS.utilities.date_utils import convert_datetime_to_str\nfrom SWARMRDS.utilities.file_utils import find_file_path, find_folder_path\nfrom SWARMRDS.utilities.settings_utils import (\n receive_user_input,\n generate_new_user_settings_file,\n)\n\n\nclass SWARM:\n \"\"\"\n Core wrapper that contains all logic for running the SWARM platform.\n\n ### Arguements:\n - ip_address [str] The API address to connect if you using SWARM in\n the cloud or remote setting.\n \"\"\"\n\n def __init__(\n self, ip_address: str = \"127.0.0.1\", debug=False, response_queue: Queue = None,\n user_file_path: str = None\n ) -> None:\n self.client = SWARMClient(\n ip_address=ip_address, debug=debug, response_queue=response_queue, user_file_path=user_file_path\n )\n self.ip_address = ip_address\n self._file_path = None\n if user_file_path is not None:\n self._file_path = user_file_path\n self.generate_submission_tracking()\n self.map_name = \"\"\n self.debug = debug\n self._response_queue = response_queue\n self._has_trajectory = False\n\n def regenerate_connection(self) -> None:\n \"\"\"\n Regenerate the connection since we kill it after the simulation\n is finished.\n \"\"\"\n self.client = SWARMClient(\n ip_address=self.ip_address, response_queue=self._response_queue\n )\n self.client.connect()\n\n # =========================================================================\n # Helper Functions\n # =========================================================================\n\n def _get_supported_environments(self, working_path: str = os.getcwd()) -> dict:\n 
\"\"\"\n Get the SupportedEnvironments.json file that exists in the\n settings folder. This describes all valid options.\n\n ### Inputs:\n - None\n\n ### Returns:\n - A dictonary containing the supported environments.\n \"\"\"\n try:\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SupportedEnvironments.json\"\n else:\n file_path = find_file_path(\"SupportedEnvironments.json\", \"settings\")\n with open(file_path, \"r\") as file:\n return json.load(file)\n except FileNotFoundError:\n raise AssertionError(\n \"Error!\\n\"\n + \"The file titled SupportedEnvironments.json was not found in the settings folder\\n\"\n + \"If you do not see this file, please run example in the examples folder titled 'retrieve_environment_info.py\\n\"\n + \"which will download this file from the server to provide the most up to date information.\"\n )\n\n def _get_supported_scenarios(self, working_path: str = os.getcwd()) -> dict:\n \"\"\"\n Get the SupportedScenarios.json file that exists in the\n settings folder. This describes all valid options.\n\n ### Inputs:\n - None\n\n ### Returns:\n - A dictonary containing the supported scenarios.\n \"\"\"\n try:\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SupportedScenarios.json\"\n else:\n file_path = find_file_path(\"SupportedScenarios.json\", \"settings\")\n with open(file_path, \"r\") as file:\n return json.load(file)[\"Scenarios\"]\n except FileNotFoundError:\n raise AssertionError(\n \"Error!\\n\"\n + \"The file titled SupportedScenarios.json was not found in the settings folder\\n\"\n + \"If you do not see this file, please run example in the examples folder titled 'retrieve_environment_info.py\\n\"\n + \"which will download this file from the server to provide the most up to date information.\"\n )\n\n def setup_simulation(\n self,\n map_name: str = None,\n settings_file_name: str = \"settings/DefaultSimulationSettings.json\",\n ) -> None:\n \"\"\"\n Run the setup process for a Simulation, which includes choosing\n what maps to run.\n\n ### Inputs:\n - map_name [str] The name of the map to load the simulation\n - view_map [bool] Whether you want to view the map\n - settings_file_name [str] The name of the settings file to use\n Default is settings/DefaultSimulationSettings.json\n\n ### Outputs:\n - None\n \"\"\"\n input_options = [\"Yes\", \"No\"]\n prompt = \"Would you like to create a New Settings file?\\n(Please input the number for you choice!)\\n(Choose No to use settings/DefaultSimulationSettings.json)\\n\"\n for i, option in enumerate(input_options):\n prompt += \"\\t{}. {}\\n\".format(i + 1, option)\n prompt += \"Your Choice: \"\n user_input = receive_user_input(int, prompt, input_options, isList=True)\n bool_options = [option == \"Yes\" for option in input_options]\n if bool_options[user_input]:\n new_settings, settings_file_name = generate_new_user_settings_file()\n\n print(\"Reading map name from {}\".format(settings_file_name))\n settings_map_name = self.read_map_name_from_settings(settings_file_name)\n if map_name != settings_map_name:\n self.set_environment_name(settings_file_name, map_name)\n\n valid = self.validate_environment_name(map_name)\n\n if not valid:\n print(\"Environment name is invalid!\")\n return\n\n input_options = [\"Yes\", \"No\"]\n prompt = \"Would you like to view a map of the enviroment?\\n(Please input the number for you choice!)\\n\"\n for i, option in enumerate(input_options):\n prompt += \"\\t{}. 
{}\\n\".format(i + 1, option)\n prompt += \"Your Choice: \"\n user_input = receive_user_input(int, prompt, input_options, isList=True)\n bool_options = [option == \"Yes\" for option in input_options]\n if bool_options[user_input]:\n # TODO Check if the map file exists and call the appropriate\n # function to get the Map from the Server.\n self.map_name = map_name\n # Run the GUI application to pull up the map\n with open(settings_file_name, \"r\") as file:\n settings = json.load(file)\n\n # We only use Trajectoris in DataCollection and other scenarios\n if settings[\"Scenario\"][\"Name\"] == \"DataCollection\":\n self.display_map_image_with_trajectories(\n settings[\"Scenario\"][\"Options\"][\"LevelNames\"],\n settings[\"Environment\"][\"Name\"],\n )\n else:\n self.display_map_image(\n settings[\"Scenario\"][\"Options\"][\"LevelNames\"],\n settings[\"Environment\"][\"Name\"],\n )\n\n def read_map_name_from_settings(self, settings_file_name: str) -> str:\n \"\"\"\n Read the map name given from the settings file and validate\n that this is supported before moving on.\n\n ### Inputs:\n - None\n\n ### Outputs:\n - A map name as a string\n \"\"\"\n try:\n with open(\"{}\".format(settings_file_name), \"r\") as file:\n settings = json.load(file)\n return settings[\"Environment\"][\"Name\"]\n except Exception:\n traceback.print_exc()\n return \"\"\n\n def set_environment_name(self, settings_file_name: str, map_name: str) -> None:\n \"\"\"\n Set the environment name in the settings file.\n \"\"\"\n with open(\"{}\".format(settings_file_name), \"r\") as file:\n settings = json.load(file)\n \n settings[\"Environment\"][\"Name\"] = map_name\n\n with open(\"{}\".format(settings_file_name), \"w\") as file:\n json.dump(settings, file, indent=4)\n\n return\n\n def load_map_metadata(self, level_name: str, env_name: str) -> None:\n \"\"\"\n Load the metadata for the map to help with the coordinates\n \"\"\"\n with open(\"maps/{}_metadata_{}.json\".format(env_name, level_name), \"r\") as file:\n metadata = json.load(file)\n\n return metadata\n\n def display_map_image_with_coordinates_v1(self) -> None:\n \"\"\"\n Display the map of the selected environment with the corrected\n coordinate frame. 
This will show up in MatPlotLib correctly\n so that a user can decide where to send the agent.\n\n NOTE Map name is \"ENVIRONMENT_NAME\".png\n TODO Add levels to this as we advance the environment\n \"\"\"\n img = plt.imread(\"maps/{}.png\".format(self.map_name))\n self.load_map_metadata()\n plt.imshow(img)\n fig = plt.gcf()\n for ax in fig.axes:\n scale = 1e1 # Map is in centimeters\n # Shift the axes of the map by half the bounds of the image, then\n # scale, then reverse both axes to make sense for NED coordinates\n ticks_x = ticker.FuncFormatter(\n lambda x, pos: \"{0:g}\".format(\n 1 * (x - (self.map_metadata[\"ImageSize\"][0] / 2)) / scale\n )\n )\n ax.xaxis.set_major_formatter(ticks_x)\n ax.yaxis.set_major_formatter(ticks_x)\n plt.xlabel(\"X Coordinate (meters)\")\n plt.ylabel(\"Y Coordinate (meters)\")\n plt.show()\n\n def display_map_image(\n self, level_names: list, env_name: str, maps_dir: str = \"maps\"\n ) -> None:\n \"\"\"\n Display a Map Image of the Environment with no trajectory\n\n ### Inputs:\n - level_names [list] The levels to display\n - env_name [str] The name of the environment to run\n - maps_dir [str] The folder where the map system exists\n \"\"\"\n for level_name in level_names:\n metadata = self.load_map_metadata(level_name, env_name)\n img = plt.imread(\"maps/{}_{}.png\".format(env_name, level_name))\n plt.imshow(img)\n fig = plt.gcf()\n\n for ax in fig.axes:\n # ax.axis('off')\n # ax.margins(0,0)\n # ax.xaxis.set_major_locator(plt.NullLocator())\n # ax.yaxis.set_major_locator(plt.NullLocator())\n # scale = 1.925\n ticks_x = ticker.FuncFormatter(\n lambda x, pos: \"{0:g}\".format(\n 1 * (x - metadata[\"Offset\"][0]) / metadata[\"Scale\"]\n )\n )\n ticks_y = ticker.FuncFormatter(\n lambda x, pos: \"{0:g}\".format(\n 1 * (x - metadata[\"Offset\"][1]) / metadata[\"Scale\"]\n )\n )\n ax.xaxis.set_major_formatter(ticks_x)\n ax.yaxis.set_major_formatter(ticks_y)\n plt.plot(\n metadata[\"Offset\"][0] + metadata[\"Origin\"][0],\n metadata[\"Offset\"][1] + metadata[\"Origin\"][1],\n marker=\".\",\n color=\"white\",\n label=\"Origin\",\n )\n plt.plot(\n metadata[\"Offset\"][0] + metadata[\"Origin\"][0] + 7,\n metadata[\"Offset\"][1] + metadata[\"Origin\"][1],\n marker=\">\",\n color=\"blue\",\n label=\"Origin\",\n )\n plt.text(\n metadata[\"Offset\"][0] + metadata[\"Origin\"][0] - 10,\n metadata[\"Offset\"][1] + metadata[\"Origin\"][1] - 8,\n \"Origin\",\n color=\"white\",\n )\n plt.text(\n metadata[\"Offset\"][0] + metadata[\"Origin\"][0] + 30,\n metadata[\"Offset\"][1] + metadata[\"Origin\"][1],\n \"Front\",\n color=\"white\",\n )\n plt.xlabel(\"X Coordinate (meters)\")\n plt.ylabel(\"Y Coordinate (meters\")\n plt.show()\n\n def display_map_image_with_trajectories(\n self, level_names: list, env_name: str, maps_dir: str = \"maps\"\n ) -> None:\n \"\"\"\n Display the map image with the trajectory overlayed.\n \"\"\"\n\n for level_name in level_names:\n img = plt.imread(\"{}/{}_{}.png\".format(maps_dir, env_name, level_name))\n\n map_metadata = self.load_map_metadata(level_name, env_name)\n plt.imshow(img)\n fig = plt.gcf()\n capture_size = map_metadata[\"CaptureSize\"]\n grid_marks = [\n measure * (1 / map_metadata[\"CaptureIncrement\"])\n for measure in capture_size\n ]\n origin_offset = map_metadata[\"Origin\"]\n\n for ax in fig.axes:\n scale = 1e1 # Map is in centimeters\n # Shift the axes of the map by half the bounds of the image, then\n # scale, then reverse both axes to make sense for NED coordinates\n ticks_x = ticker.FuncFormatter(\n lambda x, pos: 
\"{0:g}\".format(\n 1 * (x - (map_metadata[\"ImageSize\"][0] / 2)) / scale\n )\n )\n ax.xaxis.set_major_formatter(ticks_x)\n ax.yaxis.set_major_formatter(ticks_x)\n\n origin = [\n grid_marks[0] / 2 + origin_offset[0],\n grid_marks[1] / 2 + origin_offset[1],\n ]\n plt.plot(origin[0], origin[1], marker=\".\", color=\"red\", label=\"1\")\n\n # load the waypoints from the json file\n with open(\"settings/DefaultTrajectory.json\", \"r\") as file:\n trajectory = json.load(file)\n # level = map_name[(map_name.index(\"_\") + 1):]\n trajectory = trajectory[\"Trajectory\"][level_name]\n\n # Loop through the waypoints and plot the points\n trajectorylist = [origin]\n n = 0\n scale = 11.5\n for waypoint in trajectory:\n plt.plot(\n origin[0] + (waypoint[\"X\"] * scale),\n origin[1] + (waypoint[\"Y\"] * scale),\n marker=\".\",\n color=\"blue\",\n label=n,\n )\n trajectorylist.append(\n [\n origin[0] + (waypoint[\"X\"] * scale),\n origin[1] + (waypoint[\"Y\"] * scale),\n ]\n )\n n += 1\n # Loop through the points and draw a line connecting the current point to the next point\n for i in range(len(trajectorylist) - 1):\n x = [trajectorylist[i][0], trajectorylist[i + 1][0]]\n y = [trajectorylist[i][1], trajectorylist[i + 1][1]]\n plt.plot(x, y, color=\"green\", linewidth=2)\n\n plt.xlabel(\"X Coordinate (meters)\")\n plt.ylabel(\"Y Coordinate (meters)\")\n\n # Save the trajectory map to the data assets folder\n # TODO: Define the location of the UE project content folder\n print(\"Saving map with trajecotry to /{}\".format(maps_dir))\n plt.savefig(\"{}/{}_{}_Display.png\".format(maps_dir, env_name, level_name))\n\n plt.show()\n plt.close()\n\n def generate_submission_tracking(self) -> None:\n \"\"\"\n Generate the necessary files for submission tracking and\n history, so that users can track what settings have been\n changed over time.\n\n ### Inputs:\n - None\n\n ### Outputs:\n - None\n \"\"\"\n print(\"Checking if submission history exsits\")\n try:\n if self._file_path is not None:\n folder_path = self._file_path + \"/settings\"\n else:\n folder_path = find_folder_path(\"settings\")\n os.chdir(folder_path)\n except FileNotFoundError:\n print(\"Settings folder not found!\")\n print(\"Creating settings folder\")\n if self._file_path is not None:\n os.mkdir(self._file_path + \"/settings\")\n folder_path = self._file_path + \"/settings\"\n os.chdir(folder_path)\n else:\n # We are running this from the root directory of the Client\n # repo and don't need to worry about paths\n os.mkdir(\"settings\")\n folder_path = find_folder_path(\"settings\")\n os.chdir(folder_path)\n # We should now be in file_path + \"/settings\"\n files = glob.glob(\"*.json\")\n if \"SubmissionHistory.json\" not in files:\n print(\"Generating new submission history\")\n sub_histroy = {\"History\": list()}\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SubmissionHistory.json\"\n else:\n file_path = find_folder_path(\"settings\") + \"/SubmissionHistory.json\"\n with open(file_path, \"a\") as file:\n json.dump(sub_histroy, file)\n if \"SubmissionList.json\" not in files:\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SubmissionList.json\"\n else:\n file_path = find_folder_path(\"settings\") + \"/SubmissionList.json\"\n print(\"Generating new submission list\")\n sub_list = {\"Submissions\": {}}\n with open(file_path, \"a\") as file:\n json.dump(sub_list, file)\n os.chdir(\"..\")\n print(\"File path is now {}\".format(os.getcwd()))\n if self._file_path is not None:\n 
print(\"User File path is now {}\".format(self._file_path))\n print(\"Submission history has been successfully setup!\")\n return\n\n def _get_scenarios_with_trajectories(self) -> list:\n \"\"\"\n Access the SupportedScenarios.json file and determine which\n environments have trajectories by reading the member named\n \"TrajectoryRequired\".\n\n ### Inputs:\n - None\n\n ### Outputs:\n - A list of environments that have trajectories\n \"\"\"\n scenarios = self._get_supported_scenarios()\n scenarios_with_trajectories = list()\n for scenario_name, details in scenarios.items():\n if details[\"TrajectoryRequired\"]:\n scenarios_with_trajectories.append(scenario_name)\n return scenarios_with_trajectories\n\n def _set_using_trajectory(self, settings_file_name: str) -> None:\n \"\"\"\n Read in the Settings file and extract the Scenario name,\n determining if we are using a trajectory or not.\n\n ### Inputs:\n - settings_file_name [str] The name of the settings file\n\n ### Outputs:\n - None\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/\" + settings_file_name\n else:\n file_path = find_file_path(settings_file_name, \"settings\")\n with open(file_path, \"r\") as file:\n settings = json.load(file)\n\n if settings[\"Scenario\"][\"Name\"] in self._get_scenarios_with_trajectories():\n self._has_trajectory = True\n else:\n self._has_trajectory = False\n\n def build_simulation(\n self,\n map_name: str,\n custom_name: str = None,\n settings_file_name: str = \"DefaultSimulationSettings.json\",\n trajectory_file_name: str = \"DefaultTrajectory.json\",\n submission_list_location: str = \"settings\",\n ) -> str:\n \"\"\"\n Build a Simulation Package using the setup from the previous\n step. This creates a new unique name and id to reference all of\n the simulation steps, including the ability to load this data\n into the SWARM Web Portal for viewing.\n\n ### Inputs:\n - custom_name [str] A custom name that can be set instead of a\n random UUID.\n\n ### Outputs:\n - The simulation name as a string\n \"\"\"\n valid = self.validate_environment_name(map_name)\n\n if not valid:\n print(\"Environment name is invalid!\")\n return\n\n print(\"Reading map name from {}\".format(settings_file_name))\n if self._file_path is not None:\n file_name = self._file_path + \"/\" + submission_list_location + \"/\" + settings_file_name\n else:\n file_name = submission_list_location + \"/\" + settings_file_name\n settings_map_name = self.read_map_name_from_settings(file_name)\n if map_name != settings_map_name:\n self.set_environment_name(file_name, map_name)\n\n if custom_name:\n sim_name = custom_name\n else:\n sim_name = uuid4()\n sim_name = sim_name.__str__()\n\n self._set_using_trajectory(settings_file_name)\n\n settings, trajectory = self.generate_simulation_package(\n submission_list_location + \"/\" + settings_file_name,\n submission_list_location + \"/\" + trajectory_file_name,\n sim_name,\n )\n\n if self._file_path is not None:\n file_path = self._file_path + \"/\" + submission_list_location + \"/SubmissionList.json\"\n else:\n file_path = find_file_path(\"SubmissionList.json\", submission_list_location)\n\n with open(file_path, \"r\") as file:\n submission_list = json.load(file)\n\n print(submission_list[\"Submissions\"].keys())\n print(\"Building submission package for {}\".format(sim_name))\n if sim_name in submission_list[\"Submissions\"].keys():\n submission_list[\"Submissions\"][sim_name][\"Settings\"] = settings\n submission_list[\"Submissions\"][sim_name][\"Trajectory\"] = 
\n    def _get_scenarios_with_trajectories(self) -> list:\n        \"\"\"\n        Access the SupportedScenarios.json file and determine which\n        scenarios require trajectories by reading the member named\n        \"TrajectoryRequired\".\n\n        ### Inputs:\n        - None\n\n        ### Outputs:\n        - A list of scenarios that require trajectories\n        \"\"\"\n        scenarios = self._get_supported_scenarios()\n        scenarios_with_trajectories = list()\n        for scenario_name, details in scenarios.items():\n            if details[\"TrajectoryRequired\"]:\n                scenarios_with_trajectories.append(scenario_name)\n        return scenarios_with_trajectories\n\n    def _set_using_trajectory(self, settings_file_name: str) -> None:\n        \"\"\"\n        Read in the Settings file and extract the Scenario name,\n        determining if we are using a trajectory or not.\n\n        ### Inputs:\n        - settings_file_name [str] The name of the settings file\n\n        ### Outputs:\n        - None\n        \"\"\"\n        if self._file_path is not None:\n            file_path = self._file_path + \"/settings/\" + settings_file_name\n        else:\n            file_path = find_file_path(settings_file_name, \"settings\")\n        with open(file_path, \"r\") as file:\n            settings = json.load(file)\n\n        if settings[\"Scenario\"][\"Name\"] in self._get_scenarios_with_trajectories():\n            self._has_trajectory = True\n        else:\n            self._has_trajectory = False\n
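\n    # _get_scenarios_with_trajectories() only inspects the\n    # \"TrajectoryRequired\" flag, so each scenario entry returned by\n    # _get_supported_scenarios() is assumed to look roughly like this\n    # (names and values hypothetical):\n    #\n    # {\"DataCollection\": {\"TrajectoryRequired\": true}}\n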
\n    def build_simulation(\n        self,\n        map_name: str,\n        custom_name: str = None,\n        settings_file_name: str = \"DefaultSimulationSettings.json\",\n        trajectory_file_name: str = \"DefaultTrajectory.json\",\n        submission_list_location: str = \"settings\",\n    ) -> str:\n        \"\"\"\n        Build a Simulation Package using the setup from the previous\n        step. This creates a new unique name and id to reference all of\n        the simulation steps, including the ability to load this data\n        into the SWARM Web Portal for viewing.\n\n        ### Inputs:\n        - map_name [str] The name of the map to run\n        - custom_name [str] A custom name that can be set instead of a\n        random UUID.\n        - settings_file_name [str] The name of the settings file\n        - trajectory_file_name [str] The name of the trajectory file\n        - submission_list_location [str] The folder holding the submission list\n\n        ### Outputs:\n        - The simulation name as a string (empty if the map name is invalid)\n        \"\"\"\n        valid = self.validate_environment_name(map_name)\n\n        if not valid:\n            print(\"Environment name is invalid!\")\n            return \"\"\n\n        print(\"Reading map name from {}\".format(settings_file_name))\n        if self._file_path is not None:\n            file_name = self._file_path + \"/\" + submission_list_location + \"/\" + settings_file_name\n        else:\n            file_name = submission_list_location + \"/\" + settings_file_name\n        settings_map_name = self.read_map_name_from_settings(file_name)\n        if map_name != settings_map_name:\n            self.set_environment_name(file_name, map_name)\n\n        if custom_name:\n            sim_name = custom_name\n        else:\n            sim_name = str(uuid4())\n\n        self._set_using_trajectory(settings_file_name)\n\n        settings, trajectory = self.generate_simulation_package(\n            submission_list_location + \"/\" + settings_file_name,\n            submission_list_location + \"/\" + trajectory_file_name,\n            sim_name,\n        )\n\n        if self._file_path is not None:\n            file_path = self._file_path + \"/\" + submission_list_location + \"/SubmissionList.json\"\n        else:\n            file_path = find_file_path(\"SubmissionList.json\", submission_list_location)\n\n        with open(file_path, \"r\") as file:\n            submission_list = json.load(file)\n\n        print(submission_list[\"Submissions\"].keys())\n        print(\"Building submission package for {}\".format(sim_name))\n        if sim_name in submission_list[\"Submissions\"].keys():\n            submission_list[\"Submissions\"][sim_name][\"Settings\"] = settings\n            submission_list[\"Submissions\"][sim_name][\"Trajectory\"] = trajectory\n            submission_list[\"Submissions\"][sim_name][\"Number Of Runs\"] += 1\n        else:\n            submission_list[\"Submissions\"][sim_name] = {\n                \"Completed\": False,\n                \"Settings\": settings,\n                \"Trajectory\": trajectory,\n                \"Created\": convert_datetime_to_str(datetime.datetime.now()),\n                \"Submitted\": True,\n                \"Number Of Runs\": 1,\n            }\n\n        with open(file_path, \"w\") as file:\n            json.dump(submission_list, file)\n\n        return sim_name\n
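\n    # A minimal usage sketch, assuming a configured client instance named\n    # client and a supported map called \"ExampleEnv\" (both hypothetical):\n    #\n    #     sim_name = client.build_simulation(\"ExampleEnv\")\n    #     settings, trajectory = client.retrieve_sim_package(sim_name)\n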
\n    def generate_simulation_package(\n        self, settings_file_name: str, trajectory_file_name: str, sim_name: str\n    ) -> tuple:\n        \"\"\"\n        Generate the Submission package by reading the settings and the\n        Trajectory file (assuming a Trajectory is required)\n\n        ### Inputs:\n        - settings_file_name [str] The name of the settings file\n        - trajectory_file_name [str] The name of the trajectory file\n        - sim_name [str] The name of the simulation\n\n        ### Outputs:\n        - The settings and trajectory as JSON strings\n        \"\"\"\n        if self._file_path is not None:\n            settings_file_name = self._file_path + \"/\" + settings_file_name\n            trajectory_file_name = self._file_path + \"/\" + trajectory_file_name\n\n        settings = self.read_json_file(settings_file_name)\n        settings = self.add_simulation_name_to_settings(\n            sim_name, settings, settings_file_name\n        )\n        settings = self._update_all_sensor_settings(settings, settings_file_name)\n        if self._has_trajectory:\n            trajectory = self.read_json_file(trajectory_file_name)\n        else:\n            trajectory = None\n        return settings, trajectory\n\n    def add_simulation_name_to_settings(\n        self, sim_name: str, settings: str, settings_file_name: str\n    ) -> str:\n        \"\"\"\n        Update the simulation name in the settings file and return the\n        updated settings as a JSON string.\n        \"\"\"\n        settings = json.loads(settings)\n        settings[\"SimulationName\"] = sim_name\n        with open(settings_file_name, \"w\") as file:\n            json.dump(settings, file, indent=4)\n        settings = json.dumps(settings)\n        return settings\n
Please regenerate this file.\"\n\n print(statement)\n return False, statement\n elif key == \"RunLength\":\n try:\n assert isinstance(options, int) or isinstance(options, float)\n if not (options >= 10.0 and options <= 9999):\n raise ValueError()\n except AssertionError:\n statement = \"The required Run Length is not an integer or float. Please fix this!\"\n print(\n statement\n )\n return False, statement\n except ValueError:\n statement = \"The Run Length specified is not a valid value.\\n\"\n statement += \"Valid values are 10.0 to 9999.0 seconds\\n\"\n statement += \"Please change this value in your settings file\"\n print(statement)\n\n return False, statement\n elif key == \"Scenario\":\n valid_keys = [\"Name\", \"Options\"]\n try:\n if valid_keys != list(options.keys()):\n raise AssertionError(\n \"Error! Please ensure you have the following sections: {}\".format(\n valid_keys\n )\n )\n if not self.validate_scenario_name(options[\"Name\"]):\n raise AssertionError(\n \"Scenario with name {} is not supported!\".format(\n options[\"Name\"]\n )\n )\n if \"LevelNames\" in options[\"Options\"]:\n for level_name in options[\"Options\"][\"LevelNames\"]:\n if not self.validate_level_is_supported(\n settings_file[\"Environment\"][\"Name\"], level_name\n ):\n raise AssertionError(\n \"Level {} is not supported for environment with name {}!\".format(\n level_name,\n settings_file[\"Environment\"][\"Name\"],\n )\n )\n if \"MultiLevel\" in options[\"Options\"]:\n if not isinstance(options[\"Options\"][\"MultiLevel\"], bool):\n raise AssertionError(\n \"Error! The field MultiLevel must be a boolean value!\"\n )\n if \"GoalPoint\" in options[\"Options\"]:\n if len(list(options[\"Options\"][\"GoalPoint\"].keys())) != len(\n list(settings_file[\"Agents\"].keys())\n ):\n raise AssertionError(\n \"\\n\\nError!\\nYou must have the same number of goal points as agents!\\nYou defined {} agents and {} goal points\".format(\n len(\n list(options[\"Options\"][\"GoalPoint\"].keys())\n ),\n len(list(settings_file[\"Agents\"].keys())),\n )\n )\n for agent_name, goal in options[\"Options\"][\n \"GoalPoint\"\n ].items():\n if type(goal).__name__ != \"dict\" or [\n \"X\",\n \"Y\",\n \"Z\",\n ] != list(goal.keys()):\n raise AssertionError(\n \"\\n\\nError for agent {}!\\n You must define a dictonary with the keys 'X', 'Y' and 'Z' in NED coordaintes\\n Your input was of type {}\".format(\n agent_name, type(goal).__name__\n )\n )\n for coord, coord_value in goal.items():\n if type(coord_value).__name__ != \"float\":\n raise AssertionError(\n \"\\n\\nError!\\n Coordinate {} must be of type float\\n Your input was of type {}\".format(\n coord, type(coord_value).__name__\n )\n )\n if coord_value < -999.0 or coord_value > 999.0:\n raise AssertionError(\n \"\\n\\nError!\\n Coordinate {} must be between -999.0 and 999.0 meters!\\n Your input was {}\".format(\n coord, coord_value\n )\n )\n except AssertionError as error:\n print(error)\n return False, str(error)\n elif key == \"Environment\":\n valid_keys = [\"Name\", \"StreamVideo\", \"StartingLevelName\", \"Options\"]\n try:\n if not \"Name\" in list(options.keys()):\n raise AssertionError(\n \"You must provide a Name that is in the environment\"\n )\n if not self.validate_environment_name(options[\"Name\"]):\n raise AssertionError(\n \"Environment name {} is not valid!\".format(\n options[\"Name\"]\n )\n )\n if not isinstance(options[\"StreamVideo\"], bool):\n raise AssertionError(\n \"Error! 
\n    def retrieve_sim_package(self, sim_name: str, folder: str = \"settings\") -> tuple:\n        \"\"\"\n        Retrieve the settings files as JSON strings to send to the\n        server.\n\n        ### Inputs:\n        - sim_name [str] The unique simulation name\n\n        ### Outputs:\n        - The settings and trajectory to send to the server\n        \"\"\"\n        if self._file_path is not None:\n            file_path = self._file_path + \"/\" + folder + \"/\" + \"SubmissionList.json\"\n        else:\n            file_path = find_file_path(\"SubmissionList.json\", folder)\n        with open(file_path, \"r\") as file:\n            submission_list = json.load(file)\n\n        try:\n            settings = submission_list[\"Submissions\"][sim_name][\"Settings\"]\n            trajectory = submission_list[\"Submissions\"][sim_name][\"Trajectory\"]\n        except KeyError:\n            traceback.print_exc()\n            print(\n                \"{} doesn't exist in the submission list! Please try a different name!\".format(\n                    sim_name\n                )\n            )\n            return {}, {}\n\n        return settings, trajectory\n\n    def update_submission_list(self, message: dict, folder: str = \"settings\") -> None:\n        \"\"\"\n        Once we have received a finish message, update the submission\n        list to reflect this fact.\n\n        ### Inputs:\n        - message [dict] - The message received from the client\n\n        Message should have the following format:\n        {\n            \"Sim_name\": self.settings[\"SimulationName\"],\n            \"Status\": \"NotCompleted\",\n            \"Minutes\": 5,\n            \"Seconds\": 12\n        }\n\n        ### Outputs:\n        - None\n        \"\"\"\n        if self._file_path is not None:\n            list_file_path = self._file_path + \"/\" + folder + \"/SubmissionList.json\"\n        else:\n            list_file_path = find_file_path(\"SubmissionList.json\", folder)\n        with open(list_file_path, \"r\") as file:\n            sub_list = json.load(file)\n\n        if self._file_path is not None:\n            history_file_path = self._file_path + \"/\" + folder + \"/SubmissionHistory.json\"\n        else:\n            history_file_path = find_file_path(\"SubmissionHistory.json\", folder)\n\n        with open(history_file_path, \"r\") as file:\n            history = json.load(file)\n\n        try:\n            submission = sub_list[\"Submissions\"][message[\"Sim_name\"]]\n            if message[\"Status\"] == \"Completed\":\n                submission[\"Completed\"] = True\n                print(\"Simulation {} has been completed!\".format(message[\"Sim_name\"]))\n                print(\n                    \"Completion Time: {} Mins {} Seconds\".format(\n                        message[\"Minutes\"], message[\"Seconds\"]\n                    )\n                )\n            history[\"History\"].append(submission)\n            with open(list_file_path, \"w\") as file:\n                json.dump(sub_list, file)\n            with open(history_file_path, \"w\") as file:\n                json.dump(history, file)\n        except KeyError:\n            traceback.print_exc()\n            print(\"This simulation was not a part of the Submission List\")\n        except Exception:\n            traceback.print_exc()\n
\\n\"\n )\n camera_names = list()\n for agent_name, agent_options in settings_file[\n \"Agents\"\n ].items():\n if \"Cameras\" not in agent_options[\"Sensors\"].keys():\n raise AssertionError(\n \"Error!\\n\\nYou must add a Cameras section to your settings to record video!\"\n )\n for camera_name in agent_options[\"Sensors\"][\n \"Cameras\"\n ].keys():\n if camera_name not in camera_names:\n camera_names.append(camera_name)\n if options[\"Video\"][\"CameraName\"] not in camera_names:\n raise AssertionError(\n \"Error!\\n\\nNo Agent in your settings has a camera with name: {}\\nPlease correct this by adding a Camera with that name!!\".format(\n options[\"Video\"][\"CameraName\"]\n )\n )\n except AssertionError as error:\n print(error)\n\n return False, str(error)\n elif key == \"Agents\":\n # Test that they have an agent\n try:\n numb_agents = len(list(options.keys()))\n if numb_agents < 1 or numb_agents > 5:\n raise AssertionError(\n \"\\n\\nError!\\nYou must have at least one agent but can only have up to 5!\"\n )\n except Exception as error:\n print(error)\n # This is a critical error that should not allow them to continue\n return False\n # Now, iterate through each agent and check that the modules are there\n for agent, agent_options in options.items():\n print(\"Validating {}\".format(agent))\n # Test that the appropriate sections exist\n valid_agent_sections = [\n \"Vehicle\",\n \"AutoPilot\",\n \"Sensors\",\n \"Controller\",\n \"SoftwareModules\",\n \"StartingPosition\",\n \"VehicleOptions\",\n \"VehiclePhysicsProfile\"\n ]\n valid_agent_sections.sort()\n try:\n user_agent_options = list(agent_options.keys())\n user_agent_options.sort()\n if not user_agent_options == valid_agent_sections:\n raise AssertionError(\n \"Sections for {} are not valid!\\nYour Options: {}\\nValid Options: {}\".format(\n agent, user_agent_options, valid_agent_sections\n )\n )\n except Exception as error:\n traceback.print_exc()\n return False\n for section_name, section in agent_options.items():\n if section_name == \"Vehicle\":\n if not section == \"Multirotor\":\n raise AssertionError(\n \"Vehicle parameter must be Multirotor. Support for different vehicle types coming soon!\"\n )\n elif section_name == \"VehiclePhysicsProfile\":\n self._validate_vehicle_physics_profile(section, agent_options[\"Vehicle\"])\n elif section_name == \"VehicleOptions\":\n if not isinstance(section, dict):\n raise AssertionError(\n \"Error!\\n Section VehicleOptions must be a dictionary!\\nYour input was of type: {}\".format(\n type(section).__name__\n )\n )\n valid_options = [\n \"RunROSNode\",\n \"UseLocalPX4\",\n \"PlanningCoordinateFrame\",\n \"LocalHostIP\",\n ]\n for option_key, option_value in section.items():\n if option_key not in valid_options:\n raise AssertionError(\n \"Error!\\nOption {} for Vehicle Options for agent {} is not in the list of valid options!\".format(\n option_key, agent\n )\n )\n if option_key == \"RunROSNode\":\n if not isinstance(section[option_key], bool):\n raise AssertionError(\n \"Error!\\nOption {} must be of type bool.\\nYour input was of type {}\".format(\n option_key,\n type(option_value).__name__,\n )\n )\n if (\n \"PlanningCoordinateFrame\"\n not in section.keys()\n ):\n raise AssertionError(\n \"Error!\\nOption 'PlanningCoordinateFrame' must be included if you are running ROS. 
Options are NED and ENU.\"\n )\n if option_key == \"PlanningCoordinateFrame\":\n if not isinstance(section[option_key], str):\n raise AssertionError(\n \"Error!\\nOption {} must be of type string.\\nYour input was of type {}\".format(\n option_key,\n type(option_value).__name__,\n )\n )\n if section[option_key] not in [\"NED\", \"ENU\"]:\n raise AssertionError(\n \"Error for agent {}!\\nOption 'PlanningCoordinateFrame' can only be NED and ENU.\".format(\n agent\n )\n )\n if option_key == \"UseLocalPX4\":\n if not isinstance(section[option_key], bool):\n raise AssertionError(\n \"Error!\\nOption {} must be of type bool.\\nYour input was of type {}\".format(\n option_key,\n type(option_value).__name__,\n )\n )\n if option_key == \"LocalHostIP\":\n if not isinstance(option_value, str):\n raise AssertionError(\n \"Error!\\n\\nLoclHostIP must be of type str. \\nYour input was of type {}\".format(\n type(option_value).__name__\n )\n )\n try:\n ipaddress.ip_address(option_value)\n print(\n \"Provided IP address has been validated!\"\n )\n except ValueError:\n raise AssertionError(\n \"Error!\\n\\nThe provided IP address is not a valid IPV4 address!\\nYour input was: {}\".format(\n option_value\n )\n )\n elif section_name == \"StartingPosition\":\n if (\n \"X\" not in section.keys()\n or \"Y\" not in section.keys()\n or \"Z\" not in section.keys()\n ):\n raise AssertionError(\n \"\\n\\n Error for Agent {}!\\nYou must have X, Y and Z as keys to a dictionary for the StartingPosition!\\nYour Input: {}\".format(\n agent, section.keys()\n )\n )\n for key, pos in section.items():\n if type(pos).__name__ != \"float\":\n raise AssertionError(\n \"\\n\\n Error for Agent {}!\\n{} must be of type float!\\nYour Input: {}\".format(\n agent, key, pos\n )\n )\n if pos > 999.0 or pos < -999.0:\n raise AssertionError(\n \"\\n\\n Error for Agent {}!\\n{} must be of type within -999.0 and 999.0 meters!\\nYour Input: {}\".format(\n agent, key, pos\n )\n )\n elif section_name == \"AutoPilot\":\n if not section == \"SWARM\" and not section == \"PX4\":\n raise AssertionError(\n \"Autopilot parameter must be SWARM or PX4. Support for different autopilots, including PX4/Ardupilot coming in February 2023!\"\n )\n elif section_name == \"Sensors\":\n self.validate_sensors(\n section,\n settings_file[\"Agents\"][agent][\"AutoPilot\"],\n agent_name=agent,\n )\n # Update the broader file\n settings_file[\"Agents\"][agent][section_name] = section\n elif section_name == \"Controller\":\n valid_controller_sections = [\"Name\", \"Gains\"]\n for (\n controller_section_name,\n controller_setting,\n ) in section.items():\n if (\n controller_section_name\n not in valid_controller_sections\n ):\n raise AssertionError(\n \"Controller for {} has an invalid field!\\nYour input: {}\\nValid Inputs: {}\".format(\n agent,\n controller_section_name,\n valid_controller_sections,\n )\n )\n if controller_section_name == \"Name\":\n if not controller_setting == \"PID\":\n raise AssertionError(\n \"Controller for {} type is is invalid. Name must be PID. 
Support for different controllers will be available in March 2023!\".format(\n agent\n )\n )\n elif controller_section_name == \"Gains\":\n for (\n gain_key,\n gain,\n ) in controller_setting.items():\n if not isinstance(gain, float):\n raise AssertionError(\n \"Gain {} for controller for {} must be a float value!\".format(\n gain_key, agent\n )\n )\n if gain < 0.0 or gain > 20.0:\n raise AssertionError(\n \"Gain {} for controller for {} must be between 0.0 and 20.0!\".format(\n gain_key, agent\n )\n )\n elif section_name == \"SoftwareModules\":\n self.validate_software_modules(\n section,\n agent,\n settings_file[\"Agents\"][agent][\"Sensors\"],\n )\n return True, \"Success! Your settings file has been validated!\"\n except Exception as error:\n traceback.print_exc()\n print(error)\n statement = error\n return False, str(error)\n\n def _validate_vehicle_physics_profile(self, vehicle_physics_profile: str, vehicle_type: str):\n \"\"\"\n Validate that the provided vehicle physics profile has been\n defined in the VehicleProfiles.json file in the vehicle_profiles\n folder.\n\n ### Inputs:\n ----\n `vehicle_physics_profile` : str\\n\n The vehicle physics profile to validate.\n\n `vehicle_type` : str\\n\n The vehicle type to validate.\n\n ### Returns:\n ----\n `valid` : bool\\n\n \"\"\"\n if not isinstance(vehicle_physics_profile, str):\n raise AssertionError(\n \"Error!\\n\\nThe vehicle physics profile must be of type str.\\nYour input was of type {}\".format(\n type(vehicle_physics_profile).__name__\n )\n )\n vehicle_profiles = self._retrieve_valid_vehicle_physics_profiles(vehicle_type)\n if vehicle_physics_profile not in vehicle_profiles.keys():\n raise AssertionError(\n \"Error!\\n\\nThe vehicle physics profile {} is not a valid vehicle physics profile.\\nValid vehicle physics profiles are: {}\".format(\n vehicle_physics_profile, vehicle_profiles.keys()\n )\n )\n \n # Check that the related json file for the vehicle physics profile exists\n if self._file_path is not None:\n file_path = self._file_path + \"/vehicle_profiles/{}\".format(vehicle_profiles[vehicle_physics_profile])\n else:\n file_path = find_file_path(vehicle_profiles[vehicle_physics_profile], \"vehicle_profiles\")\n \n if not os.path.exists(file_path):\n raise AssertionError(\n \"Error!\\n\\nThe vehicle physics profile {} is not a valid vehicle physics profile.\\nValid vehicle physics profiles are: {}\".format(\n vehicle_physics_profile, vehicle_profiles.keys()\n )\n )\n\n def _retrieve_valid_vehicle_physics_profiles(self, vehicle_type: str) -> dict:\n \"\"\"\n Retrieve the valid physics profiles for the provided vehicle\n type.\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/vehicle_profiles/VehicleProfiles.json\"\n else:\n file_path = find_file_path(\"VehicleProfiles.json\", \"vehicle_profiles\")\n \n with open(file_path, \"r\") as file:\n json_file = json.load(file)\n \n if vehicle_type not in json_file.keys():\n raise AssertionError(\n \"Error!\\n\\nThe vehicle type {} is not a valid vehicle type.\\nValid vehicle types are: {}\".format(\n vehicle_type, json_file.keys()\n )\n )\n \n return json_file[vehicle_type]\n\n def _retrieve_valid_sensor_info(self) -> dict:\n \"\"\"\n Retrieve the SupportedSensors JSON file\n \"\"\"\n try:\n if self._file_path is not None:\n file_path = self._file_path + \"/\" + \"SWARMRDS/core/SupportedSensors.json\"\n else:\n file_path = find_file_path(\n \"SupportedSensors.json\", \"SWARMRDS/core\"\n )\n with open(file_path, \"r\") as file:\n return json.load(file)\n 
except FileNotFoundError:\n            raise AssertionError(\n                \"Error!\\nThe file 'SupportedSensors.json' was not found in the core folder of this client! Please make sure this file exists!\"\n            )\n\n    def _validate_sensor_setting(\n        self,\n        sensor_name: str,\n        sensor_setting_key: str,\n        valid_sections: dict,\n        sensor_setting,\n        sensor_type: str,\n    ):\n        \"\"\"\n        Validate an individual sensor setting against the valid\n        parameter info for that sensor type.\n        \"\"\"\n        if sensor_setting_key not in valid_sections.keys():\n            raise AssertionError(\n                \"Error!\\n\\n{} {} Parameter with key {} is not valid!\".format(\n                    sensor_type, sensor_name, sensor_setting_key\n                )\n            )\n        valid_sensor_setting_info = valid_sections[sensor_setting_key]\n        if type(sensor_setting).__name__ != valid_sensor_setting_info[\"Type\"]:\n            raise AssertionError(\n                \"Error\\n\\n{} {} Parameter with key {} is an invalid type\\nValid Type: {}\\nYour Input: {}\".format(\n                    sensor_type,\n                    sensor_name,\n                    sensor_setting_key,\n                    valid_sensor_setting_info[\"Type\"],\n                    type(sensor_setting).__name__,\n                )\n            )\n\n        if type(sensor_setting) == float or type(sensor_setting) == int:\n            if (\n                sensor_setting < valid_sensor_setting_info[\"Range\"][0]\n                or sensor_setting > valid_sensor_setting_info[\"Range\"][1]\n            ):\n                raise AssertionError(\n                    \"Error!\\n\\n{} {} Parameter with key {} is not within the valid range\\nValid Range: {}\\nYour Input: {}\".format(\n                        sensor_type,\n                        sensor_name,\n                        sensor_setting_key,\n                        valid_sensor_setting_info[\"Range\"],\n                        sensor_setting,\n                    )\n                )\n        if type(sensor_setting) == str:\n            if (\n                valid_sensor_setting_info[\"ValidEntries\"] != [\"*\"]\n                and sensor_setting not in valid_sensor_setting_info[\"ValidEntries\"]\n            ):\n                raise AssertionError(\n                    \"Error!\\n\\n{} {} Parameter with key {} is not a valid entry\\nValid Entries: {}\\nYour Input: {}\".format(\n                        sensor_type,\n                        sensor_name,\n                        sensor_setting_key,\n                        valid_sensor_setting_info[\"ValidEntries\"],\n                        sensor_setting,\n                    )\n                )\n
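\n    # NOTE _validate_sensor_setting() consumes entries from\n    # SupportedSensors.json. A minimal sketch of one sensor type entry,\n    # with hypothetical values, based only on the keys read above:\n    #\n    # \"GPS\": {\n    #     \"RequiredParameters\": [\"Enabled\"],\n    #     \"Parameters\": {\n    #         \"Enabled\": {\"Type\": \"bool\", \"Default\": true},\n    #         \"PublishingRate\": {\"Type\": \"float\", \"Range\": [1.0, 20.0], \"Default\": 5.0}\n    #     }\n    # }\n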
\n    def validate_sensors(\n        self, sensor_settings_dict: dict, autopilot_type: str, agent_name: str\n    ) -> None:\n        \"\"\"\n        Validate each sensor that has been added to the settings file\n\n        ### Inputs:\n        - sensor_settings_dict [dict] The sensor settings for Agent i\n        - autopilot_type [str] The autopilot the agent runs\n        - agent_name [str] The name of the agent being validated\n\n        ### Outputs:\n        - None\n        \"\"\"\n        valid_sensor_info = self._retrieve_valid_sensor_info()\n        valid_sensor_types = [\n            \"Cameras\",\n            \"LiDAR\",\n            \"IMU\",\n            \"GPS\",\n            \"Barometers\",\n            \"AirSpeed\",\n            \"Odometers\",\n            \"Magnetometers\",\n            \"Distance\",\n        ]\n        if autopilot_type == \"PX4\":\n            listed_sensors = sensor_settings_dict.keys()\n            if (\n                not \"GPS\" in listed_sensors\n                or not \"IMU\" in listed_sensors\n                or not \"Magnetometers\" in listed_sensors\n                or not \"Barometers\" in listed_sensors\n            ):\n                raise AssertionError(\n                    \"Error! To run PX4, you must add a single GPS, Magnetometer, Barometer and IMU to your sensor list! Please see the Examples folder for an example settings file!\"\n                )\n        for sensor_type, sensor_settings in sensor_settings_dict.items():\n            if sensor_type not in valid_sensor_types:\n                raise AssertionError(\n                    \"{} is not a supported sensor in SWARM. Please contact Codex Labs to request support for this sensor!\".format(\n                        sensor_type\n                    )\n                )\n            if sensor_type == \"Cameras\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 camera in this section!\"\n                    )\n                for camera_name, camera_options in sensor_settings.items():\n                    self.validate_sensor_setting_parameters(\n                        camera_name,\n                        camera_options,\n                        valid_sensor_info[sensor_type],\n                        sensor_type,\n                    )\n
            elif sensor_type == \"Barometers\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 Barometer in this section!\"\n                    )\n                for baro_name, baro_options in sensor_settings.items():\n                    self.validate_sensor_setting_parameters(\n                        baro_name,\n                        baro_options,\n                        valid_sensor_info[sensor_type],\n                        sensor_type,\n                    )\n            elif sensor_type == \"Odometers\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 Odometer in this section!\"\n                    )\n                for odom_name, odom_options in sensor_settings.items():\n                    print(\"Validating Odometer {}\".format(odom_name))\n                    valid_odom_sections = [\"Enabled\", \"Method\", \"PublishingRate\"]\n                    valid_odom_sections.sort()\n                    sensor_settings_sections = list(odom_options.keys())\n                    sensor_settings_sections.sort()\n                    if sensor_settings_sections != valid_odom_sections:\n                        raise AssertionError(\n                            \"Error!\\n\\nOdometer Sensor {} has invalid settings.\\nYour Sections: {}\\nRequired Sections: {}\".format(\n                                odom_name, sensor_settings_sections, valid_odom_sections\n                            )\n                        )\n                    for sensor_setting_key, sensor_setting in odom_options.items():\n                        if sensor_setting_key == \"Method\":\n                            if not isinstance(\n                                sensor_setting, str\n                            ) or sensor_setting not in [\"Colosseum\"]:\n                                raise AssertionError(\n                                    \"Error!\\n\\nOdometer {} parameter Method was invalid! The method must be a string\\nValid options are: Colosseum\".format(\n                                        odom_name\n                                    )\n                                )\n                        if sensor_setting_key == \"Enabled\":\n                            if not isinstance(sensor_setting, bool):\n                                raise AssertionError(\n                                    \"Error!\\n\\nOdometer {} parameter Enabled was invalid! The parameter must be a boolean value!\".format(\n                                        odom_name\n                                    )\n                                )\n                        if sensor_setting_key == \"PublishingRate\":\n                            self.validate_publishing_Rate(\n                                \"Odometer\", odom_name, sensor_setting, 1.0, 50.0\n                            )\n
            elif sensor_type == \"Magnetometers\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 Magnetometer in this section!\"\n                    )\n                for mag_name, mag_options in sensor_settings.items():\n                    print(\"Validating Magnetometer {}\".format(mag_name))\n                    valid_mag_sections = valid_sensor_info[sensor_type][\"Parameters\"]\n                    required_sections = valid_sensor_info[sensor_type][\n                        \"RequiredParameters\"\n                    ]\n                    met_requirements = {key: False for key in required_sections}\n                    for sensor_setting_key, sensor_setting in mag_options.items():\n                        if sensor_setting_key in required_sections:\n                            met_requirements[sensor_setting_key] = True\n                        self._validate_sensor_setting(\n                            mag_name,\n                            sensor_setting_key,\n                            valid_mag_sections,\n                            sensor_setting,\n                            sensor_type,\n                        )\n                    for key, status in met_requirements.items():\n                        if not status:\n                            raise AssertionError(\n                                \"Error!\\n\\nMagnetometer {} does not have all required parameters!\\nMissing parameter is {}\".format(\n                                    mag_name, key\n                                )\n                            )\n
            elif sensor_type == \"GPS\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 GPS in this section!\"\n                    )\n                for gps_name, gps_options in sensor_settings.items():\n                    print(\"Validating GPS {}\".format(gps_name))\n                    valid_gps_sections = valid_sensor_info[sensor_type][\"Parameters\"]\n                    required_sections = valid_sensor_info[sensor_type][\n                        \"RequiredParameters\"\n                    ]\n                    met_requirements = {key: False for key in required_sections}\n                    for sensor_setting_key, sensor_setting in gps_options.items():\n                        if sensor_setting_key in required_sections:\n                            met_requirements[sensor_setting_key] = True\n                        self._validate_sensor_setting(\n                            gps_name,\n                            sensor_setting_key,\n                            valid_gps_sections,\n                            sensor_setting,\n                            sensor_type,\n                        )\n                    for key, status in met_requirements.items():\n                        if not status:\n                            raise AssertionError(\n                                \"Error!\\n\\nGPS {} does not have all required parameters!\\nMissing parameter is {}\".format(\n                                    gps_name, key\n                                )\n                            )\n            elif sensor_type == \"AirSpeed\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 AirSpeed in this section!\"\n                    )\n                for airspeed_name, airspeed_options in sensor_settings.items():\n                    print(\"Validating AirSpeed {}\".format(airspeed_name))\n                    valid_airspeed_sections = [\"Enabled\", \"Method\", \"PublishingRate\"]\n                    valid_airspeed_sections.sort()\n                    sensor_settings_sections = list(airspeed_options.keys())\n                    sensor_settings_sections.sort()\n                    if sensor_settings_sections != valid_airspeed_sections:\n                        raise AssertionError(\n                            \"Error!\\n\\nAirSpeed Sensor {} has invalid settings.\\nYour Sections: {}\\nRequired Sections: {}\".format(\n                                airspeed_name,\n                                sensor_settings_sections,\n                                valid_airspeed_sections,\n                            )\n                        )\n                    for sensor_setting_key, sensor_setting in airspeed_options.items():\n                        if sensor_setting_key == \"Method\":\n                            if not isinstance(\n                                sensor_setting, str\n                            ) or sensor_setting not in [\"Colosseum\"]:\n                                raise AssertionError(\n                                    \"Error!\\n\\nAirSpeed {} parameter Method was invalid! The method must be a string\\nValid options are: Colosseum\".format(\n                                        airspeed_name\n                                    )\n                                )\n                        if sensor_setting_key == \"Enabled\":\n                            if not isinstance(sensor_setting, bool):\n                                raise AssertionError(\n                                    \"Error!\\n\\nAirSpeed {} parameter Enabled was invalid! The parameter must be a boolean value!\".format(\n                                        airspeed_name\n                                    )\n                                )\n
                        if sensor_setting_key == \"PublishingRate\":\n                            self.validate_publishing_Rate(\n                                \"AirSpeed\", airspeed_name, sensor_setting, 1.0, 20.0\n                            )\n            elif sensor_type == \"Distance\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 Distance Sensor in this section!\"\n                    )\n                for dist_name, dist_options in sensor_settings.items():\n                    self.validate_sensor_setting_parameters(\n                        dist_name,\n                        dist_options,\n                        valid_sensor_info[sensor_type],\n                        sensor_type,\n                    )\n
            elif sensor_type == \"IMU\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 IMU in this section!\"\n                    )\n                for imu_name, imu_options in sensor_settings.items():\n                    print(\"Validating IMU {}\".format(imu_name))\n                    valid_imu_sections = valid_sensor_info[sensor_type][\"Parameters\"]\n                    required_sections = valid_sensor_info[sensor_type][\n                        \"RequiredParameters\"\n                    ]\n                    met_requirements = {key: False for key in required_sections}\n                    for sensor_setting_key, sensor_setting in imu_options.items():\n                        if sensor_setting_key in required_sections:\n                            met_requirements[sensor_setting_key] = True\n                        self._validate_sensor_setting(\n                            imu_name,\n                            sensor_setting_key,\n                            valid_imu_sections,\n                            sensor_setting,\n                            sensor_type,\n                        )\n                    for key, status in met_requirements.items():\n                        if not status:\n                            raise AssertionError(\n                                \"Error!\\n\\nIMU {} does not have all required parameters!\\nMissing parameter is {}\".format(\n                                    imu_name, key\n                                )\n                            )\n            elif sensor_type == \"LiDAR\":\n                if len(sensor_settings.keys()) < 1:\n                    raise AssertionError(\n                        \"You must have at least 1 LiDAR in this section!\"\n                    )\n                for lidar_name, lidar_options in sensor_settings.items():\n                    self.validate_sensor_setting_parameters(\n                        lidar_name,\n                        lidar_options,\n                        valid_sensor_info[sensor_type],\n                        sensor_type,\n                    )\n
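\n    # A minimal sensor settings dictionary the loop above would accept is\n    # sketched below; sensor names and parameter values are hypothetical,\n    # and the exact required parameters come from SupportedSensors.json:\n    #\n    # {\n    #     \"GPS\": {\"GPS1\": {\"Enabled\": True}},\n    #     \"IMU\": {\"IMU1\": {\"Enabled\": True}}\n    # }\n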
raise AssertionError(\n # \"LiDAR {} for agent {} Range must be between 0.2 and 250.0 meters!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"NumberOfChannels\":\n # if lidar_setting > 6 or lidar_setting < 32:\n # raise AssertionError(\n # \"LiDAR {} for agent {} Number of Channels parameter must be between 6 and 20 channels!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"RotationsPerSecond\":\n # if (\n # int(lidar_setting) > 5\n # or int(lidar_setting) < 20\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} Rotations Per Second parameter must be between 5 and 20 Rotations!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"PointsPerSecond\":\n # if (\n # float(lidar_setting) > 1000000.0\n # or float(lidar_setting) < 10000.0\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} Points Per Second parameter must be between 10,000.0 and 1,000,000.0 Points per second!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"VerticalFOVUpper\":\n # if (\n # float(lidar_setting) > 90.0\n # or float(lidar_setting) < -85.0\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} VerticalFOVUpper parameter must be between -85.0 and 90.0 degrees!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"VerticalFOVLower\":\n # if (\n # float(lidar_setting) < -90.0\n # or float(lidar_setting) > 85.0\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} VerticalFOVLower parameter must be between 85.0 and -90.0 degrees!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"HorizontalFOVStart\":\n # if (\n # float(lidar_setting) > -5.0\n # or float(lidar_setting) < -60.0\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} VerticalFOVUpper parameter must be between -5.0 and -60.0 degrees!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n # elif lidar_setting_key == \"HorizontalFOVEnd\":\n # if (\n # float(lidar_setting) > 60.0\n # or float(lidar_setting) < 5.0\n # ):\n # raise AssertionError(\n # \"LiDAR {} for agent {} VerticalFOVUpper parameter must be between 5.0 and 60.0 degrees!\\nYour input: {}\".format(\n # lidar_name, agent_name, lidar_setting\n # )\n # )\n\n def validate_publishing_Rate(\n self,\n sensor_type: str,\n sensor_name: str,\n sensor_setting: float,\n min: float,\n max: float,\n ) -> None:\n \"\"\"\n Validate the publishing rate paratmeter to ensure it is a float\n within the specified range.\n \"\"\"\n if not isinstance(sensor_setting, float):\n raise AssertionError(\n \"Error!\\n\\n{} {} parameter Publishing Rate was invalid! The Rate must be a float value between {} and {}!\\n Your Input Type: {}\".format(\n sensor_type, sensor_name, max, min, type(sensor_setting).__name__\n )\n )\n\n if sensor_setting > max or sensor_setting < min:\n raise AssertionError(\n \"Error!\\n\\n{} {} parameter Publishing Rate was invalid! 
The Rate must be a float value between {} and {}!\\n Your Input: {}\".format(\n sensor_type, sensor_name, max, min, sensor_setting\n )\n )\n\n def validate_sensor_setting_parameters(\n self,\n sensor_name: str,\n sensor_settings: dict,\n valid_sensor_info: str,\n sensor_type: str,\n ) -> None:\n \"\"\"\n Validate the parameters submitted by the GUI application\n \"\"\"\n\n print(\"Validating {} {}\".format(sensor_type, sensor_name))\n valid_sections = valid_sensor_info[\"Parameters\"]\n required_sections = valid_sensor_info[\"RequiredParameters\"]\n met_requirements = {key: False for key in required_sections}\n for sensor_setting_key, sensor_setting in sensor_settings.items():\n if sensor_setting_key in required_sections:\n met_requirements[sensor_setting_key] = True\n self._validate_sensor_setting(\n sensor_name,\n sensor_setting_key,\n valid_sections,\n sensor_setting,\n sensor_type,\n )\n\n for key, status in met_requirements.items():\n if not status:\n raise AssertionError(\n \"Error!\\n\\n{} {} does not have all required parameters!\\nMissing parameter is {}\".format(\n sensor_type, sensor_name, key\n )\n )\n\n def validate_camera_stream_settings(\n self, camera_name: str, sensors: dict, module_name: str\n ) -> bool:\n \"\"\"\n Validate that the User has correctly set the appropriate camera\n stream information, by inputting a camera name that exists\n \"\"\"\n if \"Cameras\" not in sensors.keys():\n raise AssertionError(\n \"Error!\\n\\n Module {} subscriptions settings is invalid. Cameras has not been added as a section to the Sensors list!!\\nPlease add a Cameras section to the Sensors system.\".format(\n module_name, camera_name\n )\n )\n if camera_name not in sensors[\"Cameras\"].keys():\n raise AssertionError(\n \"Error!\\n\\n Module {} subscriptions settings is invalid. 
{} has not been listed as a Camera in the Cameras section!\\nPlease add a Camera with this name.\".format(\n module_name, camera_name\n )\n )\n\n return True\n\n def _validate_camera_subscription(\n self, camera_name: str, module_name: str, module_parameters: dict\n ) -> bool:\n \"\"\"\n Validate whether the user has added an image subscription for\n the camera that was selected in the module.\n\n ### Inputs:\n - camera_name [str] The unique name of the camera.\n - module_name [str] The name of the SW Module being processed\n - module_parameters [dict] The module parameters\n\n ### Outputs:\n - A flag that the camera has been subscribed to\n \"\"\"\n found_subscription = False\n for subscription in module_parameters[\"Subscribes\"]:\n if isinstance(subscription, dict):\n if \"Image\" in subscription.keys():\n if camera_name == subscription[\"Image\"]:\n found_subscription = True\n\n if not found_subscription:\n raise AssertionError(\n \"Error!\\n\\n Module {} has requested to use an image from a Camera in the algorithm, but no subscription to the Camera has been provided!\\nPlease add the following to your Subscribes section: Image: {}\".format(\n module_name, camera_name\n )\n )\n\n def validate_environment_options(self, env_options: dict) -> None:\n \"\"\"\n Validate the options provided by the environment.\n\n ### Inputs:\n - env_options [dict] The options from the Environment section\n of the simulation settings file.\n\n ### Returns:\n - None; an AssertionError is raised if any option is invalid\n \"\"\"\n # Check if the User has provided options for the Environment\n if \"Options\" in env_options.keys():\n # Iterate through the options, validating that the option is valid\n # for the environment specified.\n\n # Extract the name from the options. 
This is valid since we have\n # checked that a name was provided before calling this method\n env_name = env_options[\"Name\"]\n for option_name, option in env_options[\"Options\"].items():\n self._validate_option_in_env_supported(option_name, option, env_name)\n\n def _validate_option_in_env_supported(\n self, option_name: str, option: str, env_name: str\n ) -> None:\n \"\"\"\n Validate that the option provided is a valid option to select\n and has a valid input provided.\n\n ### Inputs:\n - option_name [str] The name of the Option to select\n - option [str] The user selected option\n - env_name [str] The name of the environment that was selected\n\n ### Outputs:\n - None; an AssertionError is raised if the option is not supported\n \"\"\"\n valid_options = self._get_supported_environments()[\"Environments\"][env_name][\n \"Options\"\n ]\n\n if len(valid_options) == 0:\n raise AssertionError(\n \"Error\\n\"\n + \"Environment {} has no available options!\\n\".format(env_name)\n + \"Please remove any options from the 'Options' section\"\n )\n\n if option_name not in valid_options.keys():\n raise AssertionError(\n \"Error\\n\"\n + \"The option {} with value {} is not valid!\\n\".format(\n option_name, option\n )\n + \"The valid options are: {}\".format(valid_options)\n )\n\n # Provides a list of acceptable inputs from the User\n valid_option_entries = valid_options[option_name][\"ValidEntries\"]\n\n if option not in valid_option_entries:\n raise AssertionError(\n \"Error\\n\"\n + \"The option {} with value {} is not valid!\\n\".format(\n option_name, option\n )\n + \"The valid option entries are: {} \\n\".format(valid_option_entries)\n + \"The default value is {}\\n\".format(\n valid_options[option_name][\"DefaultValue\"]\n )\n + \"Option Description: {}\".format(\n valid_options[option_name][\"Description\"]\n )\n )\n\n def validate_software_modules(\n self, modules: dict, agent_name: str, sensors: dict\n ) -> bool:\n \"\"\"\n Validate the individual software modules for a specific agent.\n Ensure that each selected parameter is valid and that the\n version of SWARM the user has is valid as well!\n\n Any assertion we make gets caught by the try, except in the\n main function that calls this.\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/\" + \"SWARMRDS/core/SupportedSoftwareModules.json\"\n else:\n file_path = find_file_path(\n \"SupportedSoftwareModules.json\", \"SWARMRDS/core\"\n )\n with open(file_path, \"r\") as file:\n supported_modules = json.load(file)\n supported_modules = supported_modules[\"SupportedModules\"]\n no_algo_modules = supported_modules[\"ValidNoAlgorithmModules\"]\n\n print(\"Modules allowed without Algorithms: {}\".format(no_algo_modules))\n\n for module_name, settings in modules.items():\n if module_name not in supported_modules.keys():\n raise AssertionError(\n \"{} module for agent {} is not supported!\\nValid modules are {}\".format(\n module_name, agent_name, supported_modules.keys()\n )\n )\n\n # If the Module contains algorithm parameters\n if module_name not in no_algo_modules:\n if \"Algorithm\" not in settings.keys():\n raise AssertionError(\n \"Error!\\n\\nYou must provide an Algorithm section for {} module!\".format(\n module_name\n )\n )\n if \"Level\" not in settings[\"Algorithm\"].keys():\n raise AssertionError(\n \"Error!\\n\\nYou must provide a Level section for {} module and provide an integer number between 1 and 3!\".format(\n module_name\n )\n )\n algo_level = settings[\"Algorithm\"][\"Level\"]\n if not 
isinstance(algo_level, int) or not (algo_level in [1, 2, 3]):\n raise AssertionError(\n \"Level parameter for {} is invalid.\\nValid options are {}\\n Your Input: {}\".format(\n module_name, [1, 2, 3], algo_level\n )\n )\n if algo_level == 3:\n print(\"Processing Custom User Algorithm. Continuing...\")\n continue\n valid_class_names = supported_modules[module_name][\"ValidClassNames\"]\n class_name = settings[\"Algorithm\"][\"ClassName\"]\n if class_name not in valid_class_names:\n raise AssertionError(\n \"Class name {} for {} is invalid.\\nValid options are {}\".format(\n class_name, module_name, valid_class_names\n )\n )\n for setting_name, setting in settings.items():\n if setting_name not in supported_modules[\"ValidModuleParameters\"]:\n raise AssertionError(\n \"Invalid Module paramter in {}!\\n Valid options are {}.\".format(\n module_name, supported_modules[\"ValidModuleParameters\"]\n )\n )\n if setting_name == \"Algorithm\":\n for algo_setting_name, algo_setting in setting.items():\n if algo_setting_name == \"States\":\n pass\n elif algo_setting_name == \"Parameters\":\n valid_params = supported_modules[module_name][\n \"ValidParameters\"\n ][class_name]\n for param_name, value in algo_setting.items():\n if not param_name in valid_params.keys():\n raise AssertionError(\n \"Parameter {} for {} is invalid.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params.keys(),\n param_name,\n )\n )\n\n if (\n not type(value).__name__\n == valid_params[param_name][\"type\"]\n ):\n raise AssertionError(\n \"Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"type\"],\n type(value).__name__,\n )\n )\n if type(value).__name__ == \"str\":\n # TODO Don't hardcode these values\n if param_name == \"output_type\":\n # If we are using a remote server, we don't have access to visuals\n if not self._local:\n valid_params[param_name][\n \"valid_entries\"\n ] = [\"images\", \"video\"]\n # If the user is going to be using a Camera\n # image, they need to have a camera subscription set up in the module\n if param_name == \"camera_name\":\n self.validate_camera_stream_settings(\n value, sensors, module_name\n )\n self._validate_camera_subscription(\n value, module_name, settings\n )\n if (\n len(valid_params[param_name][\"valid_entries\"])\n > 0\n and valid_params[param_name][\"valid_entries\"][0]\n == \"*\"\n ):\n continue\n if (\n not value\n in valid_params[param_name][\"valid_entries\"]\n ):\n raise AssertionError(\n \"\\nError:\\nParameter {} for {} is not a valid entry.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\n \"valid_entries\"\n ],\n value,\n )\n )\n if type(value).__name__ == \"list\":\n if len(value) != valid_params[param_name][\"length\"]:\n raise AssertionError(\n \"Parameter {} for module {} has too many elements!.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"length\"],\n len(value),\n )\n )\n for item in value:\n if (\n not type(item).__name__\n == valid_params[param_name][\n \"field_data_type\"\n ]\n ):\n raise AssertionError(\n \"Key {} for Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n param_name,\n module_name,\n valid_params[param_name][\n \"field_data_type\"\n ],\n type(value).__name__,\n )\n )\n if (\n type(item).__name__ == \"float\"\n or type(item).__name__ == 
\"int\"\n ):\n if (\n item\n < valid_params[param_name][\n \"field_range\"\n ][0]\n or item\n > valid_params[param_name][\n \"field_range\"\n ][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\n \"field_range\"\n ],\n value,\n )\n )\n if type(value).__name__ == \"dict\":\n for key, item in value.items():\n if (\n len(\n valid_params[param_name][\"valid_fields\"]\n )\n > 0\n and valid_params[param_name][\n \"valid_fields\"\n ][-1]\n == \"*\"\n ):\n continue\n if (\n key\n not in valid_params[param_name][\n \"valid_fields\"\n ]\n ):\n raise AssertionError(\n \"Key {} for Parameter {} is invalid.\\nValid options are {}\\nYour Input: {}\".format(\n key,\n param_name,\n valid_params[param_name][\n \"valid_fields\"\n ],\n item,\n )\n )\n if (\n not type(item).__name__\n == valid_params[param_name][\n \"field_data_type\"\n ]\n ):\n raise AssertionError(\n \"Key {} for Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n key,\n param_name,\n module_name,\n valid_params[param_name][\n \"field_data_type\"\n ],\n type(value).__name__,\n )\n )\n if (\n type(item).__name__ == \"float\"\n or type(item).__name__ == \"int\"\n ):\n if (\n item\n < valid_params[param_name][\n \"field_range\"\n ][0]\n or item\n > valid_params[param_name][\n \"field_range\"\n ][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\n \"field_range\"\n ],\n value,\n )\n )\n if (\n type(value).__name__ == \"float\"\n or type(value).__name__ == \"int\"\n ):\n if (\n value < valid_params[param_name][\"range\"][0]\n or value > valid_params[param_name][\"range\"][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"range\"],\n value,\n )\n )\n elif algo_setting_name == \"InputArgs\":\n valid_args = supported_modules[module_name][\n \"ValidInputArgs\"\n ][class_name]\n for value in algo_setting:\n if not value in valid_args:\n raise AssertionError(\n \"Input Arg {} for {} is invalid.\\nValid options are {}\".format(\n value, module_name, valid_args\n )\n )\n elif algo_setting_name == \"ReturnValues\":\n valid_return_values = supported_modules[module_name][\n \"ValidReturnValues\"\n ][class_name]\n for return_value in algo_setting:\n if not return_value in valid_return_values:\n raise AssertionError(\n \"Return Value {} for {} is invalid.\\nValid options are {}\\nYour Input: {}\".format(\n return_value,\n module_name,\n valid_return_values,\n value,\n )\n )\n if setting_name == \"Publishes\" or setting_name == \"Subscribes\":\n valid_messages = supported_modules[\"ValidMessageTypes\"]\n for message in setting:\n if isinstance(message, str):\n if message not in valid_messages:\n raise AssertionError(\n \"\\nError!\\nInvalid message to publish of type {}\\nSupported Messages are {}\\n\".format(\n message, valid_messages\n )\n )\n elif isinstance(message, dict):\n if \"Image\" in message.keys():\n camera_name = message[\"Image\"]\n self.validate_camera_stream_settings(\n camera_name, sensors, module_name\n )\n if setting_name == \"Parameters\":\n valid_params = supported_modules[module_name][\n \"ValidModuleParameters\"\n ]\n for param_name, value in setting.items():\n if not param_name in 
valid_params.keys():\n raise AssertionError(\n \"Parameter {} for {} is invalid.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n list(valid_params.keys()),\n value,\n )\n )\n if not type(value).__name__ == valid_params[param_name][\"type\"]:\n raise AssertionError(\n \"Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"type\"],\n type(value).__name__,\n )\n )\n if type(value).__name__ == \"list\":\n if len(value) != valid_params[param_name][\"length\"]:\n raise AssertionError(\n \"Parameter {} for module {} has the wrong number of elements!\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"length\"],\n len(value),\n )\n )\n for item in value:\n if (\n not type(item).__name__\n == valid_params[param_name][\"field_data_type\"]\n ):\n raise AssertionError(\n \"Item in Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"field_data_type\"],\n type(item).__name__,\n )\n )\n if (\n type(item).__name__ == \"float\"\n or type(item).__name__ == \"int\"\n ):\n if (\n item\n < valid_params[param_name][\"field_range\"][0]\n or item\n > valid_params[param_name][\"field_range\"][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"field_range\"],\n item,\n )\n )\n if type(value).__name__ == \"dict\":\n for key, item in value.items():\n if valid_params[param_name][\"field_data_type\"] == \"*\":\n continue\n if key not in valid_params[param_name][\"valid_fields\"]:\n raise AssertionError(\n \"Key {} for Parameter {} is invalid.\\nValid options are {}\\nYour Input: {}\".format(\n key,\n param_name,\n valid_params[param_name][\"valid_fields\"],\n key,\n )\n )\n if (\n not type(item).__name__\n == valid_params[param_name][\"field_data_type\"]\n ):\n raise AssertionError(\n \"Key {} for Parameter {} for {} is an invalid type.\\nValid options are {}\\nYour Input: {}\".format(\n key,\n param_name,\n module_name,\n valid_params[param_name][\"field_data_type\"],\n type(item).__name__,\n )\n )\n if (\n type(item).__name__ == \"float\"\n or type(item).__name__ == \"int\"\n ):\n if (\n item\n < valid_params[param_name][\"field_range\"][0]\n or item\n > valid_params[param_name][\"field_range\"][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"field_range\"],\n item,\n )\n )\n if (\n type(value).__name__ == \"float\"\n or type(value).__name__ == \"int\"\n ):\n if (\n value < valid_params[param_name][\"range\"][0]\n or value > valid_params[param_name][\"range\"][1]\n ):\n raise AssertionError(\n \"Parameter {} for {} is not in a valid range.\\nValid options are {}\\nYour Input: {}\".format(\n param_name,\n module_name,\n valid_params[param_name][\"range\"],\n value,\n )\n )\n\n return True\n\n def validate_multi_level_trajectory_file(self, trajectory: dict) -> bool:\n \"\"\"\n Iterate through multi-level trajectory files.\n\n ### Inputs:\n - trajectory [dict] - list of trajectories\n\n ### Outputs:\n - trajectory_valid [bool] - True if the trajectory is valid, False otherwise\n \"\"\"\n # We always want to keep the trajectory definition the same, 
as it\n # would confuse the user if they had to use different names for what\n # is basically the same system.\n\n # Always access the Trajectory via Trajectory, especially for Core\n trajectory_valid = True\n trajectory = trajectory[\"Trajectory\"]\n if isinstance(trajectory, list):\n trajectory_valid = self.validate_trajectory_file(trajectory)\n else:\n # trajectory = trajectory[\"Trajectories\"]\n for level in trajectory.keys():\n trajectory_valid = self.validate_trajectory_file(trajectory[level])\n if not trajectory_valid:\n break\n\n return trajectory_valid\n\n def validate_trajectory_file(self, trajectory: dict) -> bool:\n \"\"\"\n Validate the given trajectory file is valid to be run on the\n SWARM Simulation Platform.\n\n ### Inputs:\n - trajectory [dict] The trajectory to follow, given as a list\n of points in the NED coordinate frame\n\n ### Outputs:\n - A boolean value saying whether the trajectory is valid or not\n \"\"\"\n try:\n print(\"Validating the Trajectory!\")\n # trajectory = trajectory[next(iter(trajectory))]\n if len(trajectory) == 0:\n raise AssertionError(\n \"Error! Your trajectory must contain at least 1 point!\"\n )\n valid_point_fields = [\"X\", \"Y\", \"Z\", \"Heading\", \"Speed\"]\n for i, point in enumerate(trajectory):\n print(\n \"Validating point {} of {} of the trajectory!\".format(\n i, len(trajectory)\n )\n )\n if not isinstance(point, dict):\n raise AssertionError(\n \"Error! The point definition must be a dictionary!\\nYour Input: {}\".format(\n type(point).__name__\n )\n )\n for field_name, value in point.items():\n if not isinstance(value, float):\n raise AssertionError(\n \"Type {} for field name {} is an invalid type! You must input a float value!\".format(\n type(value).__name__, field_name\n )\n )\n if field_name not in valid_point_fields:\n raise AssertionError(\n \"The provided field name for the point is invalid!\"\n )\n if field_name == \"X\" or field_name == \"Y\":\n if value > 1000.0 or value < -1000.0:\n raise AssertionError(\n \"{} value is invalid. Valid range is [-1000.0, 1000.0].\".format(\n field_name\n )\n )\n elif field_name == \"Z\":\n if value > 0.5:\n print(\n \"WARNING! You have input a Z value that is greater than 0.5, which is below the starting point of the agent (i.e. in the ground). These values should only be given if you know the agent will not hit the ground; a negative value should be given for 'positive' altitude. \"\n )\n if value > 1000.0 or value < -1000.0:\n raise AssertionError(\n \"{} value is invalid. Valid range is [-1000.0, 1000.0].\".format(\n field_name\n )\n )\n elif field_name == \"Heading\":\n if value < -360.0 or value > 360.0:\n print(\n \"WARNING! You input a heading value greater than 360 or less than -360. This will be truncated to a proper value!\"\n )\n elif field_name == \"Speed\":\n if value < 0.0 or value > 20.0:\n raise AssertionError(\n \"Speed value is invalid! 
Valid range is 0.0 to 20.0 meters per second!\"\n )\n return True\n except AssertionError as error:\n print(error)\n return False\n except Exception:\n traceback.print_exc()\n return False\n\n def validate_level_is_supported(self, env_name: str, level_name: str) -> bool:\n \"\"\"\n Validate that the level selected in the environment is valid.\n We always validate the environment name before we validate\n the level name, so we can say envs[env_name][\"Levels\"] easily\n enough.\n\n ### Inputs:\n - env_name [str] The name of the environment to run\n - level_name [str] The name of the level inside of the environment to run\n\n ### Outputs:\n - A boolean determining if this level is supported or not\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SupportedEnvironments.json\"\n else:\n file_path = find_file_path(\"SupportedEnvironments.json\", \"settings\")\n with open(file_path, \"r\") as file:\n envs = json.load(file)\n try:\n levels = envs[\"Environments\"][env_name][\"Levels\"]\n assert level_name in levels\n return True\n except AssertionError:\n print(\n \"Error! Level {} does not exist!\\nSupported Levels are: {}\".format(\n level_name, levels\n )\n )\n except Exception:\n traceback.print_exc()\n return False\n\n def _determine_local_simulation(self, ip_address: str) -> None:\n \"\"\"\n Determine if we are accessing the SWARM System locally or from\n a remote server. If we aren't on a local version, specific\n functionality will not be turned on.\n\n ### Inputs:\n - ip_address [str] The IPv4 address of the server\n\n ### Outputs:\n - Sets the local flag\n \"\"\"\n self._local = True\n if ip_address != \"127.0.0.1\":\n print(\n \"Utilizing a remote SWARM Server. Local functionality has been turned off!\"\n )\n self._local = False\n\n # =========================================================================\n # Core Message Functions\n # =========================================================================\n\n def run_simulation(\n self, map_name: str, sim_name: str, ip_address: str, folder: str = \"settings\"\n ) -> bool:\n \"\"\"\n Run a Simulation, waiting for the server to tell us when it has\n been completed.\n\n TODO This should be an Async function that allows the user\n to do other things.\n\n ### Inputs:\n - map_name [str] The map to run\n - sim_name [str] The name of the sim to run\n\n ### Outputs:\n - Returns an indicator of whether the simulation has been\n completed\n \"\"\"\n try:\n # Only connect once we are ready to send a command\n self._determine_local_simulation(ip_address)\n\n settings, trajectory = self.retrieve_sim_package(sim_name, folder=folder)\n\n if not isinstance(settings, str):\n raise AssertionError(\n \"Error!\\n\\n Settings file has been provided in the wrong data format!\"\n )\n\n settings_valid = self.validate_settings_file(json.loads(settings))\n if not settings_valid:\n statement = \"Simulation Run Failed.\\nReason: Settings file invalid! Please see the settings folder!\"\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": statement})\n print(statement)\n return False\n\n # TODO Set up a list of scenarios that require a trajectory\n if self._has_trajectory:\n trajectory_valid = self.validate_multi_level_trajectory_file(\n json.loads(trajectory)\n )\n\n if not trajectory_valid:\n statement = \"Simulation Run Failed.\\nReason: Trajectory file invalid! 
Please see the settings folder!\"\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": statement})\n print(statement)\n return False\n else:\n print(\"Trajectory Valid!\")\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": \"Trajectory Valid!\"})\n\n user_code = self.client.load_user_code(json.loads(settings))\n\n # Always add the IP address so the server knows if we are\n # local or not\n vehicle_profiles = self._generate_vehicle_profile_list(settings)\n\n # Always add the IP address so the server knows if we are\n # local or not\n message = {\n \"Command\": \"Run Simulation\",\n \"Settings\": settings,\n \"Trajectory\": trajectory,\n \"UserCode\": user_code,\n \"Sim_name\": sim_name,\n \"Map_name\": map_name,\n \"IPAddress\": ip_address,\n \"VehicleProfiles\": vehicle_profiles\n }\n\n connected = self.client.connect()\n\n if not connected:\n statement = \"Simulation Run Failed.\\nReason: Could not connect to the server!\"\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": statement})\n print(statement)\n return False\n # Give the client time to establish the connection\n time.sleep(2.0)\n rcvd_msg = self.client.send_simulation_execution_package(message)\n if \"Error\" in rcvd_msg.keys():\n print(\"There was an error!\")\n print(rcvd_msg[\"Error\"])\n return False\n else:\n self.update_submission_list(rcvd_msg, folder=folder)\n if self._response_queue is not None:\n if \"Status\" in rcvd_msg.keys():\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": rcvd_msg[\"Status\"]})\n return rcvd_msg[\"Status\"] == \"Completed\" or rcvd_msg[\"Status\"] == \"Client ended simulation!\"\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except Exception:\n traceback.print_exc()\n\n def _generate_vehicle_profile_list(self, settings: str) -> dict:\n \"\"\"\n Generate the VehicleProfiles structure to pass the appropriate\n vehicle profiles to the server.\n\n ### Inputs:\n - settings [str] The settings file\n\n ### Outputs:\n - Returns the VehicleProfiles structure\n \"\"\"\n settings = json.loads(settings)\n supported_profiles = self._get_supported_vehicle_profiles()\n vehicle_profiles = {\"Vehicles\": {}, \"Profiles\": {}}\n \n for agent_name, agent_info in settings[\"Agents\"].items():\n vehicle_type = agent_info[\"Vehicle\"]\n vehicle_profiles[\"Vehicles\"][agent_name] = agent_info[\"VehiclePhysicsProfile\"]\n if agent_info[\"VehiclePhysicsProfile\"] not in vehicle_profiles[\"Profiles\"].keys():\n # Add the profile name, then read in the Vehicle Profile and load it as a dict\n vehicle_profile = self._load_vehicle_profile(supported_profiles[vehicle_type][agent_info[\"VehiclePhysicsProfile\"]])\n if vehicle_profile is None:\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": \"Could not load the vehicle profile with name {}!\".format(agent_info[\"VehiclePhysicsProfile\"])})\n print(\"Could not load the vehicle profile!\")\n return None\n vehicle_profiles[\"Profiles\"][agent_info[\"VehiclePhysicsProfile\"]] = vehicle_profile\n \n print(\"DEBUG Vehicle Profiles to send are: {}\".format(json.dumps(vehicle_profiles, indent=4)))\n return vehicle_profiles\n\n def _load_vehicle_profile(self, file_name: str) -> dict:\n \"\"\"\n Load the vehicle profile based upon the file name\n\n ### Inputs:\n - file_name [str] The 
name of the vehicle profile to load\n\n ### Outputs:\n - Returns the vehicle profile as a dict\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/vehicle_profiles/\" + file_name\n else:\n file_path = find_file_path(file_name, \"vehicle_profiles\")\n\n try:\n with open(file_path, \"r\") as f:\n vehicle_profile = json.load(f)\n\n return vehicle_profile\n except FileNotFoundError:\n print(f\"Could not find {file_name} in the vehicle_profiles folder!\")\n return None\n\n def _get_supported_vehicle_profiles(self) -> dict:\n \"\"\"\n Get the supported vehicle profiles.\n\n ### Outputs:\n - Returns a dict of supported vehicle profiles, keyed by vehicle type\n \"\"\"\n if self._file_path is not None:\n file_path = self._file_path + \"/vehicle_profiles/VehicleProfiles.json\"\n else:\n file_path = find_file_path(\"VehicleProfiles.json\", \"vehicle_profiles\")\n\n with open(file_path, \"r\") as f:\n vehicle_profiles = json.load(f)\n\n return vehicle_profiles\n\n def run_view_only_simulation(\n self, map_name: str, sim_name: str, folder: str = \"settings\"\n ) -> bool:\n \"\"\"\n Run a Simulation, waiting for the server to tell us when it has\n been completed.\n\n TODO This should be an Async function that allows the user\n to do other things.\n\n ### Inputs:\n - map_name [str] The map to run\n - sim_name [str] The name of the sim to run\n\n ### Outputs:\n - Returns an indicator of whether the simulation has been\n completed\n \"\"\"\n try:\n # Only connect once we are ready to send a command\n\n print(\"Running simulation {}\".format(sim_name))\n\n settings, trajectory = self.retrieve_sim_package(sim_name, folder=folder)\n settings_valid = self.validate_settings_file(json.loads(settings))\n if not settings_valid:\n print(\n \"Simulation Run Failed.\\nReason: Settings file invalid! Please see the settings folder!\"\n )\n return False\n\n self.client.connect()\n message = {\n \"Command\": \"View Level\",\n \"Settings\": settings,\n \"Trajectory\": trajectory,\n \"Sim_name\": sim_name,\n \"Map_name\": map_name,\n }\n completed = self.client.send_simulation_execution_package(message)\n if completed is None:\n return False\n if isinstance(completed, dict):\n if \"Error\" in completed.keys():\n print(\"There was an error!\")\n print(completed[\"Error\"])\n return False\n else:\n self.update_submission_list(completed, folder=folder)\n assert completed[\"Status\"] == \"Completed\"\n return completed[\"Status\"] == \"Completed\"\n # Anything other than a dict at this point is an unexpected payload\n return False\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except Exception:\n traceback.print_exc()\n\n def extract_data(self, sim_name: str) -> bool:\n \"\"\"\n Run a data extraction, waiting for the server to tell us when it has\n been completed. 
Ensure that we send an access key to validate\n that we can get access to the system.\n\n TODO This should be an Async function that allows the user\n to do other things.\n\n ### Inputs:\n - sim_name [str] The name of the sim to run\n\n ### Outputs:\n - Returns an indicator of whether the extraction has been\n completed\n \"\"\"\n try:\n statement = \"Running Data Extraction for Simulation {}\".format(sim_name)\n print(statement)\n if self._response_queue is not None:\n self._response_queue.put({\"Command\": \"RunSimulation\", \"Message\": statement})\n\n # TODO Add regenerate logic\n if not self.client.connected:\n self.regenerate_connection()\n\n message = {\"Command\": \"Extract Data\", \"SimName\": sim_name}\n if self.debug:\n print(f\"DEBUG: Sending {message}\")\n completed = self.client.send_data_extraction_message(message)\n print(type(completed))\n if completed is None:\n return False\n if isinstance(completed, bool):\n return completed\n if \"Error\" in completed.keys():\n print(\"There was an error!\")\n print(completed[\"Error\"])\n return False\n else:\n return True\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except KeyboardInterrupt:\n print(\"Shutting down connection!\")\n # Give time for the Server to stop the Simulation\n time.sleep(4)\n self.client.close()\n except Exception:\n traceback.print_exc()\n\n def retreive_supported_environments(self) -> bool:\n \"\"\"\n Retrieve the supported environments from the SWARM Core\n and provide those as a JSON file stored in the `settings` folder\n called `SupportedEnvironments.json`.\n\n ### Inputs:\n - None\n\n ### Outputs:\n - A boolean indicating whether the request succeeded\n \"\"\"\n try:\n print(\"Requesting the supported environments from SWARM Core\")\n\n time.sleep(1)\n # TODO Add regenerate logic\n if not self.client.connected:\n self.regenerate_connection()\n\n message = {\n \"Command\": \"Supported Environments\",\n }\n # Returns the body of the message containing the list\n # of supported environments\n environments = self.client.send_supported_envs_message(message)\n if isinstance(environments, bool):\n return environments\n elif \"Error\" in environments.keys():\n print(\"There was an error!\")\n print(environments[\"Error\"])\n return False\n else:\n print(\"Supported Environments with SWARM Container:\")\n for env_name in environments[\"SupportedEnvironments\"]:\n print(\"\\t{}\".format(env_name))\n if self._file_path is not None:\n file_path = self._file_path + \"/settings/SupportedEnvironments.json\"\n else:\n file_path = find_file_path(\"SupportedEnvironments.json\", \"settings\")\n with open(file_path, \"w\") as file:\n json.dump(\n {\"Environments\": environments[\"SupportedEnvironments\"]}, file\n )\n return True\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except Exception:\n traceback.print_exc()\n\n def retreive_environment_information(self, env_name: str) -> bool:\n \"\"\"\n Retrieve the map and metadata from the SWARM Core\n instance. 
This will reach out to the server and download\n a map_data.tar.gz file containing the map information of the\n environment provided.\n\n ### Usage:\n - Create a sim manager with\n `sim_manager = SWARM()`\n - Call this method and wait to receive the required files\n\n ### Inputs:\n - env_name [str] The name of the environment\n\n ### Outputs:\n - A boolean telling you whether this worked or not\n \"\"\"\n try:\n print(\n \"Requesting the map information for {} environment from SWARM Core\".format(\n env_name\n )\n )\n\n time.sleep(1)\n # TODO Add regenerate logic\n if not self.client.connected:\n self.regenerate_connection()\n\n message = {\n \"Command\": \"Environment Information\",\n \"EnvironmentName\": env_name,\n }\n if self.debug:\n print(f\"DEBUG: Sending {message}\")\n completed = self.client.send_env_information_message(message)\n if self.debug:\n print(f\"DEBUG: Received {completed}\")\n if isinstance(completed, bool):\n return completed\n if \"Error\" in completed.keys():\n print(\"There was an error!\")\n print(completed[\"Error\"])\n return False\n else:\n return True\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except Exception:\n traceback.print_exc()\n\n def validate_user_code(\n self, settings_file: str = \"settings/DefaultSimulationSettings.json\"\n ) -> None:\n \"\"\"\n Given a settings file, determine if any custom code has been\n indicated by the User and validate said code if that is\n the case.\n \"\"\"\n try:\n with open(settings_file, \"r\") as file:\n settings = json.load(file)\n\n custom_code = False\n for agent_name, agent_info in settings[\"Agents\"].items():\n for module_name, module in agent_info[\"SoftwareModules\"].items():\n if module[\"Algorithm\"][\"Level\"] == 3:\n custom_code = True\n print(\"DEBUG Code is custom: {}\".format(custom_code))\n if custom_code:\n completed = self.client.send_user_code_for_validation({}, settings)\n except AssertionError:\n print(\"Simulation could not be completed!\")\n return False\n except Exception:\n traceback.print_exc()\n","repo_name":"CodexLabsLLC/SWARMDeveloperClient","sub_path":"SWARMRDS/core/swarm.py","file_name":"swarm.py","file_ext":"py","file_size_in_byte":166718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1783942276","text":"\nfrom flask import Flask, request, redirect, render_template, flash, session\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom werkzeug.exceptions import Unauthorized\n\nfrom sqlalchemy.exc import IntegrityError\n\nfrom key import API_SECRET_KEY, SECRET_KEY\n\nfrom navigation import api_details, rate_multipler\nfrom models import db, connect_db, User, Route \nfrom forms import LoginForm, RegisterForm, RouteForm, DeleteForm\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///routes'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SECRET_KEY'] = SECRET_KEY\n\nconnect_db(app)\ndb.create_all()\n\n\n@app.route(\"/\")\ndef homepage():\n \"\"\"Main Page (users will either register or sign in here)\"\"\"\n\n return redirect(\"/register\")\n\n\n\n@app.route(\"/info\")\ndef info_page():\n \"\"\"Bring user to an information page that describes the application\"\"\"\n\n if \"username\" in session:\n return render_template(\"info.html\")\n\n return redirect(\"/login\")\n\n\n##############################################################################\n#routes related to user registration, login, & 
logout\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Allow user to register via a form\"\"\"\n\n if \"username\" in session:\n return redirect(\"/routes\")\n\n form = RegisterForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n company = form.company.data\n state = form.state.data\n\n user = User.register(username, password, first_name, last_name, company, state)\n\n db.session.commit()\n session[\"username\"] = user.username\n flash(\"Account Created!\")\n return redirect(\"/login\")\n\n else:\n return render_template(\"register.html\", form=form)\n\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Handles user login\"\"\"\n\n if \"username\" in session:\n return redirect(\"/routes\")\n\n\n form = LoginForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n\n user = User.authenticate(username, password)\n\n if user:\n session['username'] = user.username\n flash(\"Welcome Back!\")\n return redirect(\"/routes\")\n else:\n form.username.errors = [\"Invalid username/password.\"]\n return render_template(\"login.html\", form=form)\n\n return render_template(\"login.html\", form=form)\n\n\n@app.route(\"/logout\")\ndef logout():\n \"\"\"Logout the user\"\"\"\n\n if \"username\" not in session:\n return redirect(\"/login\")\n \n session.pop(\"username\")\n flash(\"Logged out.\")\n return redirect(\"/login\")\n\n##############################################################################\n#routes related to the functionality of the app\n\n@app.route(\"/routes\")\ndef show_user_routes():\n \"\"\"Show a list of all user routes\"\"\"\n\n if \"username\" not in session:\n raise Unauthorized()\n\n routes = Route.query.all()\n\n if not routes:\n return redirect(\"/routes/add\")\n\n return render_template(\"routes.html\", routes=routes)\n\n\n@app.route(\"/routes/\")\ndef show_route(route_id):\n \"\"\"Display a single route\"\"\"\n\n if \"username\" not in session:\n raise Unauthorized()\n\n route = Route.query.get_or_404(route_id)\n route.map_url = route.map_url.replace('API_SECRET_KEY', API_SECRET_KEY)\n\n payout = rate_multipler(route.mileage, route.travel_type)\n\n return render_template(\"route.html\", route=route, payout=payout)\n\n\n@app.route(\"/routes/add\", methods=[\"GET\", \"POST\"])\ndef add_route():\n \"\"\"Allow user to add a new route\"\"\"\n\n if \"username\" not in session:\n raise Unauthorized()\n\n username = session[\"username\"]\n\n form = RouteForm()\n\n if form.validate_on_submit():\n start_point = form.start_point.data\n end_point = form.end_point.data\n travel_type = form.travel_type.data\n comments = form.comments.data\n\n distance = api_details(start_point, end_point)\n map_url = f\"https://www.mapquestapi.com/staticmap/v5/map?key=API_SECRET_KEY&size=1100,500@2x&start={start_point}&end={end_point}\"\n\n route = Route(username=username, start_point=start_point, end_point=end_point, travel_type=travel_type, mileage=distance, comments=comments, map_url=map_url)\n \n db.session.add(route)\n db.session.commit()\n flash(\"Route added.\")\n\n return redirect(\"/routes\")\n\n else:\n return render_template(\"new_route.html\", form=form)\n\n\n@app.route(\"/routes//delete\", methods=[\"GET\", \"POST\"])\ndef delete_route(route_id):\n \"\"\"Delete a specific route\"\"\"\n\n if \"username\" not in session:\n raise 
Unauthorized()\n\n route = Route.query.get_or_404(route_id)\n \n db.session.delete(route)\n db.session.commit()\n\n flash(\"Route deleted.\")\n\n return redirect(\"/routes\")\n\n\n@app.route(\"/routes//edit\", methods=[\"GET\", \"POST\"])\ndef edit_route(route_id):\n \"\"\"Allows a user to edit a route\"\"\"\n\n route = Route.query.get(route_id)\n\n if \"username\" not in session:\n raise Unauthorized()\n\n form = RouteForm(obj=route)\n\n if form.validate_on_submit():\n route.start_point = form.start_point.data\n route.end_point = form.end_point.data\n route.travel_type = form.travel_type.data\n route.comments = form.comments.data\n\n db.session.commit()\n flash(\"Route edited.\")\n \n return redirect(\"/routes\")\n\n return render_template(\"edit_route.html\", form=form, route=route)\n\n\n\n","repo_name":"ericdoering/navigate_for_rate","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23637184981","text":"# Qualification Round 2012\r\n# Problem B. Dancing With the Googlers\r\n\r\nimport sys\r\n\r\ndef reverse_numeric(x, y):\r\n\treturn int(y) - int(x)\r\n\r\ndef processScores(line):\r\n\tscores = line.rsplit(' ')\r\n\t\r\n\tnum_of_dancers = int(scores.pop(0))\r\n\tnum_of_surprises = int(scores.pop(0))\r\n\tmin_best_score = int(scores.pop(0))\r\n\t\r\n\tif min_best_score == 0:\r\n\t\tcounter = len(scores)\r\n\telse:\r\n\t\tcounter = 0\r\n\t\t\r\n\t\tfor score in sorted(scores, cmp=reverse_numeric):\r\n\t\t\tscore = int(score)\r\n\t\t\tif score > 0:\r\n\t\t\t\tdiv = score / 3\r\n\t\t\t\tremainder = score - div * 3\r\n\t\t\t\t\r\n\t\t\t\tif div >= min_best_score:\r\n\t\t\t\t\tcounter += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tif div >= max(min_best_score - 2, 0):\r\n\t\t\t\t\t\tif remainder == 0 and min_best_score - div == 1 and num_of_surprises > 0:\r\n\t\t\t\t\t\t\tcounter += 1\r\n\t\t\t\t\t\t\tnum_of_surprises -= 1\r\n\t\t\t\t\t\telif remainder == 1 and min_best_score - div == 1:\r\n\t\t\t\t\t\t\tcounter += 1\r\n\t\t\t\t\t\telif remainder == 2 and min_best_score - div == 1:\r\n\t\t\t\t\t\t\tcounter += 1\r\n\t\t\t\t\t\telif remainder == 2 and min_best_score - div == 2 and num_of_surprises > 0:\r\n\t\t\t\t\t\t\tcounter += 1\r\n\t\t\t\t\t\t\tnum_of_surprises -= 1\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\treturn counter\r\n\r\ndef main():\r\n\tf_in = open(sys.argv[1], \"rb\")\r\n\tline = f_in.readline()\r\n\r\n\t# Getting total number of test cases\r\n\ttotal_tc_num = int(line.rstrip())\r\n\tcount = 1\r\n\r\n\t# Processing input test cases\r\n\tif total_tc_num > 0:\r\n\t\tf_out = open(sys.argv[2], \"w\")\r\n\t\twhile line and count <= total_tc_num:\r\n\t\t\tline = f_in.readline().rstrip()\r\n\t\t\tstr = \"Case #%d: %s\" % (count, processScores(line))\r\n\t\t\tif count < total_tc_num:\r\n\t\t\t\tstr += \"\\n\"\r\n\t\t\tf_out.write(str)\r\n\t\t\tcount += 1\r\n\t\tf_out.close()\r\n\tf_in.close()\r\n\r\nif __name__ == '__main__' :\r\n main()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/1029.py","file_name":"1029.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41861534110","text":"import json\nimport time\nfrom kafka import KafkaProducer\n\nBOOTSTRAP_SERVERS = 'localhost:9092'\nTOPIC = 'kpi_source'\nproducer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS, 
key_serializer=lambda x: x.encode('utf-8'),\n value_serializer=lambda x: json.dumps(x).encode('utf-8'))\n\nfor x in range(10):\n kpi_key = f'kpi.dashboard1.{x}'\n producer.send(TOPIC, key=kpi_key,\n value={'kpi_key': kpi_key, 'timestamp': int(time.time()) * 1000, 'value': 1.12312})\nproducer.flush()\nproducer.close()\n","repo_name":"lukegs7/python-learn","sub_path":"mx_learn/kfk/produce.py","file_name":"produce.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2761625471","text":"import pygame, sys\nfrom button import Button\nfrom SimpleLR import SimpleLR\nfrom Polynomial import PolynomialRegression\n\n\nORIGIN = (30, 750)\n\ndef draw_axis():\n \"\"\"Draw axis and origin point\"\"\"\n # Text\n root_surf = font.render('0', True, 'black')\n root_rect = root_surf.get_rect(center =(20, 760))\n y_label = font.render('Label', True, 'black')\n y_label_rect = y_label.get_rect(center =(70, 50))\n x_label = font.render('Feature', True, 'black')\n x_label_rect = x_label.get_rect(center =(1040, 770))\n # Vertical axis\n pygame.draw.line(display_surf, 'black', (30, 50), (30, 750), 4)\n pygame.draw.line(display_surf, 'black', (20, 60), (30, 50), 4)\n pygame.draw.line(display_surf, 'black', (40, 60), (30, 50), 4)\n display_surf.blit(y_label, y_label_rect)\n # Horizontal axis\n pygame.draw.line(display_surf, 'black', (30, 750), (1000, 750), 4)\n pygame.draw.line(display_surf, 'black', (990, 740), (1000, 750), 4)\n pygame.draw.line(display_surf, 'black', (990, 760), (1000, 750), 4)\n display_surf.blit(x_label, x_label_rect)\n # Origin point\n display_surf.blit(root_surf, root_rect)\n\ndef convert_position(old_pos):\n \"\"\"convert position from pygame coordinate system to cartesian coordinate system\"\"\"\n new_x = old_pos[0] - ORIGIN[0]\n new_y = ORIGIN[1] - old_pos[1]\n return (new_x, new_y)\n\ndef revert_position(old_pos):\n \"\"\"revert position from cartesian coordinate system to pygame coordinate system\"\"\"\n new_x = old_pos[0] + ORIGIN[0]\n new_y = ORIGIN[1] - old_pos[1]\n return (new_x, new_y)\n\ndef marking_down():\n \"\"\"for each data point, draw a label at that point\"\"\"\n for point in data_points:\n point_rect = label.get_rect(center = point)\n display_surf.blit(label, point_rect)\n\ndef drawing_record_table():\n \"\"\"for each data point we have just choose, draw its coordinate in the record table\"\"\"\n # text\n data_text_surf = font.render('Data', True, 'black')\n data_text_rect = data_text_surf.get_rect(topleft =(1070, 50))\n x_text_surf = font.render('X', True, 'black')\n x_text_rect = x_text_surf.get_rect(topleft =(1055, 80))\n y_text_surf = font.render('Y', True, 'black')\n y_text_rect = y_text_surf.get_rect(topleft =(1055, 110))\n fofo_surf = font.render('', True, 'black')\n if data_points:\n x_point_surf = font.render(str(convert_position(data_points[-1])[0]), True, 'black')\n y_point_surf = font.render(str(convert_position(data_points[-1])[1]), True, 'black')\n else:\n x_point_surf = font.render('', True, 'black')\n y_point_surf = font.render('', True, 'black')\n x_point_rect = x_point_surf.get_rect(center =(1135, 91))\n fofo_rect_1 = fofo_surf.get_rect(center =(1135, 91))\n y_point_rect = y_point_surf.get_rect(center =(1135, 121))\n fofo_rect_2 = fofo_surf.get_rect(center =(1135, 121))\n # Draw the table\n pygame.draw.rect(display_surf, 'black', data_text_rect.inflate(100, 10), 2)\n pygame.draw.rect(display_surf, 'black', x_text_rect.inflate(70, 10), 2)\n 
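# boxes for the Y label row and the two cells that display the newest point's coordinates\n 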
pygame.draw.rect(display_surf, 'black', y_text_rect.inflate(70, 10), 2)\n pygame.draw.rect(display_surf, 'black', fofo_rect_1.inflate(65, 10), 2)\n pygame.draw.rect(display_surf, 'black', fofo_rect_2.inflate(65, 10), 2)\n # Display the text\n display_surf.blit(data_text_surf, data_text_rect)\n display_surf.blit(x_text_surf, x_text_rect)\n display_surf.blit(y_text_surf, y_text_rect)\n display_surf.blit(x_point_surf, x_point_rect)\n display_surf.blit(y_point_surf, y_point_rect)\n\ndef draw_option_box():\n \"\"\"draw the option box\"\"\"\n box_rect = pygame.Rect(990, 20, 200, 460)\n pygame.draw.rect(display_surf, 'black', box_rect, 3)\n\ndef visualize_model(points):\n \"\"\"draw the line or the curve that represents the model\"\"\"\n for i in range(len(points) - 1):\n pygame.draw.line(display_surf, 'deeppink', revert_position(points[i]), revert_position(points[i + 1]), 4)\n\ndef draw_mae(mae_value):\n \"\"\"draw the MAE value of the current model\"\"\"\n mae_text_surf = font.render('MAE', True, 'black')\n mae_text_rect = mae_text_surf.get_rect(center =(900, 37))\n mae_value_surf = font.render(str(mae_value), True, 'black')\n mae_value_rect = mae_value_surf.get_rect(center =(900, 67))\n fofo_rect = font.render('', True, 'black').get_rect(center =(900, 67))\n # Draw\n pygame.draw.rect(display_surf, 'black', mae_text_rect.inflate(100, 10), 2)\n display_surf.blit(mae_text_surf, mae_text_rect)\n pygame.draw.rect(display_surf, 'black', fofo_rect.inflate(150, 10), 2)\n display_surf.blit(mae_value_surf, mae_value_rect)\n \n# Basic setup\npygame.init()\ndisplay_surf = pygame.display.set_mode((1200, 800))\nbg_surf = pygame.image.load('Assets/bg.jpg').convert_alpha()\npygame.display.set_caption('Visualizing Regression Models')\ncan_click = True\nclick_timer = None\n# Fonts\nfont = pygame.font.Font(None, 32)\n\n# Buttons\nclear_button = Button('Clear', 140, 40, (1020, 170), 6, display_surf, font)\nSLR_button = Button('SimpleLR', 140, 40, (1020, 230), 6, display_surf, font)\npoly_button = Button('Polynomial', 140, 40, (1020, 290), 6, display_surf, font)\nmae_button = Button('MAE', 140, 40, (1020, 350), 6, display_surf, font)\nclose_button = Button('Close', 140, 40, (1020, 410), 6, display_surf, font)\nslr_check = False\npoly_check = False\nmae_check = False\n\n# Data points\nlabel = pygame.image.load('Assets/star.png')\ndata_points = []\nconverted_data_points = []\n\n# Sounds\nmouse_click_sound = pygame.mixer.Sound('Assets/click.wav')\nerror_sound = pygame.mixer.Sound('Assets/error.mp3')\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n data_points = [convert_position(point) for point in data_points]\n pygame.quit()\n sys.exit()\n # Click timer and Restricting mouse position\n if not can_click:\n if pygame.time.get_ticks() - click_timer >= 200:\n can_click = True\n if pygame.mouse.get_pressed()[0] and can_click:\n click_timer = pygame.time.get_ticks()\n can_click = False\n if (990 >= pygame.mouse.get_pos()[0] >= 30) and (50 <= pygame.mouse.get_pos()[1] <= 750):\n mouse_click_sound.play()\n data_points.append(pygame.mouse.get_pos())\n click_timer = pygame.time.get_ticks()\n elif not (1190 >= pygame.mouse.get_pos()[0] >= 990 and 20 <= pygame.mouse.get_pos()[1] <= 480):\n error_sound.play()\n \n # Drawing\n display_surf.blit(bg_surf, (0, 0))\n draw_axis()\n marking_down()\n drawing_record_table()\n draw_option_box()\n \n # Active clear button\n clear_button.draw()\n if clear_button.pressed:\n mouse_click_sound.play()\n data_points = []\n converted_data_points = []\n 
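# reset the fitted-model and MAE flags so a stale fit is not redrawn\n 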
slr_check = poly_check = mae_check = False\n \n # Active SLR button \n SLR_button.draw()\n if SLR_button.pressed:\n mouse_click_sound.play()\n if data_points:\n converted_data_points = [convert_position(point) for point in data_points]\n slr = SimpleLR(converted_data_points)\n slr.train_model()\n slr_check = True\n poly_check = False\n if slr_check:\n visualize_model(slr.predict())\n \n # Active polynomial button\n poly_button.draw()\n if poly_button.pressed:\n mouse_click_sound.play()\n if data_points:\n converted_data_points = [convert_position(point) for point in data_points]\n poly_reg = PolynomialRegression(converted_data_points)\n poly_reg.train_model()\n poly_check = True\n slr_check = False\n if poly_check:\n visualize_model(poly_reg.predict())\n \n # Active Mae button\n mae_button.draw()\n if mae_button.pressed:\n mouse_click_sound.play()\n if slr_check or poly_check:\n mae_check = True\n if mae_check:\n mae_value = slr.mae() if slr_check else poly_reg.mae()\n draw_mae(mae_value)\n \n # Active close button\n close_button.draw()\n if close_button.pressed:\n mouse_click_sound.play()\n pygame.quit()\n sys.exit()\n \n # Update screen\n pygame.display.update()","repo_name":"khanhnhan1512/Visualizing-Regression-Models","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33451663308","text":"class Solution:\n def countDistinctIntegers(self, nums: List[int]) -> int:\n set_num = set(nums)\n for ind in nums:\n if ind > 9 or ind < -9:\n str_ind = str(ind)\n set_num.add(int(str_ind[::-1]))\n else:\n set_num.add(ind)\n \n return len(set_num)\n \n ","repo_name":"yonasengdu/Compitative-programming","sub_path":"2442-count-number-of-distinct-integers-after-reverse-operations/2442-count-number-of-distinct-integers-after-reverse-operations.py","file_name":"2442-count-number-of-distinct-integers-after-reverse-operations.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"12660357468","text":"import glob\nimport mne\nfrom os.path import basename\nfrom mne_bids import write_raw_bids, BIDSPath, update_sidecar_json\nimport xml.etree.ElementTree as ET\n\nraw_path = '/storage/store2/data/mass/SS3/'\nannot_path = '/storage/store2/data/mass/SS3/annotations/'\nbids_root = '/storage/store2/data/mass-bids/SS3'\n\nall_ch_types = {'ECG I': 'ecg',\n 'EMG Chin1': 'emg',\n 'EMG Chin2': 'emg',\n 'EMG Chin3': 'emg',\n 'EOG Right Horiz': 'eog',\n 'EOG Left Horiz': 'eog',\n 'Resp Belt Abdo': 'misc',\n 'Resp Belt Thor': 'misc'}\n\nraw_files = glob.glob(raw_path+'*.edf')\nannot_files = glob.glob(annot_path+'*.edf')\n\nraw_names = [basename(raw_file)[:10] for raw_file in raw_files]\nannot_names = [basename(annot_file)[:10] for annot_file in annot_files]\ncommon = list(set(raw_names) & set(annot_names))\ncommon.sort()\n\nfor fileref in common:\n raw_filepath = raw_path + fileref + ' PSG.edf'\n annot_filepath = annot_path + fileref + ' Annotations.edf'\n subject = fileref[3:5]+fileref[6:]\n raw = mne.io.read_raw_edf(raw_filepath)\n annots = mne.read_annotations(annot_filepath)\n for i, desc in enumerate(annots.description):\n if desc == 'Sleep stage ?':\n annots.description[i] = 'Sleep stage W'\n elif 'EMGArtefact' in desc:\n root = ET.fromstring(desc)\n channel = (root.attrib[\"channel\"].replace(\"-LER\", \"\")\n .replace(\"-CLE\", \"\").split()[1])\n annots.description[i] = 
f\"BAD_{root.attrib['groupName']}_{channel}\"\n elif 'MicroArousal' in desc:\n root = ET.fromstring(desc)\n channel = (root.attrib[\"channel\"].replace(\"-LER\", \"\")\n .replace(\"-CLE\", \"\").split()[1])\n annots.description[i] = f\"{root.attrib['groupName']}_{channel}\"\n raw.set_annotations(annots, emit_warning=False)\n ch_names = raw.info['ch_names']\n new_ch_names, new_ch_types = {}, {}\n for ch_name in ch_names:\n if ch_name.startswith('EEG '):\n new_ch_names[ch_name] = (ch_name.replace('EEG ', '')\n .replace('-LER', '')\n .replace('-CLE', ''))\n if ch_name in all_ch_types.keys():\n new_ch_types[ch_name] = all_ch_types[ch_name]\n if 'Resp Belt Abdo' in ch_names:\n ref = 'Linked Ear Reference'\n else:\n ref = 'Computed Linked Ear'\n for old, new in new_ch_names.items():\n raw._orig_units[new] = raw._orig_units[old]\n del raw._orig_units[old]\n raw.rename_channels(new_ch_names)\n raw.set_channel_types(new_ch_types)\n raw.info['line_freq'] = 50\n bids_path = BIDSPath(subject=subject, root=bids_root)\n write_raw_bids(raw, bids_path, overwrite=True)\n bids_path.update(suffix='eeg', extension='.json')\n update_sidecar_json(bids_path, {'EEGReference': ref})\n","repo_name":"msolal/BT-sleep-EEG","sub_path":"converting_to_bids/mass_to_bids.py","file_name":"mass_to_bids.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"43462428431","text":"import os\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\nimport torchvision\nimport PIL\n\nfrom style_modules import ContentLoss, StyleLoss, TotalVariationLoss\nfrom style_utils import features_from_img, extract_features, rel_error, style_transfer\nfrom image_utils import preprocess\n\n\ndtype = torch.FloatTensor\n# Uncomment out the following line if you're on a machine with a GPU set up for PyTorch!\n# dtype = torch.cuda.FloatTensor\n\ncnn = torchvision.models.squeezenet1_1(pretrained=True).features\ncnn.type(dtype)\n\n# Fix the weights of the pretrained network\nfor param in cnn.parameters():\n param.requires_grad = False\n\n\ncontent_loss = ContentLoss()\n\nstyle_loss = StyleLoss()\n\ntv_loss = TotalVariationLoss()\n\n\n# Generate Images\n# Composition VII + Tubingen\nparams1 = {\n 'name': 'composition_vii_tubingen',\n 'content_image' : 'styles_images/tubingen.jpg',\n 'style_image' : 'styles_images/composition_vii.jpg',\n 'image_size' : 192,\n 'style_size' : 512,\n 'content_layer' : 3,\n 'content_weight' : 5e-2,\n 'style_layers' : (1, 4, 6, 7),\n 'style_weights' : (20000, 500, 12, 1),\n 'tv_weight' : 5e-2,\n 'content_loss' : content_loss,\n 'style_loss': style_loss,\n 'tv_loss': tv_loss,\n 'cnn': cnn,\n 'dtype': dtype\n}\n\n# Scream + Tubingen\nparams2 = {\n 'name': 'scream_tubingen',\n 'content_image':'styles_images/tubingen.jpg',\n 'style_image':'styles_images/the_scream.jpg',\n 'image_size':192,\n 'style_size':224,\n 'content_layer':3,\n 'content_weight':3e-2,\n 'style_layers':[1, 4, 6, 7],\n 'style_weights':[200000, 800, 12, 1],\n 'tv_weight':2e-2,\n 'content_loss': content_loss,\n 'style_loss': style_loss,\n 'tv_loss': tv_loss,\n 'cnn': cnn,\n 'dtype': dtype\n}\n\n# Starry Night + Tubingen\nparams3 = {\n 'name': 'starry_tubingen',\n 'content_image' : 'styles_images/tubingen.jpg',\n 'style_image' : 'styles_images/starry_night.jpg',\n 'image_size' : 192,\n 'style_size' : 192,\n 'content_layer' : 3,\n 'content_weight' : 6e-2,\n 'style_layers' : [1, 4, 6, 7],\n 'style_weights' : 
[300000, 1000, 15, 3],\n 'tv_weight' : 2e-2,\n 'content_loss': content_loss,\n 'style_loss': style_loss,\n 'tv_loss': tv_loss,\n 'cnn': cnn,\n 'dtype': dtype\n}\n\nstyle_transfer(**params1)\nstyle_transfer(**params2)\nstyle_transfer(**params3)","repo_name":"jieun-seong/visualization_and_style_transfer","sub_path":"Code/style_transfer.py","file_name":"style_transfer.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26304798742","text":"import ctypes.util as _util\nimport fmm as _fmm\nimport numpy as _np\nimport time\nimport unittest as _unittest\n\nfrom _nufft import ffi as _ffi\n_lib = _ffi.dlopen(_util.find_library('nufft'))\n\n_twopi = 2*_np.pi\n\n\ndef inufft_cpp(F, K, Y, L, p, n):\n num_values = F.size\n values = _ffi.new(\n 'double[%d]' % (2*num_values),\n list(_np.array([F.real, F.imag]).T.reshape(2*num_values)))\n num_nodes = Y.size\n nodes = _ffi.new('double[%d]' % num_nodes, list(Y))\n fmm_depth = L\n truncation_number = p\n neighborhood_radius = n\n output = _ffi.new('double[%d]' % (2*num_nodes))\n t0 = time.clock()\n _lib.compute_P_ddi(\n values, nodes, num_values, num_nodes, fmm_depth, truncation_number,\n neighborhood_radius, output)\n t = time.clock() - t0\n output = _np.array(list(output)).reshape(num_nodes, 2)\n return output[:, 0] + 1j*output[:, 1], t\n\n\n# TODO: implement c0 estimate\n# TODO: implement NEW c0 estimate\n# TODO: rewrite this as a class to simplify code a bit\n\n\n# ALL THIS IS TEMPORARY (for cest implementation)...\n\n\ndef _phinear(F, X, y, n):\n if y in X:\n return 0\n else:\n tmp = 0\n for l in _np.arange(-n, n + 1):\n numer = _np.multiply((-1.0)**_np.arange(F.size), F)\n denom = y - X - 2*_np.pi*l\n tmp += _np.sum(_np.divide(numer, denom))\n return tmp\n\n\ndef _s(K, X, k, eps):\n return _np.sin(K*(X[k] + eps))/K\n\n\ndef _p(F, X, n, k, eps):\n return _phinear(F, X, X[k] + eps, n)\n\n\ndef _cest(F, X, K, n, eps=1e-5):\n A = _np.sum([_s(K, X, k, eps)*(F[k] - _s(K, X, k, eps)*_p(F, X, n, k, eps))\n for k in _np.arange(2*K)])\n B = _np.sum([_s(K, X, k, eps)*_s(K, X, k, eps) for k in _np.arange(2*K)])\n return A/B\n\n\n#### ... UP THROUGH HERE.\n\n\ndef _transpose_argsort_indices(I):\n '''Compute the inverse permutation of I, where I is a list of indices\n (a permutation) computed using np.argsort.\n\n '''\n J = _np.zeros(len(I), dtype=_np.int)\n for i, j in enumerate(I):\n J[j] = i\n return J\n\n\ndef _test(y, N, index_ratio):\n '''This test is necessary to check if we've passed a point that we\n essentially already know the value of (i.e. an evaluation point\n that's nearly equal to a source point). We could probably handle\n this correctly in the FMM itself, but for now, a test like this\n should give us approximately what we want... That is, no\n interpolates that are NaN, +/-Inf, or incorrectly equal to zero.\n\n '''\n return _np.abs(_np.mod(y*(N/_twopi), index_ratio)) < 1e-13\n\n\ndef _extend_X(X, n):\n '''Periodically extend the grid points in X that lie in [0, 2*pi) to\n [-2*pi*n, 2*pi*(n + 1)).\n\n '''\n return _np.concatenate([X + _twopi*l for l in range(-n, n + 1)])\n\n\ndef _get_extended_alt_sign_F_and_sum(F, n, N):\n '''Modulate F with (-1)^n and periodically extend the result to\n [-2*pi*n, 2*pi*(n + 1)). 
Also, return the sum of one period of the\n modulated F as the second return value.\n\n '''\n Fas = _np.multiply(F, _np.power(-1, range(N)))\n return _np.tile(Fas, 2*n + 1), _np.sum(Fas)\n\n\ndef _get_checkpoints(q):\n '''Compute q checkpoint pairs. The first point in each checkpoint pair\n is uniformly distributed in [2*pi, 4*pi). The second is simply\n that point but shifted into [0, 2*pi).\n\n '''\n Yc = _np.random.uniform(_twopi, 2*_twopi, q)\n return Yc, Yc - _twopi\n\n\ndef _R(Y, m):\n 'Evaluate the mth Cauchy regular basis function R.'\n return _np.power(Y - _np.pi, m)\n\n\ndef _get_phinear_and_f(Y, Yc, Yc_tilde, n, X_per, Fas_per, L, p):\n '''This is a workhorse function that uses the FMM to compute both\n phinear and the difference in the checkpoint values. These are\n returned as the first and second return values.\n\n '''\n Ycat = _np.concatenate((Y, Yc, Yc_tilde))\n I = _np.argsort(Ycat)\n J = _transpose_argsort_indices(I)\n dom = (-_twopi*n, _twopi*(n + 1))\n fmm = _fmm.fmm1d_cauchy_double\n V = fmm(X_per, Ycat[I], Fas_per, L, p, scaled_domain=dom)[J]\n i = len(Y)\n j = i + len(Yc)\n k = j + len(Yc_tilde)\n return V[:i], V[j:k] - V[i:j]\n\n\ndef _get_phifar(X, Y, Yc, Yc_tilde, F, K, f, p, n, q):\n '''Use least squares collocation to compute phifar. TODO: this needs\n to be updated and replaced with the Vandermonde approach.\n\n '''\n A = _np.zeros((q, p), dtype=Y.dtype)\n for m in range(p):\n A[:, m] = _R(Yc, m) - _R(Yc_tilde, m)\n c0 = _cest(F, X, K, n)\n C = _np.zeros(p, dtype=c0.dtype)\n C[0] = c0\n C[1:] = _np.linalg.lstsq(A, f)[0][1:]\n phifar = _np.zeros(Y.shape, dtype=C.dtype)\n for j in range(len(Y)):\n phifar[j] = _np.sum([C[m]*(Y[j] - _np.pi)**m for m in range(p)])\n return phifar\n\n\ndef _finish_interpolation(Y, F, phi, K, N, Fas_sum):\n '''This function takes care of the last couple steps: it modulates the\n result by the sine-based factor and sets any values that were too\n close to source points to the corresponding weight (i.e. function\n value).\n\n '''\n G = [(-Fas_sum*_np.cos(K*Y[j]) + 2*_np.sin(K*Y[j])*phi[j])/N\n for j in range(len(Y))]\n index_ratio = len(Y)/(2*K)\n for i, y in enumerate(Y):\n if _test(y, len(Y), index_ratio):\n G[i] = F[int(i/index_ratio)]\n return _np.array(G)\n\n\ndef inufft(F, K, Y, L, p, n, q):\n '''Arguments:\n\n F: samples of a K-bandlimited function spaced equally along [0, 2pi).\n K: the bandlimit of the sampled function.\n Y: a list of target points in [0, 2pi).\n L: the depth of the FMM used in interpolation.\n p: the truncation number of the FMM.\n n: the 'radius' of the neighborhood around [0, 2pi) -- i.e. 
determining\n the intervals [-2pi*n, 0) and [2pi, 2pi(n+1)).\n q: the number of checkpoint pairs.\n\n '''\n N = len(F)\n X = _np.linspace(0, _twopi, N, endpoint=False)\n X_per = _extend_X(X, n)\n Fas_per, Fas_sum = _get_extended_alt_sign_F_and_sum(F, n, N)\n Yc, Yc_tilde = _get_checkpoints(q)\n phinear, f = _get_phinear_and_f(Y, Yc, Yc_tilde, n, X_per, Fas_per, L, p)\n phifar = _get_phifar(X, Y, Yc, Yc_tilde, F, K, f, p, n, q)\n phi = phinear + phifar\n return _finish_interpolation(Y, F, phi, K, N, Fas_sum)\n\n\nclass inufft_test(_unittest.TestCase):\n def test(self):\n Y = _np.array([\n 0.06144733,\n 0.12742735,\n 0.19382462,\n 0.76838064,\n 0.81637296,\n 0.97971646,\n 1.04936551,\n 1.21592974,\n 1.25491235,\n 1.45162833,\n 1.53560262,\n 1.59485713,\n 1.75060987,\n 1.79657517,\n 1.8218675,\n 2.54129315,\n 2.6901106,\n 2.87683413,\n 3.01196423,\n 3.10353108,\n 3.23054149,\n 3.59794781,\n 3.83246419,\n 4.30807601,\n 4.61761236,\n 5.31786545,\n 5.62484061,\n 5.66583099,\n 5.67175258,\n 6.07424511,\n 6.11556149,\n 6.19334216])\n coefs = _np.array([\n 0.+0.j,\n -0.+0.09094568j,\n 0.+0.j,\n -0.+0.12732395j,\n 0.+0.j,\n -0.+0.21220659j,\n 0.+0.j,\n -0.+0.63661977j,\n 0.+0.j,\n -0.-0.63661977j,\n 0.+0.j,\n -0.-0.21220659j,\n 0.+0.j,\n -0.-0.12732395j,\n 0.+0.j,\n -0.-0.09094568j])\n F = _np.fft.ifft(_np.fft.fftshift(coefs))\n K = int(F.size/2)\n L = 4\n p = 4\n n = 3\n q = Y.size\n expected = _np.array([\n 0.01930708 + 4.67176242e-19j, 0.03833431 + 8.43065927e-19j,\n 0.05414096 + 9.89540130e-19j, 0.05579820 - 1.34327825e-19j,\n 0.05594726 + 2.42754205e-19j, 0.06196288 + 9.89611364e-19j,\n 0.06507520 + 8.48430386e-19j, 0.06753474 - 2.95002823e-19j,\n 0.06680686 - 5.70652687e-19j, 0.05968726 - 8.06992194e-19j,\n 0.05779020 - 2.74994500e-19j, 0.05769407 + 1.89337641e-19j,\n 0.06196792 + 9.81094362e-19j, 0.06382237 + 9.62438121e-19j,\n 0.06479719 + 8.96407345e-19j, 0.06311348 + 9.85735660e-19j,\n 0.07273939 + 4.48473340e-19j, 0.06605620 - 8.45187551e-19j,\n 0.03886123 - 8.52064588e-19j, 0.01203484 - 2.96733713e-19j,\n -0.02750106 + 6.46340413e-19j, -0.07253002 - 4.82520351e-19j,\n -0.05780036 - 6.79132847e-19j, -0.06775652 + 9.18257837e-20j,\n -0.05897387 - 6.80571405e-19j, -0.06127630 - 9.81206947e-19j,\n -0.05936333 + 8.41511326e-19j, -0.06188170 + 9.64474493e-19j,\n -0.06228141 + 9.73915329e-19j, -0.05715692 - 9.84725529e-19j,\n -0.04839742 - 9.63722365e-19j, -0.02781498 - 6.51686509e-19j])\n\n # actual = inufft(F, K, Y, L, p, n, q)\n # for i, val in enumerate(actual):\n # try:\n # self.assertTrue(_np.abs(val - expected[i]) <= 1e-5)\n # except:\n # print('failure at i = %d' % i)\n # print('expected: %g + j*%g'\n # % (expected[i].real, expected[i].imag))\n # print('actual: %g + j*%g' % (val.real, val.imag))\n # self.assertTrue(False)\n\n actual_cpp = inufft_cpp(F, K, Y, L, p, n)\n for i, val in enumerate(actual_cpp):\n try:\n self.assertTrue(_np.abs(val - expected[i]) <= 1e-5)\n except:\n print('failure at i = %d' % i)\n print('expected: %g + j*%g'\n % (expected[i].real, expected[i].imag))\n print('actual: %g + j*%g' % (val.real, val.imag))\n self.assertTrue(False)\n\n\nif __name__ == '__main__':\n _unittest.main()\n","repo_name":"sampotter/nufft","sub_path":"src/py/new/nufft.py","file_name":"nufft.py","file_ext":"py","file_size_in_byte":9803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35505096452","text":"# import request\nimport json\nimport pprint\n\nwith open('data.json') as f:\n data = json.load(f)\n\n# 
pprint.pprint(data['data'])\n\n# print(data['data'])\n\ndef get_state_names():\n arr = []\n for x in range(len(data['data'])):\n arr.append(data['data'][x][8])\n pprint.pprint(data['data'])\n return arr\n\ndef get_state_index(name,state_list):\n return data['data'][state_list.index(name)]\n","repo_name":"Haseebk0678/hackathon-2021","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23569329731","text":"def find_min_max(pos, start, end):\n\treturn (end - start - 1) // 2, (end - start) // 2\n\ndef find_stall(stalls, start, end):\n\n\tif end - start == 0:\n\t\treturn start, 0, 0\n\n\tbest_pos = (start + end - 1) // 2\n\tbest_stall = stalls[best_pos]\n\tif best_stall == False:\n\t\tmin_s, max_s = find_min_max(stalls, start, end)\n\t\treturn best_pos, min_s, max_s\n\n\telse:\n\t\tleft_pos, left_min, left_max = find_stall(stalls, start, best_pos)\n\t\tright_pos, right_min, right_max = find_stall(stalls, best_pos + 1, end)\n\t\tif left_min > right_min:\n\t\t\treturn left_pos, left_min, left_max\n\t\telif right_min > left_min:\n\t\t\treturn right_pos, right_min, right_max\n\t\telif left_max >= right_max:\n\t\t\treturn left_pos, left_min, left_max\n\t\telse:\n\t\t\treturn right_pos, right_min, right_max\n\n\nnum_tests = int(input())\nresults = ['' for i in range(num_tests)]\n\nfor i in range(num_tests):\n\n\tnum_stalls, num_peop = [int(x) for x in input().split()]\n\n\tstalls = [False for j in range(num_stalls)]\n\n\tfor j in range(num_peop):\n\t\tpos, min_s, max_s = find_stall(stalls, 0, num_stalls)\n\t\tstalls[pos] = True\n\n\tresults[i] = 'Case #{}: {} {}'.format(i + 1, max_s, min_s)\n\nfor i in range(num_tests):\n\tprint(results[i])\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2101.py","file_name":"2101.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26808191954","text":"import numpy as np\nfrom scipy.optimize import curve_fit\nimport pylab as plt\nfrom pprint import pprint\n\ndays = np.array([0, 2, 4, 6, 8, 26])\nq = np.arange(0,29,0.1)\n\nganymede = np.array([148.6, 164.0, -210.6, -92.0, 237.4, -232.5]) # angular separation data\n\neuropa = np.array([-67.7, 115.1, -147.2, 151.8, -118.0, -107.5]) # angular separation data\neuropa_orbit_ratio = 671034 / 1070412 # not enough data to find max - how much smaller than ganymede\n\nio = np.array([0.0, 76.3, 92.6, 57.4, 0.0, -84.4]) # angular separation data\nio_orbit_ratio = 421700 / 671034 # not enough data to find max - how much smaller than europa\n\nN = 6 # number of data points\n\nguess_freq = 1\nguess_amplitude = 3*np.std(ganymede)/(2**0.5)\nguess_phase = 0\nguess_offset = 0\n\np0=[guess_freq, guess_amplitude,\n guess_phase, guess_offset]\n\n# create a sinewave\ndef my_sine(x, freq, amplitude, phase, offset):\n return np.sin(x * freq + phase) * amplitude + offset\n\nfit = curve_fit(my_sine, days, ganymede, p0=p0)\n\n# recreate the fitted curve using the optimized parameters\npprint(fit[0])\npprint(fit[1])\n\nganymede_msg = 'Ganymede orbit ' + \"{:1.2f}\".format(2 * 3.14159 / fit[0][0]) + ' days'\neuropa_msg = 'Europa orbit ' + \"{:1.2f}\".format(2 * 3.14159 / fit[0][0]/2) + ' days'\nio_msg = 'Io orbit ' + \"{:1.2f}\".format(2 * 3.14159 / fit[0][0]/4) + ' days'\n\nganymede_fit = my_sine(q, *fit[0])\n\nfit[0][0] *= 2 # double the freq of 
ganymede\nfit[0][1] *= europa_orbit_ratio # smaller than ganymede\nfit[0][2] += 3.14159 # 180 degrees out of phase from ganymede\neuropa_fit = my_sine(q, *fit[0])\n\nfit[0][0] *= 2 # double the freq of europa\nfit[0][1] *= europa_orbit_ratio # smaller than europa\nfit[0][2] += 3.14159 # 180 degrees out of phase from europa\nio_fit = my_sine(q, *fit[0])\n\nplt.plot(days, ganymede, 'o', color = 'b')\nplt.plot(days, europa, 'o', color = 'g')\nplt.plot(days, io, 'o', color = 'r')\nplt.plot(q, ganymede_fit, label=ganymede_msg, color = 'b')\nplt.plot(q, europa_fit, label=europa_msg, color = 'g')\nplt.plot(q, io_fit, label=io_msg, color = 'r')\nplt.xlabel('Days since initial observation')\nplt.ylabel('Distance from Jupiter (arc-seconds)')\nplt.title('Orbits of Jupiter\'s Moons')\nplt.legend()\nplt.show()\n","repo_name":"umeda/nezumi","sub_path":"Projects/orbital_resonance/jovian_orbits.py","file_name":"jovian_orbits.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"36877867888","text":"from Levenshtein import distance as levenshtein_distance\n\n\ndef find_best_property(property_list, tag, max_dist=0.3):\n    tag = tag.replace('_GBD', '')\n    best_match = None\n    best_match_dist = 4\n    for prop in property_list:\n        dist = levenshtein_distance(prop, tag)\n        if dist < best_match_dist:\n            best_match_dist = dist\n            best_match = prop\n\n    if best_match_dist > len(tag) * max_dist:\n        return None\n    return best_match\n\n\ndef find_data_property_match(ontology_data, tag, individual=None, max_dist=0.3):\n    specific_class_tag = None\n    if individual is not None:\n        class_name = individual.is_a[0].name\n        specific_class_tag = find_best_property(ontology_data.data_properties, f'{class_name}_{tag}')\n    return specific_class_tag if specific_class_tag else find_best_property(ontology_data.data_properties, f'{tag}', max_dist)\n\n\ndef find_object_property_match(ontology_data, tag, individual=None, max_dist=0.3):\n    specific_class_tag = None\n    if individual is not None:\n        class_name = individual.is_a[0].name\n        specific_class_tag = find_best_property(ontology_data.object_properties, f'{class_name}_{tag}')\n    return specific_class_tag if specific_class_tag else find_best_property(ontology_data.object_properties, f'{tag}', max_dist)\n\n\ndef find_object_match(ontology_data, tag, property_name=None, max_dist=0.3):\n    specific_class_tag = None\n    if property_name is not None and property_name in ontology_data.property2class:\n        property_class = ontology_data.property2class[property_name].lower()\n        specific_class_tag = find_best_property(ontology_data.object_names, f'{tag}_{property_class}')\n    return specific_class_tag if specific_class_tag else find_best_property(ontology_data.object_names, f'{tag}', max_dist)\n\n\ndef find_class_match(ontology_data, tag):\n    return find_best_property(ontology_data.class_names, f'{tag}')","repo_name":"Titrom025/PyTableMiner","sub_path":"utils/matchers.py","file_name":"matchers.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5698562086","text":"# Baekjoon Online Judge - Problem 10808. Alphabet Count\n\nalpha_cnt = [0] * 26\n\nword = input()\n\nfor alpha in word:\n    # The letters are all lowercase, so subtract 97 (the 
ASCII code for 'a')\n    alpha_cnt[ord(alpha)-97] += 1\nprint(*alpha_cnt)\n","repo_name":"wnstj-yang/Algorithm","sub_path":"BOJ/BOJ_10808.py","file_name":"BOJ_10808.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11734992157","text":"import json\n\nfrom pathlib import Path\nimport attr\n\nfrom .constants import SETTINGS_KEYS\n\nSETTINGS_PATH = \"~/.codesearchrc\"\n\ndefault_settings = {\n    \"SOCKET_HOST\": \"127.0.0.1\",\n    \"SOCKET_PORT\": 65126,\n    \"EXCLUDES\": [],\n    \"FILE_TYPES\": [],\n    \"SIGNIFICANCE_THRESHOLD\": 0,\n    \"WATCHED\": [],\n    \"INDEXING_PROCESSES\": 4,\n    \"BUFFER_PATH\": \"~/.codesearchbuffer\",\n}\n\n\n@attr.s\nclass Settings:\n    settings = attr.ib(default=attr.Factory(dict))\n\n    def from_file(self, path: str):\n        settings_path = Path(SETTINGS_PATH).expanduser()\n\n        if not settings_path.exists():\n            self.settings = default_settings\n            return\n\n        with open(path, \"r\") as settings_file:\n            self.settings = json.load(settings_file)\n\n    def __getattr__(self, key):\n        if key not in SETTINGS_KEYS:\n            raise KeyError(f\"{key} not a valid settings property\")\n\n        return self.settings[key]\n\n\nsettings = Settings()\n\nsettings.from_file(Path(SETTINGS_PATH).expanduser())\n","repo_name":"mcataford/codesearch","sub_path":"src/codesearch/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20656756314","text":"import pika, json\n\n# params = pika.URLParameters('amqp://rabbitmq:123@rabbitmq:5672//')\n# connection = pika.BlockingConnection(params)\nparams= pika.ConnectionParameters('rabbitmq', 5672, '/', pika.PlainCredentials('rabbitmq', '123'))\nconnection = pika.BlockingConnection(params)\nchannel = connection.channel()\n\n\nchannel.queue_declare(queue='broker_test')\n\n\ndef callback(ch, method, properties, body):\n    print('Received')\n    print(method)\n    print(properties)\n    print(body)\n\n\nchannel.basic_consume(queue='broker_test', on_message_callback=callback, auto_ack=True)\n\nprint('Started Consuming')\n\nchannel.start_consuming()\n\nchannel.close()\n","repo_name":"danieltorrescode/flask_api","sub_path":"src/libraries/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21836542025","text":"# summary statistics for the portfolio\n# from main import usd\nimport tinvest as tinvest\n\ndef cCalkulateStatistik(r, aAccountID):\n    usd = float(r.get_market_orderbook(\"BBG0013HGFT4\", 1).payload.close_price)  # dollar price from the order book, IMHO\n    print(usd)\n    r = r.get_portfolio(aAccountID)\n\n    # print(r)\n\n    ############ Response object classes of get_portfolio in the SDK by @daxartio https://github.com/daxartio/tinvest\n    # tinvest.Portfolio = List[PortfolioPosition]  # a list of tinvest.PortfolioPosition objects\n\n    # tinvest.PortfolioPosition\n    # class PortfolioPosition(BaseModel):\n    #     name: str  # name of the security\n\n    #     average_position_price: Optional[MoneyAmount] = Field(alias='averagePositionPrice') # avg. 
purchase price of the security in the portfolio\n    #     average_position_price_no_nkd: Optional[MoneyAmount] = Field(  # relevant for bonds\n    #         alias='averagePositionPriceNoNkd'\n    #     )\n\n    #     balance: Decimal  # number of shares (not lots)\n    #     lots: int  # lot, the minimum purchase volume\n    #     blocked: Optional[Decimal]  # blocked for sale\n\n    #     expected_yield: Optional[MoneyAmount] = Field(alias='expectedYield')  # expected profit FOR THE WHOLE POSITION !!!\n\n    #     figi: str  # required\n    #     ticker: Optional[str]\n    #     isin: Optional[str]\n\n    #     instrument_type: InstrumentType = Field(alias='instrumentType')  # stock, bond, etf etc.\n\n    # class MoneyAmount(BaseModel):\n    #     currency: Currency\n    #     value: Decimal\n\n    ################# compute the expected portfolio profit net of commissions and taxes, APPROXIMATELY !\n\n    profit = []\n    sales = []\n    for p in r.payload.positions:\n        y = float(p.expected_yield.value)  # FOR THE WHOLE POSITION, not per share\n        price = float(p.average_position_price.value)\n        # for dollar-denominated securities (the dollar in the portfolio also counts as a security)\n        if p.expected_yield.currency == tinvest.Currency.usd:\n            y *= usd\n            price *= usd\n\n        profit.append(y)  # taxes are computed on profit, reducing the tax base by losses\n        sales.append(price * float(p.balance) + y)\n\n    # the commission is computed on the total sales amount\n    total = {\n        'sales_total, rub': sum(sales),\n        'profit_total, rub': sum(profit),\n    }\n\n    total['taxes'] = total['profit_total, rub'] * 0.13  # 13% personal income tax (NDFL)\n    total['comission'] = total['sales_total, rub'] * 0.003  # 0.3% \"Investor\" tariff as of 2021-07, Tinkoff Investments\n    total['payed_total'] = total['taxes'] + total['comission']\n    total['money_cleaned'] = total['sales_total, rub'] - total['payed_total']\n    total['profit_cleaned'] = total['profit_total, rub'] - total['payed_total']\n\n    print(total)\n\n    # \"positions\": [\n    #     {\n    #         \"figi\": \"string\",\n    #         \"ticker\": \"string\",\n    #         \"isin\": \"string\",\n    #         \"instrumentType\": \"Stock\",\n    #         \"balance\": 0,\n    #         \"blocked\": 0,\n    #         \"expectedYield\": {\n    #             \"currency\": \"RUB\",\n    #             \"value\": 0\n    #         },\n    #         \"lots\": 0,\n    #         \"averagePositionPrice\": {\n    #             \"currency\": \"RUB\",\n    #             \"value\": 0\n    #         },\n    #         \"averagePositionPriceNoNkd\": {\n    #             \"currency\": \"RUB\",\n    #             \"value\": 0\n    #         },\n    #         \"name\": \"string\"\n    #     }\n","repo_name":"PaskomS/tipaskom","sub_path":"tests/sStatistik.py","file_name":"sStatistik.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18788546003","text":"from flask import Flask\nfrom flask_login import LoginManager\nfrom flask_mail import Mail\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\ndb_migrate = Migrate()\nmail = Mail()\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"home.login\"\n\n\ndef create_app(config=\"app.config.ProductionConfig\"):\n    app = Flask(__name__)\n    app.config.from_object(config)\n    db.init_app(app)\n    db_migrate.init_app(app, db, directory=\"app/db/migrations\")\n    mail.init_app(app)\n    login_manager.init_app(app)\n\n    # Import & Register Blueprints\n    from app.views.home import home\n\n    app.register_blueprint(home)\n\n    return app\n","repo_name":"abhijeetsonawane001/developersapi","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26263364736","text":"\"\"\"\n@project: Mobile Edge Offloading\n@author: Md Rajib Hossen\n@time: 
03/15/2020\n@email: mdrajib.hossen@mavs.uta.edu\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\n\n\nclass QLearningTable:\n def __init__(self, actions, filename=\"\", lr=0.01, discount=0.9, e_greedy=0.99):\n self.actions = actions\n self.lr = lr\n self.discount = discount\n self.epsilon = e_greedy\n # If the specified file is not empty, then read the Q table from the specified file\n if filename != \"\":\n self.q_table = pd.read_csv(filename, index_col=0)\n self.q_table.columns = list(range(0, len(self.actions)))\n else:\n if os.path.exists(\"data/q_table.csv\"):\n if os.stat(\"data/q_table.csv\").st_size == 0:\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n else:\n self.q_table = pd.read_csv(\"data/q_table.csv\", index_col=0)\n self.q_table.columns = list(range(0, len(self.actions)))\n else:\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n def choose_action(self, state):\n self.check_state_exists(state)\n # action selection trade of between exploration and exploitation, explore 10% of the time\n if np.random.uniform() < self.epsilon:\n # choose best action\n state_action = self.q_table.loc[state, :]\n action = np.random.choice(state_action[state_action == np.max(state_action)].index)\n else:\n action = np.random.choice(self.actions)\n\n return action\n\n def learn(self, c_state, action, reward, n_state):\n self.check_state_exists(n_state)\n q_predict = self.q_table.loc[c_state, action]\n\n n_state_ls = list(n_state.split(\",\"))\n\n if n_state_ls[1] != \" -1\":\n q_target = reward + self.discount * self.q_table.loc[n_state, :].max()\n else:\n print(\"reward only\")\n q_target = reward\n\n # q_target = reward + self.discount * self.q_table.loc[n_state, :].max()\n\n self.q_table.loc[c_state, action] += self.lr * (q_target - q_predict)\n\n def check_state_exists(self, state):\n if state not in self.q_table.index:\n self.q_table = self.q_table.append(\n pd.Series(\n [0] * len(self.actions),\n index=self.q_table.columns,\n name=state,\n )\n )\n\n\nif __name__ == '__main__':\n qtable = QLearningTable(actions=list(range(3)))\n print(qtable.choose_action(\"abc\"))","repo_name":"rajibhossen/edge-offloading","sub_path":"qlearning.py","file_name":"qlearning.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"61"} +{"seq_id":"71883726593","text":"import os\nimport pygame as pg\nimport numpy as np\nimport random\nfrom pygameCamera import Camera\nfrom utils import *\nimport threading\n\nclass FaceDanceMachine:\n\n def __init__(self, display, similarity):\n \"\"\" Constructor function \"\"\"\n self.black = (0,0,0)\n self.display = display\n self.camera = Camera(self.display)\n self.similarity = similarity\n self.imgs, self.buttons = loadGameImgNButtons()\n\n def welcome(self):\n self.display.fill(self.black)\n loadNblit('welcome.png',self.display,0,0,740,480)\n pg.display.update()\n \n while True: \n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit() \n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n if 455<=mouse[0]<=695 and 290<=mouse[1]<=410 and click[0]==1:\n break\n\n def menu(self):\n self.display.fill(self.black)\n loadNblit('level1.png',self.display,168,72,404,337)\n loadNblit('level2.png',self.display,168,72,404,337)\n loadNblit('level3.png',self.display,168,72,404,337)\n pg.display.update()\n \n while True: \n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit() \n mouse = pg.mouse.get_pos()\n click 
= pg.mouse.get_pressed()\n if 245<=mouse[0]<=495 and 64<=mouse[1]<=160 and click[0]==1:\n self.level = 1\n print(\"level 1 ...\")\n break\n elif 245<=mouse[0]<=495 and 192<=mouse[1]<=288 and click[0]==1:\n self.level = 2\n print(\"level 2 ...\")\n break\n elif 245<=mouse[0]<=495 and 320<=mouse[1]<=416 and click[0]==1:\n self.level = 3\n print(\"level 3 ...\")\n break\n\n def countDown(self):\n imgs = loadCountDownImgs()\n countDownImg = [load_image(str(index)+'.png',576,480) for index in range(4)]\n start = pg.time.get_ticks()\n \n while True:\n sec = (pg.time.get_ticks()-start)/1000 \n index = int(sec)\n if index > 3:\n break\n self.display.fill(self.black)\n self.display.blit(pg.transform.flip(self.camera.capture(), True, False), (0,0))\n self.display.blit(countDownImg[index], (32,0))\n for i in range(5):\n target = (i+index) if ((i+index)<5) else (i+index-5)\n self.display.blit(imgs[target], (640, 96*i))\n pg.display.update()\n \n def game(self):\n score = 0\n count = 0\n next = [True for i in range(self.level)]\n pos = [(0, (2-i)*144) for i in range(self.level)]\n mission = [ 0 for i in range(self.level)]\n start = pg.time.get_ticks()\n self.result, self.sim = -1, 0\n\n while True:\n \n #time up\n if (pg.time.get_ticks()-start) > 100000:\n exit = self.end(score)\n if exit:\n return False\n else:\n return True\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit() \n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n #pause\n if 640<=mouse[0]<=740 and 160<=mouse[1]<=256 and click[0]==1:\n print(\"pause\")\n exit = self.pause()\n if exit:\n return False\n #exit\n if 640<=mouse[0]<=740 and 288<=mouse[1]<=384 and click[0]==1:\n return False\n frame = self.camera.capture() #pygame surface\n self.display.fill(self.black)\n self.display.blit(pg.transform.flip(frame, True, False),(0,0))\n show_text(\"Score\", self.display, 660, 90)\n show_text(str(score), self.display, 680, 120)\n self.display.blit(self.buttons[0], (640, 160))\n self.display.blit(self.buttons[1], (640, 288))\n \n #face\n if count%50==0:\n t = threading.Thread(target=self.job, args=(frame, mission, ))\n t.start()\n if count%50 == 49:\n t.join()\n for i in range(self.level):\n if next[i]:\n next[i] = False\n pos[i] = (0, (2-i)*144)\n mission[i] = random.randint(0,9)\n self.display.blit(self.imgs[mission[i]], pos[i])\n pos[i]= (pos[i][0]+2, pos[i][1])\n if self.result == mission[i]:\n self.display.blit(self.imgs[10], pos[i])\n score += int(100*self.sim)\n next[i] = True\n if pos[i][0] > 480:\n next[i]=True\n count += 1\n pg.display.update()\n \n def job(self, frame, mission):\n print(\"Thread start...\")\n cv_frame = self.camera.pg2cv(frame)\n self.result, self.sim = self.similarity.face_dance(cv_frame, mission)\n print(\"Thread finish...\")\n\n def pause(self):\n buttons = loadPauseButtons()\n\n while True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit() \n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n #play\n if 100<=mouse[0]<=300 and 190<=mouse[1]<=310 and click[0]==1:\n return False\n #exit\n if 440<=mouse[0]<=640 and 190<=mouse[1]<=310 and click[0]==1:\n return True\n self.display.fill(self.black)\n self.display.blit(buttons[0], (100, 190))\n self.display.blit(buttons[1], (440, 190))\n pg.display.update()\n \n def exit(self):\n self.display.fill(self.black)\n loadNblit('bye_3.png',self.display,82,0,576,480) \n pg.display.update()\n print(\"exitting ...\")\n pg.time.wait(1000)\n \n def end(self, score):\n self.display.fill(self.black)\n buttons = 
loadEndButtons()\n\n while True:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit() \n mouse = pg.mouse.get_pos()\n click = pg.mouse.get_pressed()\n #replay\n if 100<=mouse[0]<=300 and 260<=mouse[1]<=460 and click[0]==1:\n return False\n #exit\n if 440<=mouse[0]<=640 and 260<=mouse[1]<=460 and click[0]==1:\n return True\n self.display.fill(self.black)\n show_text(\"Score: \" + str(score), self.display, 100, 100, 80)\n self.display.blit(buttons[0], (100, 260))\n self.display.blit(buttons[1], (440, 260))\n pg.display.update() \n\n def run(self):\n self.welcome()\n replay = True\n while replay:\n self.menu()\n self.countDown()\n replay = self.game()\n self.exit()\n print(\"success\")\n self.camera.stop() \n","repo_name":"NTUEE-ESLab/2018Fall-FaceDanceMachine","sub_path":"FaceDanceMachine.py","file_name":"FaceDanceMachine.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"28087262616","text":"\"\"\"\nmylist = []\nmylist.append(1)\nmylist.append(2)\nmylist.append(3)\nprint (mylist[0])\nprint(mylist[0:2])\n\nfor rec in mylist:\n print rec\n\n\"\"\"\n\n\nnumbers = []\nstrings = []\nnames = [\"John\", \"Eric\", \"Melissa\"]\n\nsecond_name = names[1]\n\nnumbers.append([0,1,2,3,4,5,6,7,8,9,10])\nstrings.append([\"Hello\",\"World\"])\n\n\n\nprint(numbers)\nprint(strings)\nprint(\"The second names on the names list is %s\" %second_name)","repo_name":"OnlyBoo/Python3","sub_path":"second.py","file_name":"second.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42869390964","text":"from PrimaryIDS import PrimaryIDS\nfrom SecondaryIDS import SecondaryIDS\nfrom Detector import Detector\nimport time\n\n\nCONFIG = {\n 'secondary_ids_count': 10\n}\n\n\n# print(\"Creating primary IDS\")\n# ids = PrimaryIDS()\n#\n# ids.print_info(verbose=True)\n#\n#\n# start = time.time()\n#\n# print(time.time()-start)\n\n\ndef test():\n for repulsion_value in (0.005, 0.01, 0.02, 0.05):\n print(\"Testing with repulsion value %f\" % repulsion_value)\n f = open('detector-repulsion-%f-stats.txt' % repulsion_value,'w')\n f.write('tp,tn,fp,fn\\n')\n for i in range(20):\n print(\"Run [%d/20]\" % (i + 1))\n Detector.repulsion_value = repulsion_value\n ids = PrimaryIDS()\n\n ids.run()\n f.write('%d,%d,%d,%d\\n' % (ids.tp, ids.tn, ids.fp, ids.fn))\n f.close()\n\ntest()","repo_name":"PimW/AIS-intrusion-detection","sub_path":"AIS.py","file_name":"AIS.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"40001730043","text":"class Solution:\n def reverseBits(self, n: int) -> int:\n rev = 0\n for _ in range(32):\n ##left shift old rev bits by one to create a new space and \n #accomodate the new bit gotten from the and operation of n and 1 \n rev = (rev << 1) + (n & 1)\n #right shift n by 1 to pop out old bit\n n = n >> 1\n return rev","repo_name":"frankudoags/Leetcode-Solutions","sub_path":"0190-reverse-bits/0190-reverse-bits.py","file_name":"0190-reverse-bits.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71307766273","text":"import utils\nimport rospy\nimport random\nimport numpy as np\nfrom naoqi import ALProxy\nfrom PIL import Image as im\nfrom cv_bridge import CvBridge\nfrom std_msgs.msg import 
String\nfrom sensor_msgs.msg import Image\nfrom sinfonia_pepper_robot_toolkit.srv import TakePicture, TakePictureResponse\n\n\nclass RobotCamera:\n\n def __init__(self, ip):\n self._camera = ALProxy(\"ALVideoDevice\", ip, 9559)\n self._ip = ip\n self._bridge = CvBridge()\n\n rospy.Service(\"sIA_take_picture\", TakePicture, self.handleTakePicture)\n self._errorPub = rospy.Publisher(\"sIA_rt_error_msgs\", String, queue_size=10)\n\n self.name = None\n self.cameraParams = []\n self.isStreaming = False\n\n self.videoPublisher = None\n\n def subscribeTopics(self):\n rospy.Subscriber(\"sIA_stream_from\", String, self.handleStreamVideo)\n\n def createPublishers(self):\n self.videoPublisher = rospy.Publisher(\"sIA_video_stream\", Image, queue_size=1)\n\n def subscribeCamera(self):\n self.name = str(self._ip + \"_\" + str(random.randint(0, 99999)))\n self.name = self._camera.subscribeCamera(self.name, self.cameraParams[0], self.cameraParams[1],\n self.cameraParams[2], self.cameraParams[3])\n\n def unsubscribeCamera(self):\n self._camera.unsubscribe(self.name)\n self.name = None\n\n def takePicture(self):\n self.subscribeCamera()\n image = self._camera.getImageRemote(self.name)\n image = im.frombytes(\"RGB\", (int(image[0]), int(image[1])), image[6])\n self.unsubscribeCamera()\n\n return image\n\n def streamVideo(self):\n\n try:\n if self.name is None:\n self.subscribeCamera()\n image = self._camera.getImageRemote(self.name)\n image = im.frombytes(\"RGB\", (int(image[0]), int(image[1])), image[6])\n image = self._bridge.cv2_to_imgmsg(np.array(image, 'uint8'), \"rgb8\")\n self.videoPublisher.publish(image)\n except TypeError:\n self._errorPub.publish(\"Error 0x06: Camera closed beforehand [camera]\")\n return\n\n def takeDepthMap(self):\n self.subscribeCamera()\n image = self._camera.getImageRemote(self.name)\n self.unsubscribeCamera()\n\n img = Image()\n img.header.stamp = rospy.Time(image[4] + image[5] * 1e-6)\n img.header.frame_id = \"2\"\n img.height = image[1]\n img.width = image[0]\n nbLayers = image[2]\n img.encoding = \"16UC1\"\n img.step = img.width * nbLayers\n img.data = image[6]\n\n return img\n\n def handleTakePicture(self, req):\n\n if req.command == \"Take Picture\":\n if self.name is None:\n if len(req.params) != 4:\n self._errorPub.publish(\"Error 0x02: Wrong number of params [camera]\")\n else:\n params = list(req.params)\n if utils.checkCameraSettings(params, \"camera\"):\n self.cameraParams = params\n if utils.areInRange([params[0]], [[0, 1]]):\n image = self.takePicture()\n return TakePictureResponse(self._bridge.cv2_to_imgmsg(np.array(image, 'uint8'), \"rgb8\"))\n elif params[0] == 2:\n msg = self.takeDepthMap()\n return TakePictureResponse(msg)\n else:\n self._errorPub.publish(\"Error 0x00: Value out of range [camera]\")\n else:\n self._errorPub.publish(\"Error 0x05: Resource is already in use [camera]\")\n else:\n self._errorPub.publish(\"Error 0x01: Wrong message [camera]\")\n\n def handleStreamVideo(self, data):\n\n if \"sIA_video_stream\" in data.data:\n request = data.data.split(\".\")[1:]\n if len(request) == 5:\n state = request[-1]\n\n try:\n params = map(int, request[:-1])\n except ValueError:\n self._errorPub.publish(\"Error 0x01: Wrong message [camera]\")\n return\n if state == \"ON\":\n if utils.checkCameraSettings(params, \"video\"):\n self.cameraParams = params\n self.isStreaming = True\n else:\n self._errorPub.publish(\"Error 0x00: Value out of range [camera]\")\n elif state == \"OFF\":\n self.unsubscribeCamera()\n self.isStreaming = False\n else:\n 
self._errorPub.publish(\"Error 0x01: Wrong message [camera]\")\n else:\n self._errorPub.publish(\"Error 0x02: Wrong number of params [camera]\")\n","repo_name":"carlosquinterop/SinfoniaPepperTeam","sub_path":"sinfonia_pepper_robot_toolkit/scripts/interaction/robot_camera.py","file_name":"robot_camera.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"38406172555","text":"import gym\nimport universe\nimport random\nfrom PIL import Image\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.models import load_model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Dropout\nfrom keras.layers.core import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.optimizers import Adam\nfrom keras.layers import concatenate\nfrom keras.utils import plot_model, to_categorical\nimport pickle\n\n# reinforcement learning step\ndef get_obs_len_reward(observation_n, reward_n, done_n, total_reward_sum, total_length):\n '''Proposed reward model\n If increase in length >= 25: Reward = +3\n If increase in length >= 10 and < 25: Reward = +2\n If increase in length > 0 and < 10: Reward = +1\n If increase in length < 0 (decrease): Reward = -3\n If killed, Reward = -10 * total_length\n '''\n total_length += reward_n\n step_reward = 0\n if reward_n >= 25:\n step_reward = 3\n elif reward_n >= 10 and reward_n < 25:\n step_reward = 2\n elif reward_n > 0 and reward_n < 10:\n step_reward = 1\n elif reward_n < 0:\n step_reward = -3\n if done_n == True:\n step_reward = -100\n \n total_reward_sum += step_reward\n\n return step_reward, total_reward_sum, total_length\n\ndef action_to_reward_model(input_shape):\n '''Map action to reward using a multilayer perceptron'''\n model = Sequential()\n model.add(Dense(128, input_dim = input_shape, activation = 'relu'))\n model.add(Dense(64, activation = 'relu'))\n model.add(Dense(16, activation = 'relu'))\n model.add(Dense(1, activation=\"linear\"))\n with open('mlpmodel.txt','w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + '\\n'))\n return model\n\ndef image_to_action_model(input_shape, filters=(16, 32, 64)):\n '''Map image to reward using a Deep CNN'''\n inputs = Input(shape=input_shape)\n for (index, filter) in enumerate(filters):\n if index == 0:\n x = inputs\n x = Conv2D(filter, (3, 3), padding='same')(x)\n x = Activation('relu')(x)\n x = BatchNormalization(axis = -1)(x)\n x = MaxPooling2D(pool_size=(2, 2))(x)\n x = Flatten()(x)\n x = Dense(64)(x)\n x = Activation('relu')(x)\n x = BatchNormalization(axis = -1)(x)\n x = Dropout(0.2)(x)\n x = Dense(16)(x)\n x = Activation('relu')(x)\n x = BatchNormalization(axis = -1)(x)\n x = Dropout(0.2)(x)\n x = Dense(4)(x)\n x = Activation('relu')(x)\n x = Dense(1, activation=\"linear\")(x)\n model = Model(inputs, x)\n \n with open('cnnmodel.txt','w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + '\\n'))\n return model\n\ndef build_model(action_input_shape, image_input_shape, output_shape):\n '''Create the hybrid model'''\n action_to_reward_X = action_to_reward_model(action_input_shape)\n image_to_reward_X = image_to_action_model(image_input_shape)\n combined_input = concatenate([action_to_reward_X.output, image_to_reward_X.output])\n x = Dense(64, 
activation=\"relu\")(combined_input)\n x = Dense(16, activation=\"relu\")(x)\n x = Dense(4, activation=\"relu\")(x)\n x = Dense(1, activation=\"linear\")(x)\n model = Model(inputs=[action_to_reward_X.input, image_to_reward_X.input], outputs=x)\n return model\n\ndef train_model(model, observation_list, step_reward_list, step_action_list):\n image_X = np.asarray(observation_list)\n image_X = image_X[:, :, :, np.newaxis]\n action_x = np.asarray(step_action_list)\n reward_y = np.asarray(step_reward_list)\n if model == None:\n model = build_model(action_input_shape= 3, image_input_shape=(300, 502, 1), output_shape=1)\n opt = Adam(lr=0.0001, decay=1e-3 / 200)\n model.compile(loss=\"mean_squared_error\", optimizer=opt)\n with open('mixedmodel.txt','w') as fh:\n model.summary(print_fn=lambda x: fh.write(x + '\\n'))\n hist = model.fit([action_x, image_X], reward_y, epochs=20, batch_size=32)\n return model, hist\n\ndef play(model, run_num, phase):\n env = gym.make('internet.SlitherIO-v0')\n env.configure(remotes=1) # automatically creates a local docker container\n observation_n = env.reset()\n n = 0\n total_reward_sum = 0\n reward_n = [0]\n done_n = [False]\n step_reward = 0\n total_length = 10\n available_actions = [[231, 196, 0], [231, 196, 1], [231, 236, 0], [231, 236, 1], [231, 276, 0], [231, 276, 1],\n [271, 196, 0], [271, 196, 1], [271, 236, 0], [271, 236, 1], [271, 276, 0], [271, 276, 1],\n [311, 196, 0], [311, 196, 1], [311, 236, 0], [311, 236, 1], [311, 276, 0], [311, 276, 1]]\n slow_actions = [[231, 196, 0], [231, 236, 0], [231, 276, 0],\n [271, 196, 0], [271, 236, 0], [271, 276, 0],\n [311, 196, 0], [311, 236, 0], [311, 276, 0]]\n observation_list = []\n step_reward_list = []\n step_action_list = []\n event = np.asarray([271, 236, 0])\n\n\n action_n = [[('PointerEvent', 271, 236, 0)]]\n done_n[0] = False\n while done_n[0] == False: \n n+=1\n if (n>1):\n if phase == 'train' or len(observation_list) == 0:\n event = random.choice(available_actions)\n action_n = [[('PointerEvent', event[0], event[1], event[2])]]\n event = np.asarray(event)\n\n elif phase == 'test':\n obs = np.asarray(observation_list[-1])\n obs = obs[np.newaxis, :, :, np.newaxis]\n for act in slow_actions:\n max_reward = 0\n event = []\n reward_action = model.predict([[np.asarray(act)], obs])[0][0]\n if (reward_action > max_reward):\n max_reward = reward_action\n action_n = [[('PointerEvent', act[0], act[1], act[2])]]\n event = np.asarray(act)\n\n if len(event) == 0:\n event = random.choice(slow_actions)\n action_n = [[('PointerEvent', event[0], event[1], event[2])]]\n event = np.asarray(event)\n\n if (observation_n[0] != None):\n step_reward, total_reward_sum, total_length = get_obs_len_reward(observation_n, reward_n[0], done_n[0], total_reward_sum, total_length)\n\n # save the new variables for each iteration\n observation_n, reward_n, done_n, info = env.step(action_n)\n if done_n[0] == True and len(step_reward_list) > 0:\n step_reward_list[-1] = -100\n # for purposes of visualization only\n if (observation_n[0] != None and step_reward != 0 ):\n arr = np.asarray(observation_n[0]['vision'])\n arr = arr[86:386, 20:522, :]\n R = arr[: , : , 0] * 0.299\n G = arr[: , : , 1] * 0.587\n B = arr[: , : , 2] * 0.114\n gray_imgarr = (R + G + B)\n observation_list.append(gray_imgarr)\n step_action_list.append(event)\n step_reward_list.append(step_reward)\n arr2im = Image.fromarray(arr)\n arr2im.save('observation_n.jpg')\n env.render()\n\n return observation_list, step_reward_list, step_action_list, total_length\n\n\nif __name__ == 
'__main__':\n # Number of times to play the game\n num_play = 1000\n total_reward = []\n total_length_list = []\n loss_history = []\n try:\n with open('obsFile', 'rb') as lf:\n observation_list = pickle.load(lf)\n except:\n observation_list = []\n\n try:\n with open('rewardlistFile', 'rb') as lf:\n step_reward_list = pickle.load(lf)\n with open('actionlistFile', 'rb') as lf:\n step_action_list = pickle.load(lf)\n except:\n step_reward_list = []\n step_action_list = []\n\n model = None\n for x in range(1, num_play):\n if x <= 1:\n print('-'*100, 'training', x)\n observation_list_train, step_reward_list_train, step_action_list_train, total_length = play(None, x, 'train')\n observation_list.extend(observation_list_train)\n with open('obsFile', 'wb') as lf:\n pickle.dump(observation_list, lf)\n step_reward_list.extend(step_reward_list_train)\n with open('rewardlistFile', 'wb') as lf:\n pickle.dump(step_reward_list, lf)\n step_action_list.extend(step_action_list_train)\n with open('actionlistFile', 'wb') as lf:\n pickle.dump(step_action_list, lf)\n\n print('==============> step_reward_list', step_reward_list_train)\n print('==============> step_action_list', step_action_list_train)\n print('==============> total_length', total_length)\n\n\n if (len(observation_list) > 0 and x > 1 and x % 5 == 0):\n try:\n model = load_model('slither_model.h5')\n except:\n model = None\n\n model, hist = train_model(model, observation_list, step_reward_list, step_action_list)\n # Save the model\n model.save('slither_model.h5')\n loss_history.append(hist.history['loss'][-1])\n with open('historyFile', 'wb') as hf:\n pickle.dump(loss_history, hf)\n\n if model != None and x > 1:\n print('-'*100, 'testing')\n observation_list_x, step_reward_list_x, step_action_list_x, total_length = play(model, x, 'test')\n observation_list.extend(observation_list_x)\n step_reward_list.extend(step_reward_list_x)\n step_action_list.extend(step_action_list_x)\n print('==============> step_action_list_x', step_action_list_x)\n print('==============> step_reward_list', step_reward_list_x)\n print('==============> total_length', total_length)\n total_reward = np.append(total_reward, np.sum(np.asarray(step_reward_list_x)))\n total_length_list = np.append(total_length_list, total_length)\n with open('rewardsFile', 'wb') as rf:\n pickle.dump(total_reward, rf)\n with open('lengthFile', 'wb') as lf:\n pickle.dump(total_length_list, lf)\n print('------- iteration', x)\n print('------- iteration reward', np.sum(np.asarray(step_reward_list_x)))\n print('------- iteration length', total_length)\n print('------- total_reward', total_reward)\n print('------- total_length_list', total_length_list)\n \n with open('obsFile', 'wb') as lf:\n pickle.dump(observation_list, lf)\n with open('rewardlistFile', 'wb') as lf:\n pickle.dump(step_reward_list, lf)\n with open('actionlistFile', 'wb') as lf:\n pickle.dump(step_action_list, lf)","repo_name":"hrishekesh/hybridnn","sub_path":"slithermodel.py","file_name":"slithermodel.py","file_ext":"py","file_size_in_byte":10924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16828902586","text":"import json\n\n\nclass MyConfig:\n def __init__(self):\n self.content = dict()\n self.metadata = dict()\n self.steps = dict()\n\n def load_data_by_path(self, path):\n self.content = self.__read_json(path)\n self.to_segments()\n self.merge_parts()\n\n @staticmethod\n def __read_json(path):\n data = {}\n with open(path, \"r\") as read_file:\n data = 
json.load(read_file)\n return data\n\n def to_segments(self):\n # form steps dict (without description) and metadata dict (without steps)\n self.metadata = self.content.copy()\n self.steps = self.metadata['configurations']['steps'].copy()\n self.steps.pop('description')\n self.metadata['configurations'].pop('steps')\n\n def merge_parts(self):\n \"\"\"\n :return: dictionary formed during the program operation\n \"\"\"\n data = self.metadata.copy()\n data['configurations'].update(self.steps.copy())\n return data\n","repo_name":"Joseph-Lila/config_maker","sub_path":"Code/py_files/MyConfig.py","file_name":"MyConfig.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"27948049733","text":"from google.cloud import vision\nimport os\nfrom selenium import webdriver\nfrom time import sleep\n\ndef count_faces(path):\n \"\"\"Detects faces in an image.\"\"\"\n import io;\n client = vision.ImageAnnotatorClient();\n \n \n with io.open(path, 'rb') as image_file:\n content = image_file.read();\n \n image = vision.Image(content=content);\n response = client.face_detection(image=image);\n faces = response.face_annotations;\n \n return len(faces);\n\nimgFile = '/Users/vdo/Documents/Jeffrey/CMU/Courses/2021T2/49783/assignments/A4/screen.png';\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]='/Users/vdo/Documents/Jeffrey/CMU/Courses/2021T2/49783/ultimate-retina-317302-4a5e9f81d23b.json';\nexpectedFaceCount = int(input(\"Enter expected count of attendees: \"));\n\nbrowser = webdriver.Firefox(executable_path=\"/usr/local/bin/geckodriver/geckodriver\")\nbrowser.get(\"https://www.zoom.us/\");\ninput(\"Press RETURN when everyone has arrived.\");\ncountUses = 0;\ntry:\n while browser:\n \n #photograph the class\n browser.get_screenshot_as_file(\"screen.png\");\n \n #use Google's face counter\n faceCount = count_faces(imgFile);\n countUses += 1;\n \n #if some are off-screen\n if faceCount < expectedFaceCount: #alert the teacher\n if expectedFaceCount - faceCount == 1:\n os.system('say \"Alert! One attendee is missing!\"');\n else:\n os.system('say \"Alert! 
' + str(expectedFaceCount - faceCount) + ' attendees are missing!\"');\n \n #wait a few seconds before checking again\n sleep(10);\nexcept Exception as e:\n print('Google Vision Calls: ' + str(countUses));\n","repo_name":"jjmcinto/roboNanny","sub_path":"roboNanny.py","file_name":"roboNanny.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73846129153","text":"class Solution:\n def find(s: str, i: int, even: bool) -> tuple[int, int]:\n \"\"\"\n Get longest palindromic substring with odd/even length\n using index i as the center\n \"\"\"\n left = i + (1 if even else 0)\n right = i\n # Keep expanding substring from center\n while left - 1 >= 0 and right + 1 < len(s):\n # Cannot expand substring anymore\n if s[left - 1] != s[right + 1]:\n break\n left -= 1\n right += 1\n return left, right\n\n def longestPalindrome(self, s: str) -> str:\n max_len = 0\n max_l_r = 0, 0\n for i in range(len(s)):\n for even in (True, False):\n l, r = Solution.find(s, i, even)\n if r - l + 1 > max_len:\n max_len = r - l + 1\n max_l_r = l, r\n l, r = max_l_r\n return s[l : r + 1]\n\n\ns = Solution()\nprint(s.longestPalindrome(\"abaismalayalamm\")) # malayalam\nprint(s.longestPalindrome(\"babad\")) # bab\nprint(s.longestPalindrome(\"a\")) # a\nprint(s.longestPalindrome(\"abba\")) # abba\n","repo_name":"jetkan-yk/phyting","sub_path":"top100/longest_palindromic_substring.py","file_name":"longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20353741391","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport mlrose_hiive\nimport pandas as pd\nimport genetic_algorithm\nimport simulated_annealing\nimport mimic\nimport hillclimbing\nimport graphs\n\n\ndef create_tsp_problem(length=10, width=10, cities=8):\n coords_list = []\n np.random.seed(786)\n\n for city in range(0, cities):\n new_city = (np.random.randint(0, length), np.random.randint(0, width))\n if new_city not in coords_list:\n coords_list.append(new_city)\n\n tsp_coords = mlrose_hiive.fitness.TravellingSales(coords=coords_list)\n tsp_problem = mlrose_hiive.TSPOpt(length=cities, fitness_fn=tsp_coords, maximize=True)\n return tsp_problem\n\n\ndef plot_tsp_grid(tsp_problem, data_string=\"\", solution_vector=None):\n coords = tsp_problem.fitness_fn.coords\n x_coords = []\n y_coords = []\n\n for coord in coords:\n x_coords.append(coord[0])\n y_coords.append(coord[1])\n\n for i in range(len(coords)):\n plt.plot(x_coords[i], y_coords[i], 'co')\n plt.text(x_coords[i], y_coords[i], str(i))\n\n if solution_vector is not None:\n for i in range(0, len(solution_vector) - 1):\n x = x_coords[solution_vector[i]]\n y = y_coords[solution_vector[i]]\n dx = x_coords[solution_vector[i+1]] - x\n dy = y_coords[solution_vector[i+1]] - y\n\n plt.arrow(x, y, dx, dy, color='r', width=0.001/len(solution_vector), length_includes_head=True,\n head_width=5/len(solution_vector) )\n\n plt.title(\"Solution Vector Plotted for \" + data_string)\n plt.savefig(data_string + \"_travelling_sales_solution_plot\")\n plt.close()\n\n\ndef main_10_cities():\n # Create the TSP\n tsp = create_tsp_problem(length=200, width=200, cities=10)\n plot_tsp_grid(tsp, \"Genetic_Algorithm\")\n\n # Random Hillclimbing\n rhc_run_stats, rhc_run_curves = hillclimbing.solve_with_hillclimbing(tsp, \"RHC_TSP\")\n\n rhc_data_strings = {\n 'title': 'RHC - 10 Cities',\n 'Parameters': 
['Restarts'],\n 'limit_time': 0,\n 'limit_iterations': 0\n }\n graphs.generate_graphs(rhc_run_stats, rhc_run_curves, rhc_data_strings)\n\n # Mimic\n\n mimic_run_stats, mimic_run_curves = mimic.solve_with_mimic(tsp, \"MIMIC_TSP\")\n mimic_data_strings = {\n 'title': 'MIMIC - 10 Cities',\n 'Parameters': ['Population Size', 'Keep Percent'],\n 'limit_time': 5,\n 'limit_iterations': 100\n }\n graphs.generate_graphs(mimic_run_stats, mimic_run_curves, mimic_data_strings)\n\n # Solve with Genetic Algorithm\n ga_run_stats, ga_run_curves = genetic_algorithm.solve_with_ga(tsp, \"GA_TSP\")\n\n ga_data_strings = {\n 'title': 'Genetic Algorithms - 10 Cities',\n 'Parameters': ['Mutation Rate', 'Population Size'],\n 'limit_time': 1,\n 'limit_iterations': 800\n }\n graphs.generate_graphs(ga_run_stats, ga_run_curves, ga_data_strings)\n\n # Simulated Annealing\n sa_run_stats, sa_run_curves = simulated_annealing.solve_with_sim_annealing(tsp, \"SA_TSP\")\n\n sa_data_strings = {\n 'title': 'Simulated Annealing - 10 Cities',\n 'Parameters': ['Temperature'],\n 'limit_time': 0.4,\n 'limit_iterations': 1000\n }\n graphs.generate_graphs(sa_run_stats, sa_run_curves, sa_data_strings)\n\n\ndef main_5_cities():\n # Create the TSP\n tsp = create_tsp_problem(length=200, width=200, cities=5)\n plot_tsp_grid(tsp, \"Genetic_Algorithm\")\n\n # Random Hillclimbing\n rhc_run_stats, rhc_run_curves = hillclimbing.solve_with_hillclimbing(tsp, \"RHC_TSP_5_Cities\")\n\n rhc_data_strings = {\n 'title': 'RHC - 5 Cities',\n 'Parameters': ['Restarts']\n }\n graphs.generate_graphs(rhc_run_stats, rhc_run_curves, rhc_data_strings)\n\n # Mimic\n\n mimic_run_stats, mimic_run_curves = mimic.solve_with_mimic(tsp, \"MIMIC_TSP_5_Cities\")\n mimic_data_strings = {\n 'title': 'MIMIC - 5 Cities',\n 'Parameters': ['Population Size', 'Keep Percent']\n }\n graphs.generate_graphs(mimic_run_stats, mimic_run_curves, mimic_data_strings)\n\n # Solve with Genetic Algorithm\n ga_run_stats, ga_run_curves = genetic_algorithm.solve_with_ga(tsp, \"GA_TSP_5_Cities\")\n\n ga_data_strings = {\n 'title': 'Genetic Algorithms - 5 Cities',\n 'Parameters': ['Mutation Rate', 'Population Size']\n }\n graphs.generate_graphs(ga_run_stats, ga_run_curves, ga_data_strings)\n\n # Simulated Annealing\n sa_run_stats, sa_run_curves = simulated_annealing.solve_with_sim_annealing(tsp, \"SA_TSP_5_Cities\")\n\n sa_data_strings = {\n 'title': 'Simulated Annealing - 5 Cities',\n 'Parameters': ['Temperature']\n }\n graphs.generate_graphs(sa_run_stats, sa_run_curves, sa_data_strings)\n\n\nif __name__ == '__main__':\n main_10_cities()\n # tsp = create_tsp_problem()\n # plot_tsp_grid(tsp)\n # main_5_cities()\n","repo_name":"shayanmukhtar/CS7641_Assignment_2","sub_path":"Code/travelling_salesman.py","file_name":"travelling_salesman.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21985403867","text":"# STEP 3: Apply distortion correction and warping (perspective transformation)\n\nimport pickle\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n# Read in the saved camera matrix and distortion coefficients\n# These are the arrays you calculated using cv2.calibrateCamera()\ndist_pickle = pickle.load( open( \"camera_cal/cali_pickle.p\", \"rb\" ) )\nmtx = dist_pickle[\"mtx\"]\ndist = dist_pickle[\"dist\"]\n\n# Read in an image\nimg = cv2.imread('test_images/straight_lines2.jpg')\n\n# MODIFY THIS FUNCTION TO GENERATE OUTPUT \n# THAT LOOKS 
LIKE THE IMAGE ABOVE\ndef corners_unwarp(img, mtx, dist):\n # Pass in your image into this function\n # Write code to do the following steps\n # 1) Undistort using mtx and dist\n undist = cv2.undistort(img, mtx, dist, None, mtx)\n img_size = (undist.shape[1], undist.shape[0])\n # 2) Convert to grayscale\n gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)\n\n # b) define 4 source points src = np.float32([[,],[,],[,],[,]])\n #Note: you could pick any four of the detected corners \n # as long as those four corners define a rectangle\n #One especially smart way to do this would be to use four well-chosen\n # corners that were automatically detected during the undistortion steps\n #We recommend using the automatic detection of corners in your code\n src = np.float32(\n [\n [557, 476],\n [731, 476],\n [1042, 674],\n [281, 674]\n ]\n )\n # [588, 459],\n # [696, 459],\n # [975, 637],\n # [327, 637]\n # c) define 4 destination points dst = np.float32([[,],[,],[,],[,]])\n dst = np.float32(\n [\n [280, 0],\n [1060, 0],\n [1060, 700],\n [280, 700]\n ]\n )\n # [300, 0],\n # [1000, 0],\n # [1000, 700],\n # [300, 700]\n # d) use cv2.getPerspectiveTransform() to get M, the transform matrix\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src) # This is unnecessaary for this problem\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n warped = cv2.warpPerspective(undist, M, img_size, flags=cv2.INTER_LINEAR)\n\n return warped, M\n\ntop_down, perspective_M = corners_unwarp(img, mtx, dist)\n\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # for displaying normal color\ntop_down = cv2.cvtColor(top_down, cv2.COLOR_BGR2RGB) # for displaying normal color\n\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))\nf.tight_layout()\nax1.imshow(img)\nax1.set_title('Original Image', fontsize=10)\nax2.imshow(top_down)\nax2.set_title('Undistorted and Warped Image', fontsize=10)\nplt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)\n\nplt.show()","repo_name":"Zoetic-Zephyr/CarND-Advanced-Lane-Lines","sub_path":"undistort_and_transform.py","file_name":"undistort_and_transform.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41322003497","text":"#!usr/bin/python\n\"\"\"\nProcess the utterance output from MetaMap.\nXuenan Pi\n11/01/2016\n\"\"\"\n\nfrom time_point import past_regex\n\n\nclass LabelTerms(object):\n def __init__(self, utterance, test=False):\n self.input_utterance = utterance\n self.test = test\n\n self.utterance_start = int(self.input_utterance[0][\"Utterance start index\"][0])\n self.text = self.input_utterance[0][\"Utterance text\"]\n self.syntax_unit = self.input_utterance[0][\"Utterance syntax unit\"]\n\n self.utterance_dict = dict()\n self.utterance_dict[\"Utterance text\"] = self.text\n self.utterance_dict[\"Utterance syntax unit\"] = self.syntax_unit\n self.mapping_result = []\n self.utterance_dict[\"mapping result\"] = self.mapping_result\n\n self.term_index_dict = dict()\n\n self.nonsense_mapping_list = [[\"therapeutics\", \"disease\", \"syndrome\", \"lactate\", \"calcium\"],\n [(\"procedure\", \"interventional procedure\"), (\"cavity\", \"dental caries\"),\n (\"water\", \"water\"), (\"immersion\", \"immersion\"), (\"sterile\", \"infertility\"),\n (\"preserved\", \"Biologic preservation\"), (\"fed\", \"Fish-eye disease\"),\n (\"preservation\", \"Biologic preservation\"), (\"delivery\", \"Obstetric delivery\"),\n (\"tolerance\", \"Immune 
tolerance\"), (\"binge\", \"Binge eating disorder\"),\n (\"reconstruction\", \"Reconstructive surgical procedures\"),\n (\"echo\", \"Echo protocol\"), (\"genetic\", \"Gene therapy\"),\n (\"regimen\", \"Treatment protocols\")]]\n\n def get_age_and_gender(self, term):\n self.utterance_dict[\"Age\"] = term[\"Age\"]\n if \"Gender\" not in term.keys():\n self.utterance_dict[\"Gender\"] = None\n else:\n self.utterance_dict[\"Gender\"] = term[\"Gender\"]\n\n def part_of_speech_noun(self, term_word):\n \"\"\" For the word that can be found in the syntax unit, only keep the word if the word is a noun.\"\"\"\n term_list, part_of_speech_list = zip(*self.syntax_unit)[0], zip(*self.syntax_unit)[1]\n if term_word not in term_list:\n term_index = None\n # sometimes the term word is a phrase\n # select the last word in the phrase, as usually noun is the last word\n term_word = term_word.split()[-1]\n for term in term_list:\n if term_word in term:\n term_index = term_list.index(term)\n # skip the word that cannot be found in the syntax unit\n if not term_index:\n return True\n else:\n term_index = term_list.index(term_word)\n tag = part_of_speech_list[term_index]\n return tag == \"noun\" and True or False\n\n def get_concept(self, term):\n # avoid including repetitive mapping result\n if not self.term_index_dict.values() or term[\"Concept Name\"] not in zip(*self.term_index_dict.values())[0]:\n position_list = term[\"Positional Info\"]\n term_start = position_list[0]\n term_length = position_list[1]\n index = term_start - self.utterance_start\n term_word = self.text[index:index + term_length]\n if not self.nonsense_mapping_test(term[\"Concept Name\"], term_word) and self.part_of_speech_noun(term_word)\\\n and term[\"Semantic Types\"] not in [\"[Population Group]\", \"[Age Group]\"]:\n if self.test:\n self.term_index_dict[index] = (term[\"Concept Name\"], term[\"Semantic Types\"], term_word)\n else:\n self.term_index_dict[index] = (term[\"Concept Name\"], term[\"Semantic Types\"])\n\n def get_time_point(self, phrase):\n for time in phrase[\"Time Point\"]:\n index = self.text.index(time)\n self.term_index_dict[index] = (time, \"[Time Point]\")\n\n def nonsense_mapping_test(self, term_concept, term_word):\n if term_concept.lower() in self.nonsense_mapping_list[0]:\n return True\n else:\n return (term_word.lower(), term_concept.lower()) in self.nonsense_mapping_list[1]\n\n def clean_mapping_result(self):\n \"\"\"Empty the utterance mapping result if there is only time point information there.\"\"\"\n if self.mapping_result:\n semantic_types_list = zip(*zip(*self.mapping_result)[1])[1]\n if set(semantic_types_list) == {\"[Time Point]\"}:\n del self.mapping_result[:]\n\n def process(self):\n for phrase in self.input_utterance[1:]:\n if \"mapping\" in phrase.keys():\n mapping = phrase[\"mapping\"]\n for term in mapping:\n # age and gender\n if \"Age\" in term.keys():\n self.get_age_and_gender(term)\n # concept term\n else:\n self.get_concept(term)\n # time point\n elif \"Time Point\" in phrase.keys():\n self.get_time_point(phrase)\n\n if self.term_index_dict:\n for key in sorted(self.term_index_dict.keys()):\n # [[index, (Concept time, Semantic types/Time Point)]]\n self.mapping_result += [[key, self.term_index_dict[key]]]\n self.clean_mapping_result()\n\n def label_mapping_result(self):\n # print self.mapping_result, self.text, self.syntax_unit\n term_list = zip(*zip(*self.mapping_result)[1])[0]\n semantic_types_list = zip(*zip(*self.mapping_result)[1])[1]\n if \"[Time Point]\" in semantic_types_list:\n 
time_index = semantic_types_list.index(\"[Time Point]\")\n term_idx = 0\n while term_idx < len(self.mapping_result):\n term = self.mapping_result[term_idx]\n if term[1][1] != \"[Time Point]\":\n if past_regex(term_list[time_index]):\n term[0] = (\"Past\", term_list[time_index])\n # only label time point for the current part when the semantic types is sign or symptom\n elif term[1][1] == \"[Sign or Symptom]\":\n term[0] = (\"Current\", term_list[time_index])\n else:\n term[0] = (\"Current\", 0)\n term_idx += 1\n else:\n self.mapping_result.remove(term)\n else:\n for term in self.mapping_result:\n term[0] = (\"Current\", 0)\n\n def main(self):\n self.process()\n if self.mapping_result:\n self.label_mapping_result()\n return self.utterance_dict\n","repo_name":"pixuenan/CaseReport","sub_path":"MetaMapParser/label_terms.py","file_name":"label_terms.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70451767556","text":"\"\"\"\n465. Optimal Account Balancing\n\"\"\"\n\nfrom typing import List\nimport sys\n\nclass Solution:\n def minTransfers(self, transactions: List[List[int]]) -> int:\n accounts = {}\n\n for t in transactions:\n # Payer\n accounts[t[0]] = accounts.get(t[0], 0) + t[2]\n # Payee\n accounts[t[1]] = accounts.get(t[1], 0) - t[2]\n\n dp = []\n # We do not need to consider people whose balance is zero\n print(accounts)\n for key in accounts.keys():\n if accounts[key] != 0:\n dp.append(accounts[key])\n print(dp)\n\n def dfs(i, dp):\n if i >= len(dp):\n return 0\n\n # get value with index i\n cur = dp[i]\n # We do not need to consider 0 case, since it is already\n # balanced. We only need to consider remaining case.\n if cur == 0:\n return dfs(i+1, dp)\n\n min_value = sys.maxsize\n # swap i with remaining j, then calculate by dfs\n for j in range(i+1, len(dp)):\n # we should only consider one negative and one positive case.\n # guarantee dp[z] == 0 for z in [0, i]\n n = dp[j]\n if cur * n < 0:\n dp[j] = cur + n\n min_value = min(min_value, 1 + dfs(i+1, dp))\n # restore j\n dp[j] = n\n\n return min_value\n return dfs(0, dp)\n\n\n","repo_name":"dictator-x/practise_as","sub_path":"algorithm/leetCode/0465_optimal_account_balancing.py","file_name":"0465_optimal_account_balancing.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4256993782","text":"import RPi.GPIO as gpio\nimport time\n\nis_pin_on = 0\n\ngpio.setmode(gpio.BCM)\ngpio.setup(4, gpio.IN)\n\n\ntry:\n while 1:\n is_pin_on = gpio.input(4)\n if is_pin_on == 0:\n print('off')\n else:\n print('on')\n time.sleep(0.1)\nexcept KeyboardInterrupt:\n gpio.cleanup()\n\n","repo_name":"huichangs/Embedded-Programming","sub_path":"input_example.py","file_name":"input_example.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22513243744","text":"import core.pml as pml,shells.geocoverageshell as geocoverageshell,shells.bagoftagsshell as bagoftagsshell, os, core.online, shells.emotionshell as emotionshell, core.demo, shells.bagofvisualwordsshell,shells.bagofwordsshell,shells.complexityshell,shells.contrastshell,shells.viewcountshell\nimport sys\nfrom core.reports import *\n\nif __name__ == '__main__':\n port = 8080\n if len(sys.argv) > 1:\n port = sys.argv[1]\n mm = core.demo.DemoOptimizer(port=port)\n 
mm.configureFromFile(\"configs/demo.default.config\")\n\n es = emotionshell.EmotionSummationShell()\n mm.addShell(es)\n\n gc = geocoverageshell.GeoHotspotShell()\n gc.configureFromFile(\"configs/geohotspot.default.config\")\n mm.addShell(gc)\n\n cmpls = shells.complexityshell.ComplexityShell()\n mm.addShell(cmpls)\n \n cntrs = shells.contrastshell.ContrastSummationShell()\n mm.addShell(cntrs)\n\n vcs = shells.viewcountshell.ViewCountShell()\n vcs.configureFromFile('configs/viewcountshell.default.config')\n mm.addShell(vcs)\n\n \"\"\"\n bvws = shells.bagofvisualwordsshell.BagOfVisualWordsShell()\n bvws.configureFromFile(\"configs/bagofvisualwords.default.config\")\n mm.addShell(bvws)\n\n bts = bagoftagsshell.BagOfTagsShell()\n bts.configureFromFile(\"configs/bagoftags.default.config\")\n mm.addShell(bts)\n\n\n bws = shells.bagofwordsshell.BagOfWordsShell()\n bws.configureFromFile(\"configs/bagofwordsshell.default.config\")\n mm.addShell(bws)\n \"\"\"\n\n\n selected = mm.run()\n\n #reporters = [HtmlReporter()]\n\n #createReports(mm,selected,reporters)\n","repo_name":"yk/pml14publish","sub_path":"demorun.py","file_name":"demorun.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23549248881","text":"def flip(cakes, width, index):\n for i in range(index, index + width):\n cakes[i] = \"-\" if cakes[i] == \"+\" else \"+\"\n\n\ndef is_happy(cakes):\n return all(map(lambda x: x == \"+\", cakes))\n\n\ndef flips(pancakes, width):\n cakes = list(pancakes)\n flip_count = 0\n for i in range(len(cakes) - width + 1):\n if cakes[i] == \"-\":\n flip_count += 1\n flip(cakes, width, i)\n # print(\"\".join(cakes))\n\n return str(flip_count) if is_happy(cakes) else \"IMPOSSIBLE\"\n\n\n# import random\n#\n# test_cakes = [random.choice(\"+-\") for i in range(1000)]\n#\n# print(test_cakes)\n#\n# print(flips(test_cakes, 3))\n# print(flips(\"+++++\", 4))\n# print(flips(\"+-+-+\", 4))\n\n# INPUT = \"TestInput\"\n# OUTPUT = \"TestOutput\"\n\n# INPUT = \"SmallInput\"\n# OUTPUT = \"SmallOutput\"\n\n\nINPUT = \"LargeInput\"\nOUTPUT = \"LargeOutput\"\n\nwith open(INPUT, \"r\") as input_file:\n with open(OUTPUT, \"w\") as output_file:\n output_file.truncate()\n\n t = int(input_file.readline())\n for i in range(t):\n cakes, width = input_file.readline().split()\n width = int(width)\n\n output_file.write(f\"Case #{i+1}: {flips(cakes, width)}\\n\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_199/783.py","file_name":"783.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13579672980","text":"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport emxmisc.geometry as geom\nimport sys\n\n# Styles, added Dec 2017, see e.g.\n# print(plt.style.available)\n# https://tonysyu.github.io/raw_content/matplotlib-style-gallery/gallery.html\n\n# Outliers, info:\n# More advanced, from http://stackoverflow.com/questions/10231206/can-scipy-stats-identify-and-mask-obvious-outliers\n# See also: http://statsmodels.sourceforge.net/devel/examples/notebooks/generated/robust_models_0.html\n# For statsmodels on python3: install python3-pip; pip3 install statsmodels\n\ntry:\n from statsmodels.formula.api import ols # Used pip3 (python3-pip) to install\n print('Stats: statsmodels available')\n ImportStats=True\nexcept:\n print('Stats: NOT FOUND statsmodels')\n 
ImportStats=False\n\ndef getBias(x,y):\n sumBias=0.0\n for obs, mod in zip(x,y):\n sumBias += (mod-obs)\n return sumBias/len(x)\n\ndef getNME(x,y):\n sumObs=0.0\n sumErr=0.0\n for obs, mod in zip(x,y):\n sumErr += np.abs(obs-mod)\n sumObs += obs\n return 100*sumErr/sumObs # normalised mean error, as %\n\n#============================================================================\n\ndef emeploglogplot(x,y,xlabel,ylabel,txt=None,pcodes=None): #,label=None,\n plt.style.use('seaborn')\n #y[-1] = 10.0\n logx = np.log10( np.array(x))\n logy = np.log10( np.array(y))\n plt.scatter(x,y)\n for i in range(len(x)):\n p= pcodes[i] if pcodes else '#%d'%i\n print( 'INTO emepscatplot p,x,y: ', p, x[i], y[i], logx[i], logy[i] )\n if pcodes is not None:\n label = '%4s'%p\n plt.text(x[i],y[i],p,color='k',fontsize=10)\n t= plt.xticks()\n print('TTT', t)\n plt.xscale('log')\n plt.yscale('log')\n plt.axis('equal')\n plt.show()\n \n#============================================================================\n\ndef emepscatplot(x,y,xlabel,ylabel,txt=None,pcodes=None,label=None,\n title=None,\n plotstyle='seaborn',\n labelx=0.1,labely=0.9,labelsize=16,\n addxy=0.0, # Increases maxv to e.g. cope with label overwrites\n minxy=None, # lower value limit for plots\n statsxy=None, # loc reg stats text, e.g. (0.5,0.8)\n loglog=False,\n regline_wanted=True,\n addStats=False,addStats4=False, # 4 gives 4 figs\n addNME=False,\n addBias=False,\n biasUnits=None,\n skipOutliers=False,dbg=False,ofile=None):\n\n \"\"\"\n Scatter plot, emepscatplot(x,y,xlabel,ylabel,txt=None,pcodes=None,addxy=0.0,\n addStats=False,ofile=None)\n \"\"\"\n dtxt = 'emepscatplot'\n col_def = 'k'\n col_valid = 'b'\n col_outlier = 'r'\n plt.style.use(plotstyle)\n x = np.array(x)\n y = np.array(y)\n if addStats4: addStats = True # 4 just gives extra prec\n if dbg:\n print( dtxt+' lengths x,y: ', len(x), len(y) )\n print( dtxt+' shape x, y: ', x.shape, y.shape )\n print( dtxt+' plotstyle : ', plotstyle)\n for i in range(len(x)):\n p= pcodes[i] if pcodes else '#%d'%i\n print( 'INTO emepscatplot p,x,y: ', p, x[i], y[i] )\n#vlimit=300.0\n#f=alt0.0:\n maxv += addxy\n if title: # Hard-coded position so far, top-left\n ax.title(title, fontsize=labelsize)\n #NEBULA had: plt.title(title, fontsize=labelsize)\n if label: # Hard-coded position so far, top-left\n ax.text(labelx,labely,label, fontsize=labelsize,transform=ax.transAxes)\n print('XYLAB', maxv, minv, labelx*maxv, labely*maxv, v, xlabel,label)\n #plt.gca().set_aspect('equal')\n ax.set_aspect('equal')\n \n\n###########################################################################\n [m,c]=np.polyfit(x,y,1)\n r=np.corrcoef(x,y)\n###########################################################################\n skipi = np.zeros(len(x),dtype='int')\n skipL = np.full(len(x),True,dtype=bool)\n skip = [] \n if skipOutliers:\n try:\n regression= ols(\"data ~x\",data=dict(data=y,x=x)).fit()\n test = regression.outlier_test() # Find outliers \n #DS outliers = ((x[i],y[i]) for i,t in enumerate(test.icol(2)) if t < 0.5)\n #for i,t in enumerate(test.icol(2)):\n for i,t in enumerate(test.iloc[:,2]):\n skipL[i] = t < 0.5\n if t < 0.5:\n skipi[i] = 1\n skip.append(i)\n print(dtxt+'SKipping ', i, x[i], y[i] )\n #print(dtxt+'SKIPi,n=', len(skip), skipL )\n ax.scatter(x[skipL],y[skipL],color=col_outlier)\n #sys.exit()\n except: # where stats not implemented Test own outliers\n g = []\n for i in range(0,len(x)):\n g.append( geom.distancePoint2line( x[i],y[i], m, -1.0, c ) )\n print('GEOM', i, x[i], y[i], pcodes[i], 
geom.distancePoint2line( x[i],y[i], m, -1.0, c ))\n print('MAXGEOM', max( g ))\n for i in range(0,len(x)):\n if g[i] > 0.5* max(g):\n skipi[i] = 1\n skip.append(i)\n\n print('Outliers: ', skip)\n # Figure #\n #DS figure = smgraphics.regressionplots.plot_fit(regression, 1)\n # Add line #\n #DS smgraphics.regressionplots.abline_plot(model_results=regression, ax=figure.axes[0])\n\n#### station codes if wanted ##############################################\n\n if pcodes is None : # uses site codes, e.g AT00031R\n print('No PCODES')\n else:\n\n print('PCODES0 ', len(y), len(pcodes))\n for n in range(0, len(y) ):\n\n label = '%4s'%pcodes[n]\n col = col_valid # 'k'\n if skipi[n] : col=col_outlier # 'r'\n print(dtxt, n, skipi[n], pcodes[n], x[n], y[n])\n ax.text(x[n],y[n],label,color=col,fontsize=10)\n\n#J8 v=plt.axis() #J8 maxv=max(v)\n\n#### 1:1 line ############################################################\n\n print('MAXv ', maxv, minv)\n\n #lin=(0,maxv) # 1:1 line\n lin=(minv,maxv) # 1:1 line\n ax.plot(lin,lin,'g--')\n\n#### regression line - all data ###########################################\n #[m,c]=np.polyfit(x,y,1) #r=np.corrcoef(x,y)\n\n #BUG fit=( c, c+m*lin[1] )\n fit=( c+m*lin[0], c+m*lin[1] )\n if skipOutliers:\n ax.plot(lin,fit,'r--')\n elif regline_wanted:\n ax.plot(lin,fit,'k--')\n #plt.plot(lin,fit,'c--')\n\n###########################################################################\n # Data without outliers\n if len(skip) > 0:\n xn = np.delete( x, skip ) \n yn = np.delete( y, skip ) \n else:\n xn = x.copy()\n yn = y.copy()\n\n###########################################################################\n# After removing outliers\n\n if skipOutliers:\n [mn,cn]=np.polyfit(xn,yn,1)\n rn=np.corrcoef(xn,yn)\n #BUG: fitn=( cn, cn+mn*lin[1] )\n fitn=( cn+mn*lin[0], cn+mn*lin[1] )\n ax.plot(lin,fitn,'b--') # non outliers in blue\n print('SKIPFIT ', mn, cn, lin[0], lin[1], fitn )\n\n\n vspan = maxv+abs(minv) # complete axis length\n if statsxy is not None:\n vpos=minv + statsxy[1]*vspan # vertical position for text\n else:\n vpos=minv + 0.17*maxv # vertical position for text below\n statsxy = [ 0.01, 0.95 ] # Oct 28 2022 testing\n dvpos=0.05*vspan # increment between text lines\n\n if addStats:\n\n regline = 'y= %4.2f x + %6.1f'%( m, c)\n #SKIP? 
if np.abs(c) < 1.0e-4*np.max(y): #????\n signtxt = ' + '\n if c < 0.0: signtxt = ' ' # minus part of number\n regtxt = r'$y= %4.2f x %s %6.1f$'%( m, signtxt, c)\n corrtxt = r'Corr.= %6.2f'%r[0,1]\n if addStats4: \n regline = r'$y= %6.4f x %s %6.3f$'%( m,signtxt, c)\n corrtxt = r'Corr.= %8.4f'%r[0,1]\n if addNME:\n nme = getNME(x,y)\n print(\"NME = \" , nme) \n corrtxt += ', NME= %.1f%%'%nme\n if addBias:\n bias = getBias(x,y)\n print(\"Bias = \" , bias) \n #corrtxt += ', Bias= %.1f r\"%s\"'%( bias, biasUnits)\n #corrtxt += ', Bias= %.1f %s'%( bias, r\"$\\mu$g/m$^3$\")\n corrtxt += ', Bias= %.1f %s'%( bias, biasUnits )\n xpos = minv + 0.6*vspan\n if statsxy is not None:\n xpos = minv + statsxy[0]*vspan\n print('TTTTT', m, c, maxv, minv, vpos, vspan, xpos)\n #plt.text(xpos,vpos,regline,color=col,fontsize=12)\n #plt.figtext(labelx,labely-0.05,regline,color=col,fontsize=12)\n print('XYSTAT', maxv, minv, labelx*maxv, labely*maxv, v, xlabel,label)\n # Switch to using ax.transAxes\n #tips from https://stackoverflow.com/questions/62856272/position-font-relative-to-axis-using-ax-text-matplotlib\n dvpos = 0.05 # ax coords 0-1\n #NMR ax.text(labelx,labely-dvpos,regline,color=col,fontsize=12,transform=ax.transAxes)\n #NMR ax.text(labelx,labely-2*dvpos,corrtxt,color=col,fontsize=12,transform=ax.transAxes)\n print('XPOS VPOS VVV AA', xpos, vpos, dvpos, minv, maxv, vspan )\n print('STATSXY', statsxy)\n #VVV ax.text(xpos,vpos-dvpos,regline,color=col,fontsize=12,transform=ax.transAxes)\n #AUG22 plt.figtext(0.01,0.95,regline,color=col,fontsize=12,transform=ax.transAxes)\n #AUG22 plt.figtext(0.01,0.90,corrtxt,color=col,fontsize=12,transform=ax.transAxes)\n plt.figtext(statsxy[0],statsxy[1],regline,color=col_outlier,fontsize=12,transform=ax.transAxes)\n plt.figtext(statsxy[0],statsxy[1]-0.05,corrtxt,color=col_outlier,fontsize=12,transform=ax.transAxes)\n #vpos -= dvpos\n #plt.text(xpos,vpos,corrtxt,color=col,fontsize=12)\n\n if skipOutliers: # Now text for non-outliers in black\n vpos -= dvpos\n #NMR ax.text(xpos,vpos,'y= %4.2f x + %6.1f'%( mn, cn),color='k',fontsize=12)\n plt.figtext(0.01,0.85,'y= %4.2f x + %6.1f'%( mn, cn),color=col_valid,fontsize=12,transform=ax.transAxes)\n vpos -= dvpos\n #NMR ax.text(xpos,vpos,'Corr.= %6.2f'%rn[0,1],color='k',fontsize=12)\n corrtxt = r'Corr.= %6.2f'%rn[0,1]\n if addNME:\n nme = getNME(xn,yn)\n print(\"NME = \" , nme) \n corrtxt += ', NME= %.1f%%'%nme\n if addBias:\n bias = getBias(xn,yn)\n print(\"Bias = \" , bias) \n #corrtxt += ', Bias= %.1f'%bias\n #corrtxt += ', Bias= %.1f %s'%( bias, r\"$\\mu$g/m$^3$\")\n corrtxt += ', Bias= %.1f %s'%( bias, biasUnits )\n plt.figtext(0.01,0.80,corrtxt,color=col_valid,fontsize=12,transform=ax.transAxes)\n if minxy is not None:\n minv=minxy\n ax.axis([minv,maxv,minv,maxv])\n\n if txt: # place in upper left\n vpos=minv+0.90*vspan # vertical position for text below, was 0.22\n xpos = minv + 0.01*vspan\n ax.text(xpos,vpos,txt,color='k',fontsize=12)\n #plt.xbound(0,2*maxv)\n #plt.axis('scaled')\n #plt.axis('equal')\n\n plt.tight_layout()\n if ofile:\n print(dtxt+'SAVES ', ofile)\n plt.savefig(ofile,bbox_inches='tight')\n else:\n print(dtxt+'SHOWS ', plotstyle)\n plt.show()\n \n if skipOutliers:\n return mn, cn, rn[0,1] # Stats\n else:\n return m, c, r[0,1] # Stats\n\n#maxv=24000\n#P.axis([0,maxv,0,maxv])\n#P.xlim(0,maxv)\n#P.ylim(0,maxv)\n#P.axis('scaled')\n#P.axis('equal')\n#P.title(r'Modelled versus Observed AOT40$_f$\\n(Year %s, Stations < %s m a.s.l., Model %s)'%( year, vlimit, rv ))\n\n#P.show()\n#P.savefig('CompU%d_%s_%s_%s.png' % 
(maxalt, year, rv, grid) )\n#P.savefig('CompU%d_%s_%s_%s.eps' % (maxalt, year, rv, grid) )\n\nif __name__ == '__main__':\n x = [ 1.0, 2.0, 3.3, 3.9, 5.2, 5.3 ]\n y = [ 1.2, 2.2, 2.7, 3.5, 5.2, 2.2 ]\n c = [ 'AT92', 'AA', 'CCC', 'DDD', 'EEE', 'OUT' ]\n #p=emepscatplot(x,y,'Testx','Testy')\n #maxalt=300 # Max altitude of stations\n #aot = r'AOT40$_\\mathrm{f}$'\n\n #p=emepscatplot(x,y,'Testx','Testy',addStats=True,dbg=True)\n #p=emepscatplot(x,y,'Testx','Testy',label='LABEL',addStats=True,dbg=True)\n\n # Illustrate some styles\n\n #for style in 'bmh ggplot seaborn-colorblind seaborn-deep'.split():\n for style in 'ggplot'.split():\n print('TESTING STYLE', style)\n p=emepscatplot(x,y,'Testx','Testy',label=style,plotstyle=style,addStats=True,dbg=True)\n p=emepscatplot(x,y,'Testx','Testy',label=style,plotstyle=style,addStats=True,dbg=True) #FAILS:,minv=3.0)\n #p=emepscatplot(x,y,'Testx','TestLog',label=style,plotstyle=style,addStats=True,loglog=True,dbg=True,minv=3.0)\n# p= emeploglogplot(x,y,'Testx','Testy',txt=None,pcodes=None)\n p= emeploglogplot(x,y,'Testx','Testy',txt=None,pcodes=c)\n# p.show()\n\n #p=emepscatplot(x,y,'Testx','Testy',addStats=True,pcodes=c,ofile='TestPlots.png')\n\n","repo_name":"mifads/pyscripts","sub_path":"emxplots/plotscat.py","file_name":"plotscat.py","file_ext":"py","file_size_in_byte":12631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72201108994","text":"__author__ = \"Manuel García-Amado\"\n__license__ = \"GPLv2\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Manuel García-Amado\"\n__email__ = \"militarpancho@gmail.com\"\n\n\"\"\"This program can change the license header inside files.\n\"\"\"\n#~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*\n\n\"\"\"\nSDN collector for Opendaylight API Rest\n\n\n\"\"\"\n\nimport os\nimport collector\nimport json\nfrom collector.collector import esCollector\nfrom datetime import datetime\nimport time\nimport threading\nimport logging\nimport argparse\nimport sys\nimport signal\n\nELASTICSEARCH = os.environ.get('ELASTICSEARCH', 'localhost')\nODL_HOST = os.environ.get('ODL_HOST', 'localhost')\nES_PORT = \"9200\"\nODL_PORT = \"8181\"\nPID_FILE = 'collector.pid'\nCOUNTID_FILE = 'countid'\n\ndef start(simulation_id, timesleep, countidfile, pidfile=None):\n collector = esCollector(\n hosts='{}:{}'.format(ELASTICSEARCH, ES_PORT),\n countidfile = countidfile,\n odl_endpoint='http://{}:{}'.format(ODL_HOST, ODL_PORT))\n #if not collector.validate_index(simulation_id):\n # logging.info(\"This simulation already exists\")\n # sys.exit()\n if pidfile:\n pid = str(os.getpid())\n if os.path.isfile(pidfile):\n logging.info(\"Collector module is running in background\")\n sys.exit()\n with open(pidfile, 'w+') as f:\n f.write(pid)\n logging.info(\"Collector started\")\n\n return collector\n\ndef play(simulation_id, timesleep, countidfile, pidfile=None):\n if countidfile:\n if not os.path.isfile(countidfile):\n logging.info('Collector module was not stopped')\n sys.exit()\n else:\n with open(countidfile, 'r') as f:\n countid = f.read()\n if pidfile:\n if os.path.isfile(pidfile):\n logging.info('Collector module was already running')\n sys.exit()\n else:\n pid = str(os.getpid())\n with open(pidfile, 'w+') as f:\n f.write(pid)\n logging.info('Collector unpaused')\n\n collector = esCollector(\n hosts='{}:{}'.format(ELASTICSEARCH, ES_PORT),\n countidfile=countidfile,\n odl_endpoint='http://{}:{}'.format(ODL_HOST, ODL_PORT),\n countid=int(countid))\n\n return 
collector\n\ndef collect(collector, simulation_id, timesleep, logging):\n    while True:\n        collector.add_data(simulation_id, timesleep, logging)\n        time.sleep(timesleep)\n\ndef pause(pidfile, countidfile):\n    if os.path.isfile(pidfile):\n        with open(pidfile, 'r') as f:\n            pid = f.read()\n        os.remove(pidfile)\n        try:\n            os.kill(int(pid), signal.SIGTERM)\n        except:\n            logging.info(\"Error at pausing %s\", sys.exc_info()[0])\n            pass\n        logging.info(\"collector paused\")\n        sys.exit()\n    else:\n        logging.info(\"Collector module is not running\")\n        sys.exit()\n\ndef stop(pidfile, countidfile):\n    if os.path.isfile(pidfile):\n        with open(pidfile, 'r') as f:\n            pid = f.read()\n        os.remove(pidfile)\n        if os.path.isfile(countidfile):\n            os.remove(countidfile)\n        try:\n            os.kill(int(pid), signal.SIGTERM)\n        except:\n            logging.info(\"Error at stopping %s\", sys.exc_info()[0])\n            pass\n        logging.info(\"collector stopped\")\n        sys.exit()\n    else:\n        logging.info(\"Collector module is not running\")\n        sys.exit()\n\ndef wait(sim_id, sleep_time, countidfile):\n    time.sleep(int(sleep_time))\n    logging.info(\"Waiting for Opendaylight response\")\n    try:\n        start(simulation_id=sim_id, timesleep=sleep_time, countidfile=countidfile)\n        logging.info(\"Collector restarted\")\n    except Exception as err:\n        logging.info(str(err))\n        wait(sim_id, sleep_time, countidfile)\n\ndef Main():\n    parser = argparse.ArgumentParser(description='SDN Collector')\n\n    parser.add_argument(\n        'cmd',\n        type=str,\n        help='Start, stop, pause or play Collector module',\n        choices=['start', 'stop', 'play', 'pause'])\n    parser.add_argument(\n        '--time',\n        '-t',\n        type=int,\n        default=10,\n        help='Time each OpenDayLight requests')\n\n    parser.add_argument(\n        '--simulation_id', '-s', type=str, required=True, help='Simulation ID')\n\n    parser.add_argument(\n        '--level',\n        '-l',\n        metavar='logging_level',\n        type=str,\n        default=\"INFO\",\n        help='Logging level')\n\n    args = parser.parse_args()\n\n    #Logging Section. File=/var/tmp/collector.log\n    logging.basicConfig(\n        level=args.level,\n        filename='/var/tmp/collector.log',\n        format='%(asctime)s %(levelname)s %(message)s')\n    console = logging.StreamHandler()\n    console.setLevel(logging.INFO)\n    formatter = logging.Formatter('%(asctime)s %(name)s %(message)s')\n    console.setFormatter(formatter)\n    logging.getLogger('').addHandler(console)\n\n    pidfile = \"/tmp/{}\".format(PID_FILE)\n    countidfile = '/tmp/{}'.format(COUNTID_FILE)\n\n    if args.cmd == 'start':\n        try:\n            collector = start(args.simulation_id, args.time, countidfile, pidfile)\n            collect(collector, args.simulation_id, args.time, logging)\n        except KeyboardInterrupt:\n            stop(pidfile, countidfile)\n        except TypeError as err:\n            logging.info(\"Type Error: %s\", err)\n            wait(args.simulation_id, args.time, countidfile)\n        except ValueError as err:\n            logging.info(\"Value Error: %s\", err)\n            wait(args.simulation_id, args.time, countidfile)\n        except:\n            logging.info(\"No Response from ODL: %s\", sys.exc_info()[0])\n            wait(args.simulation_id, args.time, countidfile)\n    elif args.cmd == 'stop':\n        stop(pidfile, countidfile)\n    elif args.cmd == 'pause':\n        pause(pidfile, countidfile)\n    elif args.cmd == 'play':\n        try:\n            collector = play(args.simulation_id, args.time, countidfile, pidfile)\n            collect(collector, args.simulation_id, args.time, logging)\n        except KeyboardInterrupt:\n            stop(pidfile, countidfile)\n        except TypeError as err:\n            logging.info(\"Type Error: %s\", err)\n            wait(args.simulation_id, args.time, countidfile)\n        except ValueError as err:\n            logging.info(\"Value Error: %s\", err)\n            wait(args.simulation_id, args.time, countidfile)\n        except:\n            logging.info(\"No Response from ODL: %s\", sys.exc_info()[0])\n            wait(args.simulation_id, args.time, countidfile)\n\nif __name__ == '__main__':\n    Main()\n","repo_name":"FernandoBenayas/collector","sub_path":"collector/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23621739001","text":"import argparse\nimport os\nfrom tqdm import tqdm\n\nfrom utils import *\nfrom dataset_utils import read_wikiqa_data, f1auc_score, wiki_evaluation\nfrom comp_utils import safe_completion, length_of_prompt\nimport numpy as np\n\n\ndef _parse_args():\n    parser = argparse.ArgumentParser()\n    add_engine_argumenet(parser)\n    # standard, instruction, etc\n    parser.add_argument('--style', type=str, default=\"standard\")\n    parser.add_argument('--annotation', type=str, default=\"std\")\n    parser.add_argument('--run_prediction', default=False, action='store_true')\n    parser.add_argument('--run_length_test', default=False, action='store_true')\n    parser.add_argument('--num_shot', type=int, default=6)\n    parser.add_argument('--train_slice', type=int, default=0)\n    parser.add_argument('--num_dev', type=int, default=1000) # first 58 for calibrating, last 250 for testing\n    
parser.add_argument('--dev_slice', type=int, default=0)\n parser.add_argument('--show_result', default=False, action='store_true')\n parser.add_argument('--model', type=str, default=\"gpt3\")\n parser.add_argument('--show_prompt', default=False, action='store_true')\n args = parser.parse_args() \n specify_engine(args)\n return args\n\ndef result_cache_name(args):\n return \"misc/few_tr{}-{}_dv{}-{}_predictions.json\".format(\n args.train_slice, args.train_slice + args.num_shot, \\\n args.dev_slice, args.num_dev)\n\ndef convert_paragraphs_to_context(s, connction='\\n'):\n return connction.join(['{}'.format(p) for i, p in enumerate(s['pars'])])\n\ndef in_context_prediction(ex, shots, engine, style=\"standard\", length_test_only=False):\n if style == \"standard\":\n showcase_examples = [\n \"Q: {}\\nA: {}\\n\".format(s[\"question\"], s[\"answer\"]) for s in shots\n ]\n input_example = \"Q: {}\\nA:\".format(ex[\"question\"])\n prompt = \"\\n\".join(showcase_examples + [input_example])\n else:\n raise RuntimeError(\"Unsupported prompt style\")\n if length_test_only:\n pred = length_of_prompt(prompt, 32)\n print(\"-----------------------------------------\")\n print(pred)\n print(prompt)\n return pred\n else:\n pred = safe_completion(engine, prompt, 32, '\\n', temp=0.0, logprobs=5) \n\n pred[\"id\"] = ex[\"id\"]\n pred[\"prompt\"] = prompt\n try:\n if len(pred[\"text\"]) > len(prompt):\n pred[\"text\"] = pred[\"text\"][len(prompt):]\n else:\n pred[\"text\"] = \"null\"\n return pred\n except:\n return None\n\ndef evaluate_few_shot_predictions(dev_set, predictions, do_print=False):\n acc_records = [] \n f1_records, pre_records, rec_records = [], [], []\n logprob_records = []\n \n for idx, (ex, pred) in enumerate(zip(dev_set, predictions)):\n p_ans = pred['text'].lstrip()\n acc, (f1, pre, rec), gt_ans = wiki_evaluation(p_ans, ex[\"answer\"])\n acc_records.append(acc) \n f1_records.append(f1), pre_records.append(pre), rec_records.append(rec)\n if 'answer_prob' in pred:\n logprob_records.append(pred['answer_prob'])\n if do_print and not acc:\n print(\"--------------{} EX {} F1 {:.2f}--------------\".format(idx, acc, f1))\n print(ex['question'])\n print('PR ANS:', p_ans) \n print('GT ANS:', gt_ans) \n print(json.dumps({'qas_id': ex['id'], 'answer': p_ans}))\n\n mean_of_array = lambda x: sum(x) / len(x)\n print(\"EX\", mean_of_array(acc_records))\n print(\"F1: {:.2f}\".format(mean_of_array(f1_records)), \n \"PR: {:.2f}\".format(mean_of_array(pre_records)),\n \"RE: {:.2f}\".format(mean_of_array(rec_records)))\n print(\"Acc-Cov AUC: {:.2f}\".format(f1auc_score(\n logprob_records, acc_records)))\n\ndef test_few_shot_performance(args):\n print(\"Running prediction\")\n train_set = read_wikiqa_data(f\"data/train_subset.json\")\n train_set = train_set[args.train_slice:(args.train_slice + args.num_shot)]\n dev_set = read_wikiqa_data(f\"data/dev_sampled.json\")\n dev_set = dev_set[args.dev_slice:(args.num_dev)]\n\n showcase_examples = [\n \"Q: {}\\nA: {}\\n\".format(s[\"question\"], s[\"answer\"]) for s in train_set\n ]\n prompt = \"\\n\".join(showcase_examples)\n print('prompt: ')\n print(prompt)\n \n if os.path.exists(result_cache_name(args)) and not args.run_length_test:\n predictions = read_json(result_cache_name(args))\n else:\n predictions = []\n for x in tqdm(dev_set, total=len(dev_set), desc=\"Predicting\"):\n pred = in_context_prediction(x, train_set, engine=args.engine, \\\n style=args.style, length_test_only=args.run_length_test)\n if pred == None:\n args.num_dev = len(predictions)\n break\n else:\n 
predictions.append(pred)\n\n if args.run_length_test:\n print(result_cache_name(args))\n print('MAX', max(predictions), 'COMP', 32)\n return\n # save\n dump_json(predictions, result_cache_name(args))\n # acc\n for p in predictions:\n p['answer_prob'] = calc_fewshot_pred_with_prob(p, args.style) \n evaluate_few_shot_predictions(dev_set, predictions, do_print=True)\n\n\ndef calc_fewshot_pred_with_prob(pred, style):\n if pred['text'] == \"null\" or pred['text'] == \"overflow\":\n print(\"find invalid\", pred[\"text\"])\n return .0\n completion_offset = len(pred[\"prompt\"])\n tokens = pred[\"logprobs\"][\"tokens\"]\n token_offset = pred[\"logprobs\"][\"text_offset\"]\n\n completion_start_tok_idx = token_offset.index(completion_offset)\n completion_end_tok_idx = tokens.index(\"<|endoftext|>\") + 1 if '<|endoftext|>' in tokens else len(tokens)\n completion_probs = pred[\"logprobs\"][\"token_logprobs\"][completion_start_tok_idx:(completion_end_tok_idx)]\n ans_logprob = sum(completion_probs)\n\n return np.exp(ans_logprob)\n\nif __name__=='__main__':\n args = _parse_args()\n test_few_shot_performance(args)","repo_name":"RuochenZhao/Verify-and-Edit","sub_path":"2WikiMultihopQA/few_shot.py","file_name":"few_shot.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"61"} +{"seq_id":"41055720336","text":"from typing import Any, Generator\n\nimport pytest\nfrom fastapi import Depends, FastAPI\nfrom fastapi.testclient import TestClient\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\nfrom sqlalchemy.orm import Session\n\nfrom fastapi_starter.db import get_db\n\n\n# pylint: disable=unused-argument\n@pytest.fixture(name=\"client\")\ndef client_fixture(app: FastAPI, db: Session) -> Generator[TestClient, None, None]:\n @app.post(\"/_tests/test_isolated_db_session\")\n def _create_table_route(db: Session = Depends(get_db)) -> Any:\n db.execute(text(\"CREATE table _tests_isolated_db_session (name VARCHAR(32))\"))\n db.execute(text(\"INSERT INTO _tests_isolated_db_session (name) VALUES ('foo')\"))\n db.commit()\n\n @app.get(\"/_tests/test_isolated_db_session\")\n def _get_first_row_route(db: Session = Depends(get_db)) -> Any:\n row = db.execute(text(\"SELECT name FROM _tests_isolated_db_session\")).first()\n if not row:\n return {\"name\": None}\n return {\"name\": row.name}\n\n with TestClient(app) as c:\n yield c\n\n\ndef test_table_created_by_calling_route(client: TestClient) -> None:\n client.post(\"/_tests/test_isolated_db_session\")\n\n r = client.get(\"/_tests/test_isolated_db_session\")\n data = r.json()\n\n assert data[\"name\"] == \"foo\"\n\n\ndef test_table_does_not_exist_in_the_next_test_with_get_route(client: TestClient) -> None:\n with pytest.raises(OperationalError):\n client.get(\"/_tests/test_isolated_db_session\")\n\n\ndef test_table_created_by_calling_db_fixture(db: Session) -> None:\n db.execute(text(\"CREATE table _tests_isolated_db_session (name VARCHAR(32))\"))\n db.execute(text(\"INSERT INTO _tests_isolated_db_session (name) VALUES ('foo')\"))\n db.commit()\n\n row = db.execute(text(\"SELECT name FROM _tests_isolated_db_session\")).first()\n\n assert row is not None\n assert row.name == \"foo\"\n\n\ndef test_table_does_not_exist_in_the_next_test_with_db_fixture(db: Session) -> None:\n with pytest.raises(OperationalError):\n db.execute(text(\"SELECT name FROM 
_tests_isolated_db_session\")).first()\n","repo_name":"filipsnastins/fastapi-starter","sub_path":"tests/test_isolated_db_session.py","file_name":"test_isolated_db_session.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21145717678","text":"# dictionary with course number and room number\r\ncourse_room = {'CS101': 3004, 'CS102': 4501,\r\n 'CS103': 6755, 'NT110': 1244,\r\n 'CM241': 1411}\r\n\r\n# dictionary with course number and instructor\r\ncourse_instructor = {'CS101': \"Haynes\", 'CS102': \"Alvarado\",\r\n 'CS103': \"Rich\", 'NT110': \"Burke\",\r\n 'CM241': \"Lee\"}\r\n\r\n# dictionary with course number and meeting time\r\ncourse_schedule = {'CS101': \"8:00am\", 'CS102': \"9:00am\",\r\n 'CS103': \"10:00am\", 'NT110': \"11:00am\",\r\n 'CM241': \"1:00pm\"}\r\n\r\n# asks user for course number\r\ncourse = \"\"\r\ncourse = input(\"Enter course number for details: \")\r\n\r\n# validation\r\nwhile (course != \"CS101\" and course != \"CS102\" and course != \"CS103\" and course != \"NT110\" and course != \"CM241\"):\r\n print(\"Invalid course. Try again..\")\r\n course = input(\"Enter course number for details: \")\r\n \r\n# output \r\nprint(course)\r\nprint(\"Room #:\", course_room.get(course))\r\nprint(\"Instructor #:\", course_instructor.get(course))\r\nprint(\"Meeting Time #:\", course_room.get(course))\r\n\r\n","repo_name":"martin2421/TRU-COMP","sub_path":"COMP 2210 - Programming Methods/Lab 5/CourseInfo.py","file_name":"CourseInfo.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22829482831","text":"import yfinance as yf\n\nimport numpy as np\nimport pandas as pd\n\nfrom GenerativeMarketEnv import GenerativeMarketEnv\nfrom HistoricalMarketEnv import HistoricalMarketEnv\n\n\nclass MarketFactory:\n\n def __init__(self, stocks, start_ymd, end_ymd, type='generative'):\n self.stocks = stocks\n self.start_ymd = start_ymd\n self.end_ymd = end_ymd\n self.type = type\n\n def create_market(self):\n stocks = ' '.join(self.stocks)\n yahoo_df = yf.download(stocks, start=self.start_ymd, end=self.end_ymd)\n returns_df = pd.DataFrame(columns=self.stocks)\n\n if self.type == 'generative':\n for stock in self.stocks:\n returns_df[stock] = yahoo_df['Close'][stock].pct_change()[1:]\n μ = np.array(returns_df.mean())\n Σ = np.array(returns_df.cov())\n return GenerativeMarketEnv(μ, Σ)\n elif self.type == 'historical':\n for stock in self.stocks:\n returns_df[stock] = (np.log(yahoo_df['Close'][stock]) - np.log(yahoo_df['Close'][stock].shift(1)))[1:]\n\n return HistoricalMarketEnv(len(self.stocks), returns_df, self.start_ymd, self.end_ymd)\n else:\n raise ValueError(\"{} i not a valid market type.\".format(self.type))","repo_name":"mjhoshea/portfolio-optimisation","sub_path":"src/rl/environments/MarketFactory.py","file_name":"MarketFactory.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9965935046","text":"#!~/anaconda2/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport re\nimport numpy as np\nimport mrjob\nfrom mrjob.protocol import RawProtocol\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nimport time\nimport logging\n\nclass mostLeastDenseWords(MRJob):\n \n # START STUDENT CODE 5.4.1.C\n \n MRJob.SORT_VALUES = True\n \n def __init__(self, args):\n 
super(mostLeastDenseWords, self).__init__(args)\n self.total_word_count = None\n \n def mapper(self, _, line):\n \n # Split line\n splits = line.rstrip(\"\\n\").split(\"\\t\")\n words = splits[0].lower().split()\n count = int(splits[1])\n \n for word in words:\n yield \"*\", count\n yield word, count\n \n \n def combiner(self, word, counts):\n total = sum(count for count in counts)\n yield word, total\n \n def reducer(self, word, counts):\n \n total = sum(count for count in counts)\n \n if word == \"*\":\n self.total_word_count = total\n else:\n yield float(total) / float(self.total_word_count), word\n \n def max_reducer(self, count, words):\n for word in words:\n yield word, count\n\n def steps(self):\n \n custom_jobconf = {\n 'stream.num.map.output.key.fields':'2',\n 'mapred.output.key.comparator.class': 'org.apache.hadoop.mapred.lib.KeyFieldBasedComparator',\n 'mapred.text.key.comparator.options': '-g -k1,1nr',\n 'mapred.reduce.tasks': '1'\n }\n\n return [\n MRStep(\n mapper=self.mapper,\n reducer=self.reducer,\n combiner = self.combiner),\n MRStep(jobconf=custom_jobconf,\n reducer=self.max_reducer)\n ]\n \n # END STUDENT CODE 5.4.1.C\n \nif __name__ == '__main__':\n start_time = time.time()\n mostLeastDenseWords.run()\n elapsed_time = time.time() - start_time\n mins = elapsed_time/float(60)\n a = \"\"\"\n Elapsed time: %s seconds\n In minutes: %s mins\"\"\" % (str(elapsed_time), str(mins))\n logging.warning(a)","repo_name":"mathaholic/w261_hw5_phase2","sub_path":"mostLeastDenseWords.py","file_name":"mostLeastDenseWords.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20750376517","text":"from flask import Flask\nfrom flask import Flask, render_template, json, request, url_for, jsonify\n\napp = Flask(__name__)\nHR = []\n\n#This route is used to input the information, will only show one heart rate value, which will be the one input from the URL. Used primarily by the android application\n@app.route('/',methods=['GET', 'POST'])\ndef getheartRate():\n\n heartrate = request.args['H']\n DT = request.args['D']\n\n HR.append(DT)\n HR.append(heartrate)\n\n print(json.dumps(HR))\n return heartrate\n\n#This route is used to show all heart rate and temporal values. Used primarily by the Unity application to retrieve the data\n@app.route('/return',methods=['GET', 'POST'])\ndef returnheartRate():\n\n keys = []\n values = []\n for i, j in enumerate(HR):\n if i % 2 == 0:\n keys.append(j)\n else:\n values.append(j)\n\n y = dict(zip(keys, values))\n y = json.dumps(y)\n return y\n","repo_name":"CosmicJules/UnderPressureVR","sub_path":"flaskProject1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5005499726","text":"import time\nfrom array import array\n\nimport neogiinstruments\nimport numpy as np\nimport panel as pn\nimport param\nimport stellarnet\n\nfrom ..ensemblebase import EnsembleBase, Coordinate, Coordinates\nfrom ... 
import utils\n\nname = \"stellarnet\"\n\n\ndef get_calibs() -> list:\n \"\"\"\n Scans for calibration files\n\n :return: list of all calibration files\n :rtype: list of PosixPath\n \"\"\"\n return list(utils.scan_directory({\"WavelengthPoweredCalib\": None}).keys())\n\n\nclass Ensemble(EnsembleBase):\n wavstart = param.Integer(default=780)\n wavend = param.Integer(default=900)\n wavstep = param.Integer(default=2)\n pstart = param.Integer(default=0)\n pstop = param.Integer(default=10)\n pstep = param.Number(default=0.5)\n mai_time = param.Integer(default=30)\n pwait = param.Integer(default=1)\n type = name\n data = \"stellarnet\"\n datasets = [\"Stellarnet\", \"V\", \"Vstd\"]\n dimensions = {\"Stellarnet\": [\"wavelength\", \"power\", \"emission_wavelength\"], \"V\": [\"wavelength\", \"power\"],\n \"Vstd\": [\"wavelength\", \"power\"]}\n cap_coords = {\"Stellarnet\": [\"emission_wavelength\"], \"V\": [], \"Vstd\": []}\n loop_coords = [\"wavelength\", \"power\"]\n debug = param.Boolean(default=False)\n live = False\n calibration_file = param.ObjectSelector()\n\n def __init__(self):\n files = get_calibs()\n if len(files) == 0:\n print(\"Needs calibration file \")\n self.param[\"calibration_file\"].objects = files\n self.param[\"calibration_file\"].default = files[0]\n super().__init__()\n self.filename = \"data/stellarnet.zarr\"\n self.rotator = neogiinstruments.rotator(\"rotator\")\n self.MaiTai = neogiinstruments.MaiTai()\n try:\n self.StellarNet = neogiinstruments.StellarNet()\n except stellarnet.NotFoundError:\n raise stellarnet.NotFoundError(\"Can't run stellarnet without stellarnet\")\n self.Photodiode = neogiinstruments.Photodiode()\n self.coords = Coordinates([\n Coordinate(\"wavelength\", \"nanometer\", \"wavelength\", step_function=self.wav_step),\n Coordinate(\"power\", \"degrees\", \"power\", step_function=self.pow_step),\n Coordinate(\"emission_wavelength\", \"nanometers\", \"emission_wavelength\")]\n )\n\n def wav_step(self, xs):\n self.MaiTai.instrument.Set_Wavelength(xs[0])\n if self.debug:\n print(f'moving to {xs[0]}')\n time.sleep(self.mai_time)\n self.pow_step(xs)\n if not self.debug:\n time.sleep(10)\n self.MaiTai.instrument.Shutter(1)\n if self.debug:\n print(f'starting loop at {xs[0]}')\n if not self.debug:\n time.sleep(5)\n\n def initialize(self):\n self.initialized = True\n exclude = []\n for param in self.param:\n if not param in exclude:\n self.param[param].constant = True\n\n self.init_vars()\n\n def start(self):\n self.rotator.instrument.home()\n\n def init_vars(self):\n self.coords[\"wavelength\"].values = np.arange(self.wavstart, self.wavend, self.wavstep, dtype=np.uint16)\n power = np.arange(self.pstart, self.pstop, self.pstep)\n self.coords[\"power\"].values = power\n emission_wavelength = self.StellarNet.instrument.GetSpec()[0]\n self.emission_length = len(emission_wavelength)\n self.coords[\"emission_wavelength\"].values = emission_wavelength\n self.pc_reverse = utils.interpolate(self.calibration_file, pwr=power)\n\n def pow_step(self, xs: array):\n pow = xs[1]\n wav = xs[0]\n pol = self.pc_reverse.sel(power=pow, wavelength=wav).values\n if self.debug:\n print(f\"moving to {pol}\")\n if -360 < pol < 360:\n self.rotator.instrument.move_abs(pol)\n time.sleep(self.pwait)\n\n def get_frame(self, coords):\n data = self.StellarNet.instrument.GetSpec()[1]\n V, Vstd = self.Photodiode.instrument.gather_data()\n return {\"Stellarnet\": data, \"V\": V, \"Vstd\": Vstd}\n\n def widgets(self):\n if self.initialized:\n return pn.Column(self.rotator.view, 
self.StellarNet.view, self.MaiTai.view, self.Photodiode.view)\n else:\n return None\n","repo_name":"UNTNeogiLab/Dashboard","sub_path":"neogidashboard/ensembles/stellarnet/ensemble_Stellarnet.py","file_name":"ensemble_Stellarnet.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"31091930750","text":"\"\"\"\neasy to parse\n\n\"\"\"\n\nimport json\n\n# loads\n# load\n\n# json should have double quotes (\"\")\n# string to json\ndata = '{\"var1\":\"harry\", \"var2\":56}' # parse this data\n\nparse = json.loads(data)\nprint(parse)\nprint(type(parse))\n# print(data)\n# this output looks same as above , but you can not use parse['var1'] here. u can not parse the data\n# since it is a string\n\n# task1 -> read about json.load\n\n# json.dumps\n\ndata2 = {\n \"channel_name\": \"zeetv\",\n \"cars\": ['maruti', 'bmw', 'audi', 'ferrari'],\n \"fridge\": ('roti', 'dosa')\n}\n\n# dictionary to json right now data2 will give error as JSON object, it will work in python but not in javascrpt,\n# so to make it java script compatible:\n\nprint(json.dumps(data2)) # now u can use in java script\n\n\n# task2 -> what is sort_keys , indent parameter in dumps","repo_name":"RiyaMittal/pythonSessions","sub_path":"json_module.py","file_name":"json_module.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22430736000","text":"from apiwrapper.endpoints.endpoint import Endpoint\n\n\nclass DeviceServer(Endpoint):\n\n __endpoint_device_server = \"device-server\"\n\n @classmethod\n def _get_base_endpoint(cls, server_id=None):\n endpoint = \"/%s\" % cls.__endpoint_device_server\n if server_id is not None:\n endpoint += \"/%d\" % server_id\n return endpoint\n\n def get_all_device_servers(self):\n endpoint = self._get_base_endpoint()\n\n return self._make_get_request(endpoint)\n\n def get_device_server_by_id(self, server_id):\n endpoint = self._get_base_endpoint(server_id)\n\n return self._make_get_request(endpoint)\n\n def create_new_device_server(self, description, permitted_ips=None):\n endpoint = self._get_base_endpoint()\n payload = {\n 'description': description,\n 'secret': self._api_client.api_key\n }\n if permitted_ips is not None:\n payload[\"permitted_ips\"] = permitted_ips\n\n return self._make_post_request(endpoint, payload)\n\n\n","repo_name":"OGKevin/ComBunqWebApp","sub_path":"apiwrapper/endpoints/device_server.py","file_name":"device_server.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"61"} +{"seq_id":"23569208681","text":"#!/usr/bin/env python\r\n\r\nf = open('C-small-1-attempt0.in', 'r')\r\nw = open('C-small-1-attempt0.out', 'w')\r\n\r\nn = int(f.readline())\r\nfor i in range(0, n):\r\n\ts, p = map(int, f.readline().split(' '))\r\n\r\n\ta = [False] * (s + 2)\r\n\ta[0] = True\r\n\ta[s + 1] = True\r\n\r\n\txmin = -1\r\n\txmax = -1\r\n\r\n\tfor c in range(0, p):\t\t\r\n\t\tl = 0\r\n\t\tcg = 0\r\n\t\tmg = 0\r\n\t\tx = -1\r\n\r\n\t\tfor j in range(1, s + 2):\r\n\t\t\tif a[j]:\r\n\t\t\t\tm = l + (j - l) / 2\r\n\t\t\t\t\r\n\t\t\t\tif m < 0:\r\n\t\t\t\t\tm = 0\r\n\t\t\t\tml = m - l - 1\r\n\t\t\t\tmr = j - m - 1\r\n\r\n\t\t\t\tif cg > mg:\t\t\t\t\t\r\n\t\t\t\t\tmg = cg\r\n\t\t\t\t\tx = m\r\n\t\t\t\t\tif ml > mr:\r\n\t\t\t\t\t\txmin = mr\r\n\t\t\t\t\t\txmax = ml\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\txmin = 
ml\r\n\t\t\t\t\t\txmax = mr\r\n\r\n\t\t\t\tcg = -1\r\n\t\t\t\tl = j\r\n\r\n\t\t\tcg += 1\r\n\r\n\t\ta[x] = True\r\n\r\n\tw.write('Case #%i: %i %i\\n' % (i + 1, xmax, xmin))\r\n\r\nf.close()\r\nw.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_201/2061.py","file_name":"2061.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25482557139","text":"import random\ndef game1():\n while True: \n user_action = input(\"Enter a choice (boulder, tree, axe): \")\n possible_actions = [\"boulder\", \"tree\", \"axe\", \"bomb\"]\n computer_action = random.choice(possible_actions)\n print(f\"\\nYou chose {user_action}, computer chose {computer_action}.\\n\")\n if user_action == computer_action:\n print(f\"And...both players selected {user_action} nobody wins. Its a tie!\")\n elif user_action == \"boulder\":\n if computer_action == \"tree\":\n print(\"Tree stops boulder!! You lose!\")\n if computer_action == \"axe\":\n print(\"Snap! The axe couldn't cut the boulder, you win!\")\n if computer_action == \"bomb\":\n print(\"Boulder? More like pebble, you lose!!\")\n elif user_action == \"axe\":\n if computer_action == \"tree\":\n print(\"Timber! Axe chops down tree, you win!\")\n if computer_action == \"boulder\":\n print(\"Snap! Stone is a little harder than wood, you lose!!\")\n if computer_action == \"bomb\":\n print(\"Not cutting much anymore...you lose!!\")\n elif user_action == \"tree\":\n if computer_action == \"boulder\":\n print(\"Tree stops boulder! You win!\")\n if computer_action == \"axe\":\n print(\"Timberr! Down goes the tree, you lose!!\")\n if computer_action == \"bomb\":\n print(\"Smells like fire...wood. You lose!\")\n play_again = input(\"Play again? 
(yes/no):\")\n if play_again.lower() !=\"y\":\n breakpoint\ndef game2(): \n '''\n\nThis is a python game that was created using the built-in turtle module similar to the Google Snake Game \n\n'''\n\n\n\nimport turtle \n\ntop_score = 0\n\ngame = turtle.Screen()\n\ngame.bgcolor(\"gray\")\n\ngame.setup(width=500, height=500)\n\ngame.title(\"Snake Game: Abdulrahman Abbas\")\n\ngame.tracer(0)\n\n\n\nimport time\n\ndelay = 0.1\n\nscore = 0\n\n\n\nfirst_segment = turtle.Turtle()\n\nfirst_segment.speed(0)\n\n#fastest animation speed so there's no lag\n\nfirst_segment.shape(\"circle\")\n\nfirst_segment.color(\"blue\")\n\nfirst_segment.penup()\n\nfirst_segment.direction = \"stop\"\n\n\n\nsegments = []\n\n\n\nfood = turtle.Turtle()\n\nfood.speed(0)\n\nfood.shape(\"triangle\")\n\nfood.color(\"purple\")\n\nfood.penup()\n\nfood.goto(0,200)\n\n\n\n\n\npen = turtle.Turtle()\n\npen.speed(0)\n\npen.color(\"black\")\n\npen.penup()\n\npen.ht() #ht is hide turtle\n\npen.goto(0, 220)\n\npen.write(\"Current Score: 0 ----- Top Score: 0\", align=\"center\", font=(\"Sans serif\", 30, \"normal\"))\n\n\n\n\n\n\n\n\n\ndef move():\n\n if first_segment.direction == 'up':\n\n first_segment.sety(first_segment.ycor()+20)\n\n if first_segment.direction == 'left':\n\n first_segment.setx(first_segment.xcor()-20)\n\n if first_segment.direction == 'down':\n\n first_segment.sety(first_segment.ycor()-20)\n\n if first_segment.direction == 'right':\n\n first_segment.setx(first_segment.xcor()+20)\n\n\n\n\n\ndef up():\n\n first_segment.direction = 'up'\n\ndef down():\n\n first_segment.direction = 'down'\n\ndef left():\n\n first_segment.direction = 'left'\n\ndef right():\n\n first_segment.direction = 'right'\n\n\n\ngame.listen()\n\ngame.onkeypress(up, 'Up')\n\ngame.onkeypress(down, 'Down')\n\ngame.onkeypress(left, 'Left')\n\ngame.onkeypress(right, 'Right')\n\n\n\nimport random \n\n\n\nwhile True:\n\n game.update()\n\n\n\n if first_segment.xcor()>230 or first_segment.xcor()<-230 or first_segment.ycor()>230 or first_segment.ycor()<-230:\n\n time.sleep(1)\n\n first_segment.goto(0,0)\n\n first_segment.direction = 'stop'\n\n\n\n #get rid of segments\n\n for i in segments:\n\n i.goto(1000,1000)\n\n segments = []\n\n score = 0\n\n delay = 0.1\n\n\n\n pen.clear()\n\n pen.write(\"Current Score: \" + str(score) +' ----- '+ \"Top Score: \" + str(top_score), align='center', font=('Sans Serif', 30, \"normal\" ))\n\n if first_segment.distance(food) < 15:\n\n x = random.randint(-220,220)\n\n y = random.randint(-220,220)\n\n food.goto(x,y)\n\n #this moves the food to a random spot on within the screen\n\n body_segment = turtle.Turtle()\n\n body_segment.speed(0)\n\n body_segment.shape('square')\n\n body_segment.color(\"blue\")\n\n body_segment.penup()\n\n segments.append(body_segment)\n\n\n\n delay -= 0.001\n\n score += 1\n\n\n\n if score > top_score:\n\n top_score = score\n\n\n\n pen.clear()\n\n pen.write(\"Current Score: \" + str(score) +' ----- '+ \"Top Score: \" + str(top_score), align='center', font=('Sans Serif', 30, \"normal\" ))\n\n \n\n for i in range(len(segments)-1,0, -1):\n\n x = segments[i-1].xcor()\n\n #moves the segment to where the segment before it was\n\n y = segments[i-1].ycor()\n\n segments[i].goto(x,y)\n\n \n\n if len(segments) > 0:\n\n x = first_segment.xcor()\n\n y = first_segment.ycor()\n\n segments[0].goto(x,y)\n\n\n\n move()\n\n\n\n for i in segments:\n\n if i.distance(first_segment) < 20:\n\n time.sleep(1)\n\n first_segment.goto(0,0)\n\n first_segment.direction = \"stop\"\n\n for i in segments:\n\n i.goto(1000,1000)\n\n 
segments = [] \n\n\n\n    time.sleep(delay)\n\ngame.mainloop()\n\nif __name__ == '__main__': \n    choice = input('Please select 1 for game1 and 2 for game2')\n    if choice == '1':\n        game1()\n    elif choice == '2':\n        game2()\n    else:\n        print('Please select a valid choice next time...!')\n","repo_name":"aabbas99/Games","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38836575243","text":"## @ingroup Components-Energy-Storages-Batteries-Variable_Mass\n# Aluminum_Air.py\n# \n# Created:  Oct 2014, M. Vegh\n# Modified: Feb 2016, T. MacDonald\n\n# ----------------------------------------------------------------------\n#  Imports\n# ----------------------------------------------------------------------\n\n# suave imports\nimport SUAVE\n\n# package imports\nfrom SUAVE.Core import Units\nfrom SUAVE.Components.Energy.Storages.Batteries import Battery\n\n# ----------------------------------------------------------------------\n#  Aluminum_Air\n# ----------------------------------------------------------------------    \n## @ingroup Components-Energy-Storages-Batteries-Variable_Mass\nclass Aluminum_Air(Battery):\n    \"\"\"\n    Specifies discharge/specific energy characteristics specific to\n    aluminum-air batteries. Also includes parameters related to \n    consumption of aluminum, oxygen, and water\n    \"\"\"\n    \n    \n    \n    def __defaults__(self):\n        self.specific_energy = 1300.*Units.Wh/Units.kg # convert to Joules/kg\n        self.specific_power = 0.2*Units.kW/Units.kg # convert to W/kg\n        self.mass_gain_factor = 0.000110145*Units.kg/Units.Wh\n        self.water_mass_gain_factor = 0.000123913*Units.kg/Units.Wh\n        self.aluminum_mass_factor = 0.000123828*Units.kg/Units.Wh # aluminum consumed per energy\n        self.ragone.const_1 = 0.8439*Units.kW/Units.kg\n        self.ragone.const_2 = -4.8647e-004/(Units.Wh/Units.kg)\n        self.ragone.lower_bound = 1100.*Units.Wh/Units.kg\n        self.ragone.upper_bound = 1600.*Units.Wh/Units.kg\n    \n    def find_water_mass(self, energy):\n        water_mass = energy*self.water_mass_gain_factor\n        return water_mass\n    \n    def find_aluminum_mass(self, energy):\n        aluminum_mass = energy*self.aluminum_mass_factor\n        return aluminum_mass","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Components/Energy/Storages/Batteries/Variable_Mass/Aluminum_Air.py","file_name":"Aluminum_Air.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"10312627624","text":"class NODE:\n\tdef __init__(self,data):\n\t\tself.data=data\n\t\tself.next=None\n\t\tself.prev=None\n#creates a node with next and previous pointers\nclass dll:\n\tdef __init__(self):\n\t\tself.head=None\n#head will always point to the start\n\tdef ADD_LAST(self,data):\n\t\tnewnode=NODE(data)\n#allocate a node\n\t\tif self.head==None:\n\t\t\tself.head=newnode\n\t\telse:\n\t\t\ttemp=self.head\n\t\t\twhile temp.next!=None:\n\t\t\t\ttemp=temp.next\n\t\t\ttemp.next=newnode\n\t\t\tnewnode.prev=temp\n\tdef ADD_FIRST(self,data):\n\t\tnewnode=NODE(data)\n\t\tnewnode.next=self.head\n\t\tif self.head!=None:\n\t\t\tself.head.prev=newnode\n\t\tself.head=newnode\n#adding previous and next pointers in each function\n\tdef disp(self,node):\n\t\tif self.head==None:\n\t\t\tprint(\"no data\")\n\t\tprint(\"printing data \")\n\t\twhile node:\n\t\t\tprint(node.data,end=\" \")\n\t\t\tlast=node\n\t\t\tnode=node.next\n#taking a last variable because we can't use node for previous because 
at last it will be pointing to None\n\t\tprint(\"\\nreverse order\")\n\t\twhile last:\n\t\t\tprint(last.data,end=\" \")\n\t\t\tlast=last.prev\n\tdef ADD_AFTER(self,pNODE,data):\n\t\tnewnode=NODE(data)\n\t\tif pNODE==None:\n\t\t\tprint(\"previous node can not be null\")\n\t\telse:\n\t\t\tnewnode.next=pNODE.next\n\t\t\tpNODE.next=newnode\n\t\tnewnode.prev=pNODE\n\t\tif newnode.next:\n\t\t\tnewnode.next.prev=newnode\n\nnod=dll()\n'''nod.disp(n)'''\nnod.ADD_LAST(38)\nnod.ADD_LAST(78)\nnod.ADD_FIRST(98)\nnod.ADD_AFTER(nod.head.next,99)\nnod.ADD_FIRST(56)\nnod.disp(nod.head)","repo_name":"iamninja1999/pythonProgramming","sub_path":"doublyll.py","file_name":"doublyll.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3982963543","text":"import logging\nimport os\nfrom pathlib import Path\nfrom typing import List\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_signatures(path: Path) -> set:\n signatures = set()\n ignored_dirs = [\"Backups.backupdb\", \"System Volume Information\"]\n\n for root, dirs, files in os.walk(path.as_posix()):\n files = [f for f in files if not f[0] == \".\"]\n dirs[:] = [d for d in dirs if not d[0] == \".\" and d not in ignored_dirs]\n\n for filename in files:\n filepath = os.path.join(root, filename)\n try:\n signatures.add((filename, os.path.getsize(filepath)))\n except OSError:\n logger.warning(f\"Could not get signature for {filepath}\")\n signatures.add((filename, -1))\n\n return signatures\n\n\ndef get_missing(src: str, dst: str) -> (List[str], int):\n logger.info(f\"Searching all files from {src} in {dst}\")\n src = Path(src)\n\n missing = get_signatures(src)\n count = len(missing)\n logger.info(\"Found %d files on %s\", count, src)\n\n endings = tuple({os.path.splitext(m[0])[1] or m[0] for m in missing})\n ignored_dirs = [\"Backups.backupdb\", \"System Volume Information\"]\n\n for root, dirs, files in os.walk(dst):\n dirs.sort(reverse=True)\n\n files = [f for f in files if not f[0] == \".\" and f.endswith(endings)]\n dirs[:] = [d for d in dirs if not d[0] == \".\" and d not in ignored_dirs]\n\n for filename in files:\n filepath = os.path.join(root, filename)\n try:\n missing.difference_update([(filename, os.path.getsize(filepath))])\n except OSError:\n logger.warning(f\"Could not get size for {filepath}\")\n\n # only return the filenames\n missing = [m[0] for m in missing]\n logger.info(f\"Missing files: {missing}\")\n\n return missing, count\n","repo_name":"ottomatic-io/ocopy","sub_path":"ocopy/backup_check.py","file_name":"backup_check.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"61"} +{"seq_id":"20161301358","text":"import cv2\r\nimport statistics as st\r\nimport numpy as np\r\nfrom word_segment import prepareImg\r\nfrom word_segment import wordSegmentation\r\nfrom infertext import infer\r\nimport tensorflow as tf\r\nimport os.path\r\nimport glob\r\n\r\nimg_dir = r\"./Buffalo_Dataset\" # Enter Directory of all images \r\ndata_path = os.path.join(img_dir,'*g')\r\nfiles = glob.glob(data_path)\r\ndirectory=\"Buffalo_TEXT\"\r\npath=os.path.join('./',directory)\r\nos.mkdir(path)\r\nsave_path= './'+directory\r\n#int(input())\r\nfor f1 in files:\r\n img = cv2.imread(f1,cv2.IMREAD_GRAYSCALE)\r\n print(img.shape)\r\n img = np.array(img, dtype=np.uint8)\r\n #print(f1)\r\n i=340\r\n mi=st.mean(img[i][60:100])\r\n x=0\r\n for j in range(30):\r\n 
up=st.mean(img[i+j][60:100])\r\n #print(up,mi)\r\n if up 40:\r\n hi=100\r\n dim=(wordImg.shape[1],hi)\r\n resi= cv2.resize(wordImg, dim, interpolation = cv2.INTER_AREA)\r\n te+=[w[1]]\r\n #l=temp\r\n l=np.append(l,resi,axis=1)\r\n l=np.append(l,temp,axis=1)\r\n #print(l.shape)\r\n \r\n #cv2.imwrite(\"image3.jpg\",l)\r\n #int(input(\"from2\"))\r\n if len(l[0])>1:\r\n text=infer(te)\r\n f.write(\" \".join(str(item) for item in text[0]))\r\n f.write('\\n')\r\n #input()\r\n res=[]\r\n a=np.array([[]])\r\n a=[]\r\n temp=np.full((100,100),255)\r\n for i in range(li,3150,103):\r\n if (sum(img[i+15][800:1600])/800+sum(img[i+16][800:1600])/800+sum(img[i+17][800:1600])/800)/3<=240:\r\n img1=img[i+3:i+106] \r\n res = wordSegmentation(img1, kernelSize=101, sigma=11, theta=10, minArea=7000) \r\n elif st.mean(img[i+90][240:2333])>=253:\r\n f.write('\\n')\r\n continue\r\n else:\r\n img1=img[i+20:i+150]\r\n res = wordSegmentation(img1, kernelSize=101, sigma=11, theta=10, minArea=1000)\r\n \r\n #cv2.imwrite(\"img1.jpg\",img1)\r\n #int(input(\"from1\"))\r\n te=[]\r\n l=np.full((100,1),255)\r\n for (j, w) in enumerate(res):\r\n (wordBox, wordImg) = w\r\n #cv2.imwrite(\"image3.jpg\",wordImg)\r\n (x,y,w1,h)=wordBox\r\n if y+h//2 > 40:\r\n hi=100\r\n dim=(wordImg.shape[1],hi)\r\n resi= cv2.resize(wordImg, dim, interpolation = cv2.INTER_AREA)\r\n te+=[w[1]]\r\n #l=temp\r\n l=np.append(l,resi,axis=1)\r\n l=np.append(l,temp,axis=1)\r\n #print(l.shape)\r\n a+=[l]\r\n #cv2.imwrite(\"image3.jpg\",l)\r\n #int(input(\"from2\"))\r\n #print(l)\r\n if len(l[0])>1:\r\n text=infer(te)\r\n f.write(\" \".join(str(item) for item in text[0]))\r\n f.write('\\n')\r\n #print(text)\r\n","repo_name":"Manan2506/Handwritten-Text-Recognition","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18545378136","text":"import socket\nimport pickle\nfrom constants import *\nfrom database import pokemons\n\nclass TCP_server:\n def __init__(self): \n server_address = (TCP_IP, TCP_PORT) \n self.connections = 0\n \n # TCP socket\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.bind(server_address)\n self.server_socket.listen(5)\n print(f\"Servidor TCP ligado em {server_address}\")\n \n # Register IP\n # UDP socket\n self.udpserver_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n resquest = f\"REGISTER-tcpserver,{TCP_IP}/{TCP_PORT}\".encode(\"utf-8\")\n self.udpserver_socket.sendto(resquest, (DNS_IP, DNS_PORT))\n dns_reponse, _ = self.udpserver_socket.recvfrom(BUFFER_SIZE)\n print(dns_reponse.decode(\"utf-8\"))\n \n self.update()\n \n def close(self):\n resquest = f\"REGISTER-tcpserver,NONE\".encode(\"utf-8\")\n self.udpserver_socket.sendto(resquest, (DNS_IP, DNS_PORT))\n dns_reponse, _ = self.udpserver_socket.recvfrom(BUFFER_SIZE)\n print(dns_reponse.decode(\"utf-8\"))\n self.server_socket.close()\n \n def update(self):\n while True:\n if(self.connections >= 5):\n break;\n self.connections += 1\n # Pega requisição do cliente\n client_socket, address = self.server_socket.accept()\n data = client_socket.recv(BUFFER_SIZE)\n data = data.decode(\"utf-8\")\n print(f\"Buscando pokemons do tipo {data}...\")\n\n # Envia a resposta para o cliente\n response = self.choose_pokemon(data)\n client_socket.send(response)\n self.close()\n\n def choose_pokemon(self, type_name):\n if type_name not in pokemons: \n return pickle.dumps([])\n \n pokes = 
pokemons[type_name]\n return pickle.dumps(pokes)\n \nif __name__ == \"__main__\":\n TCP_server()","repo_name":"h3nrey/PythonSockets","sub_path":"tcp_server.py","file_name":"tcp_server.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40233357878","text":"from __future__ import print_function, with_statement\n\nimport logging\nimport os\nimport shutil\nfrom subprocess import Popen, PIPE\n\nfrom pants.backend.jvm.jar_dependency_utils import M2Coordinate\nfrom pants.backend.jvm.ivy_utils import IvyUtils, IvyInfo, IvyModuleRef\nfrom pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin\nfrom pants.backend.jvm.tasks.unpack_jars import UnpackJars\nfrom pants.backend.jvm.targets.jar_library import JarLibrary\nfrom pants.base.exceptions import TaskError\nfrom pants.util.dirutil import safe_mkdir, safe_mkdtemp\nfrom pants.fs.archive import ZIP\nfrom pants.backend.jvm.tasks.classpath_products import ClasspathProducts\nfrom pants.ivy.ivy_subsystem import IvySubsystem\n\nfrom squarepants.plugins.unpack_archives.targets.unpacked_archives import UnpackedArchives\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UnpackArchives(IvyTaskMixin, UnpackJars):\n \"\"\"Downloads and extracts archives for unpacked_archives() targets.\n\n This has two key features that distinguish it from UnpackJars:\n 1. It works on tarballs (in addition to jars and zips).\n 2. It extracts the files to a directory specified by the unpacked_archives() target, instead of\n to a hidden location inside .pants.d.\n \"\"\"\n\n class TarExtractionError(TaskError):\n \"\"\"Error running the tar extraction subprocess.\"\"\"\n\n @classmethod\n def prepare(cls, options, round_manager):\n super(UnpackArchives, cls).prepare(options, round_manager)\n round_manager.require_data('compile_classpath')\n\n @classmethod\n def product_types(cls):\n # NB(gmalmquist): This is just here to override the products from the superclass.\n return []\n\n @classmethod\n def global_subsystems(cls):\n return super(IvyTaskMixin, cls).global_subsystems() + (IvySubsystem,)\n\n def _filtered_copy(self, src_dir, dst_dir, filter_func=None):\n copied_count = 0\n filter_func = filter_func or (lambda _: True)\n for (root, dirnames, filenames) in os.walk(src_dir):\n for name in filenames:\n src_path = os.path.join(root, name)\n rel_path = os.path.relpath(src_path, src_dir)\n if not filter_func(rel_path):\n continue\n dst_path = os.path.join(dst_dir, rel_path)\n safe_mkdir(os.path.dirname(dst_path))\n shutil.copyfile(src_path, dst_path)\n copied_count += 1\n return copied_count\n\n def _extract_tar(self, tar_path, unpack_dir, filter_func=None):\n temp_unpack_dir = safe_mkdtemp()\n with self.context.new_workunit(name='tar-extract'):\n p = Popen(['tar', 'xzf', tar_path, '-C', temp_unpack_dir], stdout=PIPE, stderr=PIPE)\n out, err = p.communicate()\n if p.returncode != 0:\n raise self.TarExtractionError('Error unpacking tar file \"{}\" (code={}).\\nStderr: {}'\n .format(tar_path, p.returncode, err))\n with self.context.new_workunit(name='filtered-copy'):\n copied = self._filtered_copy(temp_unpack_dir, unpack_dir, filter_func=filter_func)\n self.context.log.info('Copied {} extracted files.'.format(copied))\n\n def _unpack(self, unpacked_archives):\n \"\"\"Extracts files from the downloaded jar files and places them in a work directory.\n\n :param UnpackedArchives unpacked_archives: target referencing jar_libraries to unpack.\n \"\"\"\n 
self.context.log.info('Unpacking {}'.format(unpacked_archives.address.spec))\n unpack_dir = unpacked_archives.destination\n safe_mkdir(unpack_dir, clean=True)\n\n unpack_filter = self.get_unpack_filter(unpacked_archives)\n classpath_products = ClasspathProducts(self.get_options().pants_workdir)\n resolve_hashes = self.resolve(None, unpacked_archives.dependencies, classpath_products)\n ivy_cache_dir = os.path.expanduser(IvySubsystem.global_instance().get_options().cache_dir)\n\n def to_m2(jar):\n return M2Coordinate(org=jar.org, name=jar.name, rev=jar.rev, classifier=jar.classifier,\n ext=jar.ext)\n\n libraries = self.context.build_graph.transitive_subgraph_of_addresses([unpacked_archives.address])\n libraries = [t for t in libraries if isinstance(t, JarLibrary)]\n coords = set()\n for library in libraries:\n coords.update(to_m2(jar) for jar in library.payload.jars)\n\n for resolve_hash in resolve_hashes:\n path = IvyUtils.xml_report_path(ivy_cache_dir, resolve_hash, 'default')\n info = IvyUtils.parse_xml_report('default', path)\n refs_for_libraries = set()\n for ref in info.modules_by_ref.keys():\n if to_m2(ref) in coords:\n refs_for_libraries.add(ref)\n\n memo = {}\n for ref in tuple(refs_for_libraries):\n info.traverse_dependency_graph(ref, refs_for_libraries.add, memo)\n\n for ref in sorted(refs_for_libraries):\n module = info.modules_by_ref[ref]\n artifact_path = module.artifact\n self.context.log.debug('Extracting {} to {}.'.format(to_m2(ref), unpack_dir))\n if artifact_path.endswith('.zip') or artifact_path.endswith('.jar'):\n ZIP.extract(artifact_path, unpack_dir, filter_func=unpack_filter)\n else:\n self._extract_tar(artifact_path, unpack_dir, filter_func=unpack_filter)\n\n def execute(self):\n addresses = [target.address for target in self.context.targets()]\n closure = self.context.build_graph.transitive_subgraph_of_addresses(addresses)\n for target in closure:\n if isinstance(target, UnpackedArchives):\n self._unpack(target)\n","repo_name":"ericzundel/mvn2pants","sub_path":"src/python/squarepants/plugins/unpack_archives/tasks/unpack_archives.py","file_name":"unpack_archives.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"40788226735","text":"with open('inputs/day5.txt') as f:\n data = f.read().split('\\n')\n\nseating = {'B': lambda rc: [(1 + sum(rc[0]) // 2, rc[0][1]), rc[1]],\n 'F': lambda rc: [(rc[0][0], sum(rc[0]) // 2), rc[1]],\n 'R': lambda rc: [rc[0], (1 + sum(rc[1]) // 2, rc[1][1])],\n 'L': lambda rc: [rc[0], (rc[1][0], sum(rc[1]) // 2)]}\n\n\ndef find_seat_id(boarding):\n row_col_range = [(0, 127), (0, 7)]\n for char in boarding:\n row_col_range = seating[char](row_col_range)\n return 8 * row_col_range[0][0] + row_col_range[1][0]\n\n\nseats = sorted(find_seat_id(seat) for seat in data)\nprint(max(seats))\n\nfor seat_id in range(seats[1], seats[-1]):\n if seat_id not in seats:\n print(seat_id)\n break\n","repo_name":"DurhamCS/AdventOfCode","sub_path":"2020/Darius/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"13872006099","text":"import pandas as pd\nimport torchtext\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nfrom data_loader import DataLoader\nfrom RNN import 
RNN\n\ndef ComputeAccr(dloader, model):\n correct = 0\n total = 0\n \n model.eval()\n for i, data in enumerate(dloader):\n texts = data.text.to(device)\n labels = data.label.to(device)\n \n output = model(texts)\n _, output_index = torch.max(output, 1)\n \n total += labels.size(0)\n correct += (output_index == labels).sum().float()\n model.train()\n return (100 * correct / total).cpu().numpy()\n\n\nbatch_size = 128\nnum_epochs = 20\n\nword_vec_size = 256\ndropout_p = 0.3\n\nhidden_size = 512\nnum_layers = 4\n\nlearning_rate = 0.001\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndf = pd.read_csv('./data/sms.tsv', sep='\\t')\n#print(df.columns)\n#print(df.shape)\n\nmax_length = 256\n\nclasses = sorted(set(df['label']))\nclass_to_idx = {}\nfor i, c in enumerate(classes):\n class_to_idx.update({c:i})\n\nnclass = len(classes)\n\nnew_df = pd.DataFrame({'label' : df['label'], \n 'sms' : df['sms'].str.slice(start=0, stop=max_length)})\nnew_df = pd.DataFrame(new_df.drop_duplicates())\ndf_shuffled = new_df.sample(frac=1).reset_index(drop=True)\n\n\ntrain_ratio = 0.9\n\n'''\n# train dataset\ns, e = 0, int(df_shuffled.shape[0] * train_ratio)\ndf_train = pd.DataFrame({'label' : df_shuffled['label'][s:e],\n 'sms' : df_shuffled['sms'][s:e]})\n\n# test dataset\ns, e = e, e + int(df_shuffled.shape[0] * (1.0 - train_ratio))\ndf_test = pd.DataFrame({'label' : df_shuffled['label'][s:e],\n 'sms' : df_shuffled['sms'][s:e]})\n\n\ndf_train.to_csv('./data/sms.maxlen.uniq.shuf.train.tsv', header=False, index=False, sep='\\t')\ndf_test.to_csv('./data/sms.maxlen.uniq.shuf.test.tsv', header=False, index=False, sep='\\t')\n'''\nloaders = DataLoader(\n train_fn='./data/sms.maxlen.uniq.shuf.train.tsv',\n batch_size = batch_size,\n valid_ratio = .2,\n device = 0,\n max_vocab = 999999,\n min_freq = 5\n)\n\ntest_loaders = DataLoader(\n train_fn='./data/sms.maxlen.uniq.shuf.test.tsv',\n batch_size = batch_size,\n valid_ratio = .01,\n device = 0,\n max_vocab = 999999,\n min_freq = 5\n)\nvocab_size = len(loaders.text.vocab)\nnum_classes = len(loaders.label.vocab)\n\nmodel = RNN(vocab_size, word_vec_size, hidden_size, num_classes, num_layers, dropout_p).to(device)\n\nloss_func = nn.NLLLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)\n\ntotal_step = len(loaders.train_loader)\nfor epoch in range(num_epochs):\n for i, data in enumerate(loaders.train_loader):\n texts = data.text.to(device)\n labels = data.label.to(device)\n #print(\"[%d]\"%i)\n outputs = model(texts)\n loss = loss_func(outputs, labels)\n \n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n if (i+1) % 10 == 0:\n print(\"Epoch [{}/{}], step [{}/{}], Loss : {:.4f}, Accr: {:.2f}\".format(epoch+1,num_epochs, i+1, total_step, loss.item(), ComputeAccr(loaders.valid_loader, model)))\n \n\n\n\n\n\n\n","repo_name":"201710757/NLP_SMU","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29709655484","text":"from django import forms\nfrom phonenumber_field.formfields import PhoneNumberField, RegionalPhoneNumberWidget\n\nfrom .models import Offer\n\n\nclass OfferForm(forms.ModelForm):\n class Meta:\n model = Offer\n fields = [\n 'name',\n 'description',\n 'default_price',\n 'prepayment_percent',\n 'refund_percent',\n 'main_photo',\n 'is_hidden'\n ]\n\n main_photo = forms.ImageField(widget=forms.FileInput)\n\n def __init__(self, *args, **kwargs):\n 
super().__init__(*args, **kwargs)\n\n        self.short_fields = ['name', 'default_price', 'prepayment_percent', 'refund_percent', 'slug']\n\n        self.number_fields = ['default_price']\n\n        self.fields['is_hidden'].widget.attrs.update({'class': 'form-check-input'})\n        self.fields['description'].widget.attrs.update(\n            {'class': 'form-control rounded-bottom rounded-0 h-15r flex-grow-1'}\n        )\n        self.fields['main_photo'].widget.attrs.update(\n            {'class': 'upload_img_input d-none', 'accept': 'image/png, image/jpeg, image/jpg'}\n        )\n        self.fields['main_photo'].required = False\n\n        for field in self.fields:\n            if str(field) in self.short_fields:\n                self.fields[str(field)].widget.attrs.update({'class': 'form-control'})\n\n            if str(field) in self.number_fields:\n                self.fields[str(field)].widget.attrs.update({'min': 1})\n\n    def clean_main_photo(self):\n        photo = self.cleaned_data.get(\"main_photo\")\n\n        if not photo:\n            raise forms.ValidationError('Main photo was not selected', code='required')\n\n        return photo\n\n\nclass SearchOffersForm(forms.Form):\n\n    SORT_CHOICES = (\n        ('default_price', 'ascending price'),\n        ('-default_price', 'descending price'),\n    )\n\n    date_from = forms.DateField(label='from', required=False, widget=forms.DateInput(attrs={\n        'class': 'input_field input_field__date',\n        'type': 'date'\n    }))\n    date_until = forms.DateField(label='until', required=False, widget=forms.DateInput(attrs={\n        'class': 'input_field input_field__date',\n        'type': 'date'\n    }))\n    name = forms.CharField(max_length=255, label='Name', required=False, widget=forms.TextInput(attrs={\n        'class': 'input_field input_field__name'\n    }))\n    price_from = forms.DecimalField(label='from', required=False, widget=forms.NumberInput(attrs={\n        'class': 'input_field input_field__price'\n    }))\n    price_until = forms.DecimalField(label='to', required=False, widget=forms.NumberInput(attrs={\n        'class': 'input_field input_field__price'\n    }))\n    sort_by = forms.ChoiceField(label='Sort by', required=False, widget=forms.Select(attrs={\n        'class': 'select_sortby',\n    }))\n\n    @classmethod\n    def get_sort_choices(cls):  # fetch via a classmethod so it takes the value of the current class, not\n        return cls.SORT_CHOICES  # always the base one\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['sort_by'].choices = self.get_sort_choices()\n\n\nclass SearchOffersAdmin(forms.Form):\n    id = forms.IntegerField(label='id', required=False, widget=forms.NumberInput(attrs={\n        'class': 'form-control w-auto mw-100'\n    }))\n    name = forms.CharField(label='Name', required=False, widget=forms.TextInput(attrs={\n        'class': 'form-control'\n    }))\n    dates_from = forms.DateField(label='from', required=False, widget=forms.DateInput(attrs={\n        'class': 'form-control w-100 mw-10r rounded-0 flex-grow-0 flex-shrink-1',\n        'type': 'date'\n    }))\n    dates_until = forms.DateField(label='until', required=False, widget=forms.DateInput(attrs={\n        'class': 'form-control w-100 mw-10r rounded-0 flex-grow-0 flex-shrink-1 rounded-end',\n        'type': 'date'\n    }))\n    price_from = forms.IntegerField(label='from', required=False, widget=forms.NumberInput(attrs={\n        'class': 'form-control w-100 mw-10r rounded-0 flex-grow-0 flex-shrink-1'\n    }))\n    price_until = forms.IntegerField(label='to', required=False, widget=forms.NumberInput(attrs={\n        'class': 'form-control w-100 mw-10r rounded-0 flex-grow-0 flex-shrink-1 rounded-end'\n    }))\n    sort_by = forms.CharField(required=False, initial='id', widget=forms.TextInput(attrs={\n        'class': 'd-none sorting_input',\n        'value': 'id'\n    }))\n    page = 
forms.IntegerField(required=False, initial=1, widget=forms.NumberInput(attrs={\n        'class': 'd-none page_input',\n    }))\n    \n\nclass BookOfferForm(forms.Form):\n    first_name = forms.CharField(label='Your name:', required=False, widget=forms.TextInput(attrs={\n        'class': 'input_field input_field__name',\n    }))\n    last_name = forms.CharField(label='Your last name:', required=False, widget=forms.TextInput(attrs={\n        'class': 'input_field input_field__name',\n    }))\n    phone = PhoneNumberField(\n        region='RU',\n        label='Phone number*:',\n        required=True,\n        error_messages={\n            'invalid': 'Invalid phone number. Example: +79999999999 or 8(999)999-99-99'\n        },\n        widget=RegionalPhoneNumberWidget(attrs={\n            'class': 'input_field input_field__phone',\n            'placeholder': '+79999999999'\n        }\n        ))\n\n    def __init__(self, *args, **kwargs):\n        self.user = kwargs.pop('user', None)\n        super().__init__(*args, **kwargs)\n\n        if self.user is not None and self.user.is_authenticated:\n            self.fields['phone'].widget.attrs['value'] = self.user.phone\n            self.fields['phone'].widget.attrs['readonly'] = ''\n\n            self.fields['first_name'].widget.attrs['value'] = self.user.first_name\n            self.fields['first_name'].widget.attrs['readonly'] = ''\n\n            self.fields['last_name'].widget.attrs['value'] = self.user.last_name\n            self.fields['last_name'].widget.attrs['readonly'] = ''\n\n\n\n","repo_name":"SergeiGD/Crystal-Lake-backend","sub_path":"crystallake/apps/offer/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16719944137","text":"import time\n\nimport numpy as np\nfrom scipy.sparse import linalg\nfrom scipy.sparse.linalg import LinearOperator\n\n\ndef svd_sparse(sparse_matrix, no_eigen_values):\n\n    def transpose(x):\n        return x.T\n\n    def matvec_XH_X(x):\n        return XH_dot(X_dot(x))\n\n    n, m = sparse_matrix.shape\n    X_dot = X_matmat = sparse_matrix.dot\n    XH_dot = transpose(sparse_matrix).dot\n\n    XH_X = LinearOperator(\n        matvec=matvec_XH_X,\n        dtype=sparse_matrix.dtype,\n        shape=(min(sparse_matrix.shape), min(sparse_matrix.shape))\n    )\n    eigvals, eigvec = linalg.eigsh(XH_X, k = no_eigen_values)\n    eigvals = np.maximum(eigvals.real, 0)\n\n    # in our case all eigen values are going to be greater than zero\n    # create sigma diagonal matrix\n    slarge = np.sqrt(eigvals)\n    s = np.zeros_like(eigvals)\n    s[:no_eigen_values] = slarge\n\n    ularge = X_matmat(eigvec)/slarge\n    vhlarge = transpose(eigvec)\n\n    return ularge, s, vhlarge\n\n\ndef svd_retain_energy(sparse_matrix, no_eigen_values, energy = 1):\n    u, s, vt = svd_sparse(sparse_matrix, no_eigen_values)\n    s_squared_sum = np.square(s).sum()\t\t# sum of square of all eigenvalues (diagonal elements in s)\n\n    for i in range(s.shape[0]):\n        if np.square(s[i:]).sum()<(energy*s_squared_sum):\n            break\n    i -= 1\n\n    return np.delete(u, np.s_[:i], 1), s[i:], np.delete(vt, np.s_[:i], 0)\n\n\ndef svd(sparse_matrix, no_eigen_values, energy = 1):\n    \"\"\"\n    Perform SVD Decomposition on the input sparse_matrix\n    Pass the copy of the sparse matrix to keep the original matrix unchanged\n\n    Parameters:\n        sparse_matrix : input sparse_matrix\n        no_eigen_values: number of largest eigen values desired\n        energy: retain energy% of largest eigen values\n\n    Returns : The dot product of U S and Vt matrix\n    \"\"\"\n\n    start = time.time()\n    print(f'---- SVD with {energy * 100}% energy ----')\n\n    u,s,vt = svd_retain_energy(sparse_matrix, no_eigen_values, energy)\n    svd_matrix = np.dot(np.dot(u,np.diag(s)), vt)\n\n    
print('SVD took ' + '{0:.2f}'.format(time.time() - start) + ' secs.')\n return svd_matrix\n","repo_name":"sanchitsgupta/recommendation-algorithms","sub_path":"src/svd.py","file_name":"svd.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7282907592","text":"\"\"\"Optionally define a custom strategy.\n\nNeeded only when the strategy is not yet implemented in Flower or because you want to\nextend or modify the functionality of an existing strategy.\n\"\"\"\nfrom collections import OrderedDict\nfrom logging import WARNING\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport torch\nfrom flwr.common import (\n EvaluateIns,\n EvaluateRes,\n FitIns,\n FitRes,\n Metrics,\n NDArrays,\n Parameters,\n Scalar,\n ndarrays_to_parameters,\n parameters_to_ndarrays,\n)\nfrom flwr.common.logger import log\nfrom flwr.server.client_manager import ClientManager\nfrom flwr.server.client_proxy import ClientProxy\nfrom flwr.server.strategy import FedAvg\nfrom flwr.server.strategy.aggregate import aggregate, weighted_loss_avg\n\nfrom fedmeta.models import FemnistNetwork, StackedLSTM\nfrom fedmeta.utils import update_ema\n\n\n# pylint: disable=too-many-arguments\ndef fedmeta_update_meta_sgd(\n net: torch.nn.Module,\n alpha: torch.nn.ParameterList,\n beta: float,\n weights_results: NDArrays,\n gradients_aggregated: NDArrays,\n weight_decay: float,\n) -> Tuple[NDArrays, torch.nn.ParameterList]:\n \"\"\"Update model parameters for FedMeta(Meta-SGD).\n\n Parameters\n ----------\n net : torch.nn.Module\n The list of metrics to aggregate.\n alpha : torch.nn.ParameterList\n alpha is the learning rate. it is updated with parameters in FedMeta (Meta-SGD).\n beta : float\n beta is the learning rate for updating parameters and alpha on the server.\n weights_results : List[Tuple[NDArrays, int]]\n These are the global model parameters for the current round.\n gradients_aggregated : List[Tuple[NDArrays, int]]\n Weighted average of the gradient in the current round.\n WD : float\n The weight decay for Adam optimizer\n\n Returns\n -------\n weights_prime : List[Tuple[NDArrays, int]]\n These are updated parameters.\n alpha : torch.nn.ParameterLis\n These are updated alpha.\n \"\"\"\n params_dict = zip(net.state_dict().keys(), weights_results)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n net.load_state_dict(state_dict, strict=True)\n optimizer = torch.optim.Adam(\n list(net.parameters()) + list(alpha), lr=beta, weight_decay=weight_decay\n )\n for params, grad_ins, alphas in zip(net.parameters(), gradients_aggregated, alpha):\n params.grad = torch.tensor(grad_ins).to(params.dtype)\n alphas.grad = torch.tensor(grad_ins).to(params.dtype)\n optimizer.step()\n optimizer.zero_grad()\n weights_prime = [val.cpu().numpy() for _, val in net.state_dict().items()]\n\n return weights_prime, alpha\n\n\ndef fedmeta_update_maml(\n net: torch.nn.Module,\n beta: float,\n weights_results: NDArrays,\n gradients_aggregated: NDArrays,\n weight_decay: float,\n) -> NDArrays:\n \"\"\"Update model parameters for FedMeta(Meta-SGD).\n\n Parameters\n ----------\n net : torch.nn.Module\n The list of metrics to aggregate.\n beta : float\n beta is the learning rate for updating parameters on the server.\n weights_results : List[Tuple[NDArrays, int]]\n These are the global model parameters for the current round.\n gradients_aggregated : List[Tuple[NDArrays, int]]\n Weighted average of the gradient in the current 
round.\n WD : float\n The weight decay for Adam optimizer\n\n Returns\n -------\n weights_prime : List[Tuple[NDArrays, int]]\n These are updated parameters.\n \"\"\"\n params_dict = zip(net.state_dict().keys(), weights_results)\n state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n net.load_state_dict(state_dict, strict=True)\n optimizer = torch.optim.Adam(\n list(net.parameters()), lr=beta, weight_decay=weight_decay\n )\n for params, grad_ins in zip(net.parameters(), gradients_aggregated):\n params.grad = torch.tensor(grad_ins).to(params.dtype)\n optimizer.step()\n optimizer.zero_grad()\n weights_prime = [val.cpu().numpy() for _, val in net.state_dict().items()]\n\n return weights_prime\n\n\ndef weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:\n \"\"\"Aggregate using a weighted average during evaluation.\n\n Parameters\n ----------\n metrics : List[Tuple[int, Metrics]]\n The list of metrics to aggregate.\n\n Returns\n -------\n Metrics\n The weighted average metric.\n \"\"\"\n # Multiply accuracy of each client by number of examples used\n correct = [num_examples * float(m[\"correct\"]) for num_examples, m in metrics]\n examples = [num_examples for num_examples, _ in metrics]\n\n # Aggregate and return custom metric (weighted average)\n return {\"accuracy\": float(sum(correct)) / float(sum(examples))}\n\n\nclass FedMeta(FedAvg):\n \"\"\"FedMeta averages the gradient and server parameter update through it.\"\"\"\n\n def __init__(self, alpha, beta, data, algo, **kwargs):\n super().__init__(**kwargs)\n self.algo = algo\n self.data = data\n self.beta = beta\n self.ema_loss = None\n self.ema_acc = None\n\n if self.data == \"femnist\":\n self.net = FemnistNetwork()\n elif self.data == \"shakespeare\":\n self.net = StackedLSTM()\n\n self.alpha = torch.nn.ParameterList(\n [\n torch.nn.Parameter(torch.full_like(p, alpha))\n for p in self.net.parameters()\n ]\n )\n\n def configure_fit(\n self, server_round: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, FitIns]]:\n \"\"\"Configure the next round of training.\"\"\"\n config = {\"alpha\": self.alpha, \"algo\": self.algo, \"data\": self.data}\n if self.on_fit_config_fn is not None:\n # Custom fit config function provided\n config = self.on_fit_config_fn(server_round)\n fit_ins = FitIns(parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_fit_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample( # type: ignore\n num_clients=sample_size,\n min_num_clients=min_num_clients,\n server_round=server_round,\n step=\"fit\",\n )\n\n # Return client/config pairs\n return [(client, fit_ins) for client in clients]\n\n def configure_evaluate(\n self, server_round: int, parameters: Parameters, client_manager: ClientManager\n ) -> List[Tuple[ClientProxy, EvaluateIns]]:\n \"\"\"Configure the next round of evaluation.\"\"\"\n # Do not configure federated evaluation if fraction eval is 0.\n if self.fraction_evaluate == 0.0:\n return []\n\n # Parameters and config\n config = {\"alpha\": self.alpha, \"algo\": self.algo, \"data\": self.data}\n if self.on_evaluate_config_fn is not None:\n # Custom evaluation config function provided\n config = self.on_evaluate_config_fn(server_round)\n evaluate_ins = EvaluateIns(parameters, config)\n\n # Sample clients\n sample_size, min_num_clients = self.num_evaluation_clients(\n client_manager.num_available()\n )\n clients = client_manager.sample( # type: ignore\n num_clients=sample_size,\n 
min_num_clients=min_num_clients,\n server_round=server_round,\n step=\"evaluate\",\n )\n\n # Return client/config pairs\n return [(client, evaluate_ins) for client in clients]\n\n def aggregate_fit(\n self,\n server_round: int,\n results: List[Tuple[ClientProxy, FitRes]],\n failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],\n ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:\n \"\"\"Aggregate fit results using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n\n # Convert results\n weights_results: List[Tuple[NDArrays, int]] = [\n (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples)\n for _, fit_res in results\n ]\n\n parameters_aggregated = aggregate(weights_results)\n if self.data == \"femnist\":\n weight_decay_ = 0.001\n else:\n weight_decay_ = 0.0001\n\n # Gradient Average and Update Parameter for FedMeta(MAML)\n if self.algo == \"fedmeta_maml\":\n grads_results: List[Tuple[NDArrays, int]] = [\n (fit_res.metrics[\"grads\"], fit_res.num_examples) # type: ignore\n for _, fit_res in results\n ]\n gradients_aggregated = aggregate(grads_results)\n weights_prime = fedmeta_update_maml(\n self.net,\n self.beta,\n weights_results[0][0],\n gradients_aggregated,\n weight_decay_,\n )\n parameters_aggregated = weights_prime\n\n # Gradient Average and Update Parameter for FedMeta(Meta-SGD)\n elif self.algo == \"fedmeta_meta_sgd\":\n grads_results: List[Tuple[NDArrays, int]] = [ # type: ignore\n (fit_res.metrics[\"grads\"], fit_res.num_examples)\n for _, fit_res in results\n ]\n gradients_aggregated = aggregate(grads_results)\n weights_prime, update_alpha = fedmeta_update_meta_sgd(\n self.net,\n self.alpha,\n self.beta,\n weights_results[0][0],\n gradients_aggregated,\n weight_decay_,\n )\n self.alpha = update_alpha\n parameters_aggregated = weights_prime\n\n # Aggregate custom metrics if aggregation fn was provided\n metrics_aggregated = {}\n if self.fit_metrics_aggregation_fn:\n fit_metrics = [(res.num_examples, res.metrics) for _, res in results]\n metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics)\n elif server_round == 1: # Only log this warning once\n log(WARNING, \"No fit_metrics_aggregation_fn provided\")\n\n return ndarrays_to_parameters(parameters_aggregated), metrics_aggregated\n\n def aggregate_evaluate(\n self,\n server_round: int,\n results: List[Tuple[ClientProxy, EvaluateRes]],\n failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]],\n ) -> Tuple[Optional[float], Dict[str, Scalar]]:\n \"\"\"Aggregate evaluation losses using weighted average.\"\"\"\n if not results:\n return None, {}\n # Do not aggregate if there are failures and failures are not accepted\n if not self.accept_failures and failures:\n return None, {}\n\n # Aggregate loss\n loss_aggregated = weighted_loss_avg(\n [\n (evaluate_res.num_examples, evaluate_res.loss)\n for _, evaluate_res in results\n ]\n )\n\n if self.data == \"femnist\":\n smoothing_weight = 0.95\n else:\n smoothing_weight = 0.7\n self.ema_loss = update_ema(self.ema_loss, loss_aggregated, smoothing_weight)\n loss_aggregated = self.ema_loss\n\n # Aggregate custom metrics if aggregation fn was provided\n metrics_aggregated = {}\n if self.evaluate_metrics_aggregation_fn:\n eval_metrics = [(res.num_examples, res.metrics) for _, res in results]\n metrics_aggregated = self.evaluate_metrics_aggregation_fn(eval_metrics)\n self.ema_acc = update_ema(\n 
self.ema_acc,\n                round(float(metrics_aggregated[\"accuracy\"] * 100), 3),\n                smoothing_weight,\n            )\n            metrics_aggregated[\"accuracy\"] = self.ema_acc\n\n        elif server_round == 1:  # Only log this warning once\n            log(WARNING, \"No evaluate_metrics_aggregation_fn provided\")\n\n        return loss_aggregated, metrics_aggregated\n","repo_name":"adap/flower","sub_path":"baselines/fedmeta/fedmeta/strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":12049,"program_lang":"python","lang":"en","doc_type":"code","stars":3287,"dataset":"github-code","pt":"61"} +{"seq_id":"8852969131","text":"from app.catalog import main\nfrom app import db\nfrom app.catalog.models import Book, Publication\nfrom app.cart.models import Cart\nfrom app.auth.models import User\nfrom flask import render_template, request, flash, redirect, url_for\nfrom flask_login import login_required\n\n@main.route('/book/add_to_cart/<int:user_id>/<int:book_id>', methods=['GET', 'POST'])\n@login_required\ndef add_to_cart(user_id, book_id):\n    user = User.query.get(user_id)\n    book = Book.query.get(book_id)\n    if request.method == 'POST':\n        db.session.add(Cart(user_id=user.id, prod_id=book.id))\n        db.session.commit()\n        flash('Book added to Cart')\n        return redirect(url_for('main.display_books'))\n    return render_template('add_to_cart.html', user_id=user, prod_id=book)\n\n@main.route('/cart/<int:user_id>')\n@login_required\ndef display_cart(user_id):\n    user = User.query.filter_by(id=user_id).first()\n    user_books = Book.query.join(Cart, Cart.prod_id == Book.id).filter(Cart.user_id == user_id).all()\n\n    return render_template('cart.html', user=user, user_books=user_books)\n\n\n","repo_name":"dre1144/items-catalog","sub_path":"app/cart/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23961763073","text":"from components.Luckybox import Luckybox\n\n\nclass ActionCard(Luckybox):\n    def __init__(self, text: str, actions: dict):\n        super().__init__(text, actions)\n        self.target = None\n        self.active = False\n\n    def on_check(self):\n        self.key_actions(\"default\", [self.target])\n\n    def set_target_and_caster(self, t, c):\n        self.target = t\n        self.player = c\n        self.text = self.text.replace(\"%\", f\"<@{self.target}>\")\n\n    def counter(self):\n        temp = self.target\n        self.target = self.player\n        self.player = temp\n\n\ncards = [\n    ActionCard(f\"You throw a rock at %. He will be stunned for 1 turn\", {\"default\": {\n        \"timeout\": 1\n    }}),\n    ActionCard(\"You use the card as a baseball bat, and you kick out % to the start line\", {\"default\": {\n        \"set_field\": 1\n    }}),\n    ActionCard(\"You do a backflip. It's sooo cool, that % must pay some respect and goes back 2 fields\", {\"default\": {\n        \"fields\": -2\n    }}),\n    ActionCard(\"You throw some nerdy words, such as *\\\"cacophony\\\"*, which make your target feel stupid for 3 turns\", {\n        \"default\": {\n            \"timeout\": 3\n        }\n    }),\n    ActionCard(\"You decide to give your opponent an ipad. 
He's so hyped about it, that he goes straight to the start line\", {\n \"default\": {\n \"set_field\": 1\n }\n })\n]\n","repo_name":"OvieDev/lucky-rush","sub_path":"components/ActionCard.py","file_name":"ActionCard.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3854255115","text":"import logging\n\nfrom torch import nn\nfrom torch.cuda import amp\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\n\n\ndef train_full_data(args, model, train_loader, optimizer, scheduler, epoch):\n # parallel model\n parallel_model = nn.DataParallel(model, device_ids=args.device_ids)\n\n # scaler for automatic mixed precision\n scaler = amp.GradScaler(enabled=args.use_amp)\n\n # training loop\n model.train()\n cur_num, cur_loss, cur_correct = 0, 0, 0\n with tqdm(\n train_loader, ncols=120, desc=f\"Epoch[{epoch+1}/{args.n_train_epochs}]\"\n ) as pbar:\n for step, (input_ids, attention_mask, labels) in enumerate(pbar):\n # initialize all grads to 0\n optimizer.zero_grad()\n\n # forward with mixed precision (fp16/bf16)\n with amp.autocast(dtype=args.dtype, enabled=args.use_amp):\n losses, logits, _ = parallel_model(\n input_ids=input_ids, attention_mask=attention_mask, labels=labels\n )\n loss = losses.mean()\n\n # backward grads of scaled loss\n scaler.scale(loss).backward()\n # `optimizer.step()` with unscaled grads or skip if Inf/NaN in grads\n scaler.step(optimizer)\n # update scaling factor\n scaler.update()\n\n scheduler.step()\n\n cur_num += len(input_ids)\n cur_loss += loss.item() * len(input_ids)\n cur_correct += logits.cpu().argmax(1).eq(labels).sum().item()\n\n # update progress bar\n pbar.set_postfix(\n {\n \"loss\": f\"{loss.item():.3f}\",\n \"lr\": f\"{scheduler.get_last_lr()[0]:.1e}\",\n }\n )\n\n # logging\n if (step + 1) % args.logging_steps == 0:\n logger.info(\n \"Epoch {:.2f}, Step {} | Running Loss = {:.3f}, \"\n \"Running Acc={:.2%}, lr={:.2e}\".format(\n epoch + (step + 1) / len(pbar),\n step + 1 + len(pbar) * epoch,\n cur_loss / cur_num,\n cur_correct / cur_num,\n scheduler.get_last_lr()[0],\n )\n )\n cur_num, cur_loss, cur_correct = 0, 0, 0\n","repo_name":"arumaekawa/text-dataset-distillation","sub_path":"src/full_data.py","file_name":"full_data.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"12257868977","text":"import os\nimport unittest\nfrom decimal import Decimal, InvalidOperation\nimport json\n\ntry:\n # python 3.4+ should use builtin unittest.mock not mock package\n from unittest.mock import patch\n from unittest import mock\nexcept ImportError:\n from mock import patch\n import mock\n\nimport sys\n\nfrom envs import env\nfrom envs.exceptions import EnvsValueException\n\nclass EnvTestCase(unittest.TestCase):\n def setUp(self):\n # Integer\n os.environ.setdefault('VALID_INTEGER', '1')\n os.environ.setdefault('INVALID_INTEGER', '[\"seven\"]')\n # String\n os.environ.setdefault('VALID_STRING', 'seven')\n # Boolean\n os.environ.setdefault('VALID_BOOLEAN', 'True')\n os.environ.setdefault('VALID_BOOLEAN_FALSE', 'false')\n os.environ.setdefault('INVALID_BOOLEAN', 'seven')\n # List\n os.environ.setdefault('VALID_LIST', \"['1','2','3']\")\n os.environ.setdefault('INVALID_LIST', \"1\")\n # Tuple\n os.environ.setdefault('VALID_TUPLE', \"('True','FALSE')\")\n os.environ.setdefault('INVALID_TUPLE', '1')\n # Dict\n os.environ.setdefault('VALID_DICT', 
\"{'first_name':'Suge'}\")\n os.environ.setdefault('INVALID_DICT', 'Aaron Rogers')\n # Float\n os.environ.setdefault('VALID_FLOAT', \"5.0\")\n os.environ.setdefault('INVALID_FLOAT', '[5.0]')\n # Decimal\n os.environ.setdefault('VALID_DECIMAL', \"2.39\")\n os.environ.setdefault('INVALID_DECIMAL', \"FOOBAR\")\n\n def test_integer_valid(self):\n self.assertEqual(1, env('VALID_INTEGER', var_type='integer'))\n\n def test_integer_invalid(self):\n with self.assertRaises(TypeError) as vm:\n env('INVALID_INTEGER', var_type='integer')\n\n def test_wrong_var_type(self):\n with self.assertRaises(ValueError) as vm:\n env('INVALID_INTEGER', var_type='set')\n\n def test_string_valid(self):\n self.assertEqual('seven', env('VALID_STRING'))\n\n def test_boolean_valid(self):\n self.assertEqual(True, env('VALID_BOOLEAN', var_type='boolean'))\n\n def test_boolean_valid_false(self):\n self.assertEqual(False, env('VALID_BOOLEAN_FALSE', var_type='boolean'))\n\n def test_boolean_invalid(self):\n with self.assertRaises(ValueError) as vm:\n env('INVALID_BOOLEAN', var_type='boolean')\n\n def test_list_valid(self):\n self.assertEqual(['1', '2', '3'], env('VALID_LIST', var_type='list'))\n\n def test_list_invalid(self):\n with self.assertRaises(TypeError) as vm:\n env('INVALID_LIST', var_type='list')\n\n def test_tuple_valid(self):\n self.assertEqual(('True', 'FALSE'), env('VALID_TUPLE', var_type='tuple'))\n\n def test_tuple_invalid(self):\n with self.assertRaises(TypeError) as vm:\n env('INVALID_TUPLE', var_type='tuple')\n\n def test_dict_valid(self):\n self.assertEqual({'first_name': 'Suge'}, env('VALID_DICT', var_type='dict'))\n\n def test_dict_invalid(self):\n with self.assertRaises(SyntaxError) as vm:\n env('INVALID_DICT', var_type='dict')\n\n def test_float_valid(self):\n self.assertEqual(5.0, env('VALID_FLOAT', var_type='float'))\n\n def test_float_invalid(self):\n with self.assertRaises(TypeError) as vm:\n env('INVALID_FLOAT', var_type='float')\n\n def test_decimal_valid(self):\n self.assertEqual(Decimal('2.39'), env('VALID_DECIMAL', var_type='decimal'))\n\n def test_decimal_invalid(self):\n with self.assertRaises(ArithmeticError) as vm:\n env('INVALID_DECIMAL', var_type='decimal')\n\n def test_defaults(self):\n self.assertEqual(env('HELLO', 5, var_type='integer'), 5)\n self.assertEqual(env('HELLO', 5.0, var_type='float'), 5.0)\n self.assertEqual(env('HELLO', [], var_type='list'), [])\n self.assertEqual(env('HELLO', {}, var_type='dict'), {})\n self.assertEqual(env('HELLO', (), var_type='tuple'), ())\n self.assertEqual(env('HELLO', 'world'), 'world')\n self.assertEqual(env('HELLO', False, var_type='boolean'), False)\n self.assertEqual(env('HELLO', 'False', var_type='boolean'), False)\n self.assertEqual(env('HELLO', 'true', var_type='boolean'), True)\n self.assertEqual(env('HELLO', Decimal('3.14'), var_type='decimal'), Decimal('3.14'))\n\n def test_without_defaults_allow_none(self):\n self.assertEqual(env('HELLO'), None)\n self.assertEqual(env('HELLO', var_type='integer'), None)\n self.assertEqual(env('HELLO', var_type='float'), None)\n self.assertEqual(env('HELLO', var_type='list'), None)\n\n def test_without_defaults_disallow_none(self):\n with self.assertRaises(EnvsValueException):\n env('HELLO', allow_none=False)\n with self.assertRaises(EnvsValueException):\n env('HELLO', var_type='integer', allow_none=False)\n with self.assertRaises(EnvsValueException):\n env('HELLO', var_type='float', allow_none=False)\n with self.assertRaises(EnvsValueException):\n env('HELLO', var_type='list', allow_none=False)\n\n def 
test_empty_values(self):\n os.environ.setdefault('EMPTY', '')\n self.assertEqual(env('EMPTY'), '')\n with self.assertRaises(SyntaxError):\n env('EMPTY', var_type='integer')\n with self.assertRaises(SyntaxError):\n env('EMPTY', var_type='float')\n with self.assertRaises(SyntaxError):\n env('EMPTY', var_type='list')\n with self.assertRaises(SyntaxError):\n env('EMPTY', var_type='dict')\n with self.assertRaises(SyntaxError):\n env('EMPTY', var_type='tuple')\n with self.assertRaises(ValueError):\n env('EMPTY', var_type='boolean')\n with self.assertRaises(ArithmeticError):\n env('EMPTY', var_type='decimal')\n\n'''\nEach CLI Test must be run outside of test suites in isolation\nsince Click CLI Runner alters the global context\n'''\ndef setup_CliRunner(test_func):\n '''\n Decorator to initialize environment for CliRunner.\n '''\n def wrapper():\n from click.testing import CliRunner\n try:\n from cli import envs as cli_envs\n except ImportError:\n from .cli import envs as cli_envs\n\n try:\n from cli import envs\n except ImportError:\n from .cli import envs\n\n test_func()\n\n return wrapper\n\n@mock.patch.object(sys, 'argv', [\"list-envs\"])\n@setup_CliRunner\ndef test_list_envs():\n os.environ.setdefault('DEBUG', 'True')\n\n runner = CliRunner()\n result = runner.invoke(envs, ['list-envs', '--settings-file', 'envs.test_settings', '--keep-result', 'True'], catch_exceptions=False)\n\n output_expected = [{\"default\": None, \"value\": None, \"var_type\": \"string\", \"key\": \"DATABASE_URL\"},{\"default\": False, \"value\": \"True\", \"var_type\": \"boolean\", \"key\": \"DEBUG\"},{\"default\": [], \"value\": None, \"var_type\": \"list\", \"key\": \"MIDDLEWARE\"},{}]\n\n with open('.envs_result', 'r') as f:\n output_actual = json.load(f)\n\n exit_code_expected = 0\n exit_code_actual = result.exit_code\n\n\n assert exit_code_actual == exit_code_expected\n assert output_actual == output_expected\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","repo_name":"capless/envs","sub_path":"envs/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"5542528762","text":"\"\"\" Dataset Ingest Config \"\"\"\n# The list of entities that will be loaded into the target service\ntarget_service_entities = [\n \"genomic_file\",\n \"biospecimen_genomic_file\",\n]\n\n# All paths are relative to the directory this file is in\nextract_config_dir = \"extract_configs\"\n\ntransform_function_path = \"transform_module.py\"\n\nproject = \"SD_ME0WME0W\"\n","repo_name":"kids-first/kf-lib-data-ingest","sub_path":"tests/data/kfid_study/ingest_package_config.py","file_name":"ingest_package_config.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"929044200","text":"import pdb\n\nimport argparse\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python import debug as tfdbg\n\nfrom train_frame.bframe import TrainBaseFrame\nfrom train_frame.data_process.data_processor import batch_iter\nfrom train_frame.data_process.data_processor import DataProcessor\nfrom tflags import TFlags\n\nclass Train(TrainBaseFrame):\n def __init__(self, model, flags, sess_config, field_len=2):\n super(Train, self).__init__(model, flags, sess_config)\n self.model = model\n self.flags = flags\n self.field_len = field_len\n self.sess_config = sess_config\n\n def get_batches(self, train_data, batch_size, 
num_epochs, shuffle=True):\n if len(train_data)==2:\n data = list(zip(train_data[0], train_data[1]))\n elif len(train_data)==3:\n data = list(zip(train_data[0], train_data[1], train_data[2]))\n batches = batch_iter(data, batch_size, num_epochs, shuffle=shuffle)\n return batches\n\n def get_feed_dict(self, batch_data, is_training=False, padding=True, samelen=False):\n '''\n @param: padding, whether to padding data in batch; If sequence data \n length in batch is not same when do feed feed_dict will throw error;\n @param: samelen, whether `x`, 'y' padding to same length;\n '''\n label_batch = None\n if is_training:\n if self.field_len == 2:\n x_batch, y_batch = zip(*batch_data)\n elif self.field_len == 3:\n x_batch, y_batch, label_batch = zip(*batch_data)\n else:\n if self.field_len == 2:\n x_batch, y_batch = zip(*batch_data)\n # x_batch, y_batch = batch_data[0], batch_data[1]\n elif self.field_len == 3:\n x_batch, y_batch, label_batch = zip(*batch_data)\n # x_batch, y_batch, label_batch = \\\n # batch_data[0], batch_data[1], batch_data[2]\n\n if padding:\n x_maxlen = max([len(x) for x in x_batch])\n y_maxlen = max([len(y) for y in y_batch])\n if samelen:\n x_maxlen = y_maxlen = max(x_maxlen, y_maxlen)\n _x_batch = [list(x)+[0]*(x_maxlen-len(x)) for x in x_batch]\n _y_batch = [list(y)+[0]*(y_maxlen-len(y)) for y in y_batch]\n x_batch = _x_batch\n y_batch = _y_batch\n\n if is_training:\n if label_batch is None:\n feed_dict = {\n self.model.input_x : x_batch, \n self.model.input_y : y_batch,\n self.model.dropout_keep_prob : self.flags.dropout_keep_prob\n }\n else:\n feed_dict = {\n self.model.input_x : x_batch, \n self.model.input_y : y_batch,\n self.model.label : label_batch,\n self.model.dropout_keep_prob : self.flags.dropout_keep_prob\n }\n\n else:\n if label_batch is None:\n feed_dict = {\n self.model.input_x : x_batch, \n self.model.input_y : y_batch,\n self.model.dropout_keep_prob : 1.0\n }\n else:\n feed_dict = {\n self.model.input_x : x_batch, \n self.model.input_y : y_batch,\n self.model.label : label_batch,\n self.model.dropout_keep_prob : 1.0\n }\n\n return feed_dict\n","repo_name":"Fisher87/ai_explore","sub_path":"nlp_explore/task/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"61"} +{"seq_id":"23426420791","text":"import sys\n\n\ndef calculateTime(farmCost, cpsIncrementer, cookieGoal,):\n totalTime = 0.0\n cookiesPerSec = 2.0\n\n while(True):\n if (cookieGoal / cookiesPerSec) < ((farmCost/cookiesPerSec) + (cookieGoal / (cookiesPerSec + cpsIncrementer))):\n return totalTime + (cookieGoal/cookiesPerSec)\n else:\n totalTime += farmCost / cookiesPerSec\n cookiesPerSec += cpsIncrementer \n \n\n#Opening File\ntry:\n inputFile = open(sys.argv[1], 'r')\nexcept Exception:\n sys.exit(\"Could not open file, program terminating\")\n\n\ntotalCases = inputFile.readline()\n\n\nfor case in range(1, int(totalCases) + 1):\n farmCost, cpsIncrementer, cookieGoal = inputFile.readline().split() \n totalTime = calculateTime(float(farmCost), float(cpsIncrementer), float(cookieGoal))\n print('Case #' + str(case) + ': ' + \"%.7f\" % totalTime)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2950.py","file_name":"2950.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18246492173","text":"'Given an List which consists of only 0, 1 and 2. 
Sort the List without using any sorting algo'\n\n\nclass Solution:\n    def sort012(self,arr,n):\n        # code here\n        for i in range(n):\n            for j in range(i,n-1):\n                if arr[i]>arr[j+1]:\n                    t=arr[i]\n                    arr[i]=arr[j+1]\n                    arr[j+1]=t\n        return arr\nobj=Solution()\n\nans=obj.sort012([0,1,2,2,0,0],6)\nprint(ans)\n\n# output: [0, 0, 0, 1, 2, 2]","repo_name":"Wasim-Akraam/DSA_with_Python","sub_path":"List/4_sort01without_sortalgo.py","file_name":"4_sort01without_sortalgo.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14284022643","text":"# coding=utf-8\n\"\"\"\nRecognition model server, including gap-fill grading / score-change and correction-mark recognition / digit recognition\n\"\"\"\nimport numpy as np\nimport cv2\nimport math\nfrom scipy import ndimage\nimport tensorflow as tf\nfrom signal_preprocessing import average_fft_of_singal_trail\nfeature_d = 26\ncata_count = 3\n\nclass gap_recognition_server:\n    def __init__(self, saved_model):\n        self._model_init(saved_model)\n\n    def _model_init(self, saved_model):\n        def weight_variable(shape):\n            initial = tf.truncated_normal(shape, stddev=0.1)\n            return tf.Variable(initial)\n\n        def bias_variable(shape):\n            initial = tf.constant(0.1, shape=shape)\n            return tf.Variable(initial)\n\n        def conv2d(x, W):\n            return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n        def max_pool_2x2(x):\n            return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n                                  strides=[1, 2, 2, 1], padding='SAME')\n\n        def cnn_layer(x, filter_shape, max_pool=True):\n            W = weight_variable(filter_shape)\n            b = bias_variable([filter_shape[3]])\n            h = tf.nn.relu(conv2d(x, W) + b)\n            if max_pool:\n                h = max_pool_2x2(h)\n            return h, W\n        with tf.name_scope('MI-3'):\n            with tf.name_scope('placeholder'):\n                self.x = tf.placeholder(tf.float32, [None, 2, feature_d, 1])\n            with tf.name_scope('CONV-1'):\n                # 1.CNN layer\n                cnn_1, w1 = cnn_layer(self.x, [1, 4, 1, 32], max_pool=False)\n            with tf.name_scope('CONV-2'):\n                # 2.CNN layer\n                cnn_2, w2 = cnn_layer(cnn_1, [1, 4, 32, 16], max_pool=False)\n            with tf.name_scope('FC_1'):\n                # 3.FC_1\n                d = 256\n                W_fc1 = weight_variable([2 * (feature_d) * 16, d])\n                b_fc1 = bias_variable([d])\n                h_pool5_flat = tf.reshape(cnn_2, [-1, 2 * (feature_d) * 16])\n                h_fc1 = tf.nn.relu(tf.matmul(h_pool5_flat, W_fc1) + b_fc1)\n                keep_prob = tf.placeholder(tf.float32)\n                h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n                # h_fc1_drop = h_fc1\n            with tf.name_scope('FC_2'):\n                # 4.FC_2\n                W_fc2 = weight_variable([d, cata_count])\n                b_fc2 = bias_variable([cata_count])\n                self.y = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n                self.prediction = tf.argmax(self.y, 1)\n\n        # all_vars = tf.all_variables()\n        # gap_vars = [k for k in all_vars if k.name.startswith('gap')]\n        saver = tf.train.Saver()\n        self.sess = tf.Session()\n        saver.restore(self.sess, saved_model)\n\n        return\n\n    def predict(self, data_frame):\n        \"\"\"\n        :param data_frame: 400 two-channel sequence samples\n        :return:\n        \"\"\"\n        data_f = average_fft_of_singal_trail(data_frame)\n        return self.prediction.eval(feed_dict={self.x: data_f.reshape(-1, 2, feature_d, 1)}, session=self.sess)[0]\n\n\n# load model\ngap_model = gap_recognition_server('saved_model/gap/saved_model-9000')\n\n","repo_name":"Herschel-vodemort/MI_CNN","sub_path":"MI_CNN/model_server.py","file_name":"model_server.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"29201436472","text":"# over 50 = 15% discount\n# under 30 = if you reach 50 euros you will get a 15% discount\n\n\n\n# # switch does not exist\n# 
patata =4\n# hamburgesa = 20\n# helado = 8\n\n# patata = float(input('Insertar patata:'))\n# hamburgesa = float(input('Insertar hamburgesa:'))\n# helado = float(input('Insertar helado:'))\n\n\n# eleccion = input('Elige una de las opciones (+)->').lower()\n# if eleccion == '+':\n#     suma = patata + hamburgesa + helado\n#     print(suma)\n# else:\n#     print('Te has equivocado, introduce una de las opciones que hay marcadas')\n\n# *****************************************************************************************************************************************\n\nproducto01 = input('Producto:')\nprecio = float(input(f'Precio del {producto01}:'))\nproducto02 = input('Producto:')\nprecio += float(input(f'Precio del {producto02}:'))\nproducto03 = input('Producto:')\nprecio += float(input(f'Precio del {producto03}:'))\n \n\nprint(f'Total de la compra: {precio:.2f}€')\n\n\nif precio>50:\n    print('Has conseguido un descuento del 15%')\n    precioOficial = precio * (85/100)\n\n    print(f'Precio con el descuento del 15% =', precioOficial)\nelif precio<30:\n    print('Tienes que concederme tu codigo postal,puto,asi te dare un descuento del 10%')\n    postal = input('Codigo Postal:')\n    precioOficial2 = precio * (90/100)\n    iva = precio * 0.21\n    print(f'IVA =',iva)\n    print(f'Descuento =',precioOficial2)\n    print(f'Total% =', precioOficial2 + iva )","repo_name":"stelfOriginal/python","sub_path":"supermercado.py","file_name":"supermercado.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4029218032","text":"from tda_cola_dinamica import Cola, mostrar_cola, cola_vacia, arribo, atencion, tamanio, en_frente, mover_final\r\nfrom random import choice, randint\r\n\r\ncoches = [\"automóvil\",\"camioneta\",\"camion\",\"colectivo\"]\r\ntarifa = [47,59,71,64]\r\n\r\ncola1 = Cola()\r\ncola2 = Cola()\r\ncola3 = Cola()\r\n\r\nfor e in range(30):\r\n    a = randint(1,3)\r\n    if a == 1:\r\n        a = choice(coches)\r\n        arribo(cola1,a)\r\n    if a == 2:\r\n        a = choice(coches)\r\n        arribo(cola2,a)\r\n    if a == 3:\r\n        a = choice(coches)\r\n        arribo(cola3,a)\r\n    \r\ndef atender_coches(cola):\r\n    cant_autos = 0\r\n    cant_camionetas = 0\r\n    cant_camiones = 0\r\n    cant_colectivos = 0    \r\n    recaudo = 0\r\n    while not cola_vacia(cola):\r\n        a = atencion(cola)\r\n        if a == \"automóvil\":\r\n            recaudo += 47\r\n            cant_autos += 1\r\n        if a == \"camioneta\":\r\n            recaudo += 59\r\n            cant_camionetas += 1\r\n        if a == \"camion\":\r\n            recaudo += 71\r\n            cant_camiones += 1\r\n        if a == \"colectivo\":\r\n            recaudo += 64\r\n            cant_colectivos += 1\r\n    print(\"Cantidad de automoviles atendidos\",cant_autos)\r\n    print(\"Cantidad de camionetas atendidos\",cant_camionetas)\r\n    print(\"Cantidad de camiones atendidos\",cant_camiones)\r\n    print(\"Cantidad de colectivos atendidos\",cant_colectivos)\r\n    print(\"Cantidad recaudado\",recaudo)\r\n\r\nprint(\"Cola1\")\r\natender_coches(cola1)\r\nprint(\" \")\r\nprint(\"Cola2\")\r\natender_coches(cola2)\r\nprint(\" \")\r\nprint(\"Cola3\")\r\natender_coches(cola3)\r\nprint(\" \")\r\n\r\n","repo_name":"JuanInhale/Algoritmos-2020","sub_path":"tp3ej18.py","file_name":"tp3ej18.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15408348424","text":"import requests\nimport json\n\n\ndef test_get_all_post(base_url, user_authorization):\n    headers = {\n        \"Authorization\": f'{user_authorization}'\n    }\n    response = requests.request(\n        'GET',\n        
f'{base_url}/meme',\n headers=headers\n ).json()\n meme_tag_list = []\n for meme in range(len(response['data'])):\n tags = (response['data'][meme])\n # print(type(tags['tags']))\n for i in tags['tags']:\n # print(type(i))\n meme_tag_list.append(i)\n assert 'fun' in meme_tag_list, 'tag fun is not found'\n\n\ndef test_create_meme(base_url, user_authorization):\n headers = {\n 'Authorization': f'{user_authorization}',\n 'Content-Type': 'application/json'\n }\n\n data_json = {\n \"text\": \"как я разбираюсь в собаках\",\n \"url\": \"https://cs14.pikabu.ru/post_img/2022/11/10/8/1668087866112982105.jpg\",\n \"tags\": [\"fun\", \"dogs\", 'fundogs'],\n \"info\": {\"colors\": [\"orange\", \"white\", \"red\"], \"objects\": [\"picture\", \"text\"]}\n }\n\n data = json.dumps(data_json)\n\n response = requests.request(\n 'POST',\n f'{base_url}/meme',\n headers=headers,\n data=data).json()\n\n assert response['text'] == \"как я разбираюсь в собаках\"\n\n\ndef test_update_meme(base_url, create_a_meme, user_authorization):\n meme_id = create_a_meme\n\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': user_authorization\n }\n\n data_json = {\n 'id': meme_id,\n 'text': \"This is the base\",\n 'url': \"https://memepedia.ru/wp-content/uploads/2017/04/%D0%A3%D0%BF%D1%8F%D1%87%D0%BA%D0%B0%D0%BC%D0%B5%D0%BD.png\",\n 'tags': ['fun', 'oldmeme', 'onotole', 'retro'],\n 'info': {'object': ['yp4ka', 'meme']}\n }\n data = json.dumps(data_json)\n response = requests.request(\"PUT\", f\"{base_url}/meme/{meme_id}\", headers=headers, data=data).json()\n assert response[\"tags\"] == ['fun', 'oldmeme', 'onotole', 'retro']\n\n\ndef test_delete_post(base_url, create_a_meme, user_authorization):\n meme_id = create_a_meme\n headers = {'Authorization': user_authorization}\n\n requests.request(\n \"DELETE\",\n f\"{base_url}/meme/{meme_id}\",\n headers=headers)\n\n response = requests.request(\n \"GET\",\n f\"{base_url}/meme/{meme_id}\",\n headers=headers\n )\n assert response.status_code == 404, 'The meme was not removed'\n","repo_name":"eugene-okulik/QAP-09onl","sub_path":"homework/evgeny_lavda/Home_work26/test_meme.py","file_name":"test_meme.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"2071054043","text":"from mindsdb import Predictor\n\nprint(\"learning...\")\n\n# tell mindsDB what we want to learn and from what data\nPredictor(name='home_rentals_price').learn(\n to_predict='rental_price', # the column we want to learn to predict given all the data in the file\n from_data=\"https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv\", # the path to the file where we can learn from, (note: can be url)\n use_gpu=False # 25 seconds using powershell: Measure-Command {python .\\main.py} \n # 29 seconds on iMac \n # use_gpu=True # 25 seconds also (much less CPU used though)\n)\n\n# use the model to make predictions\nresult = Predictor(name='home_rentals_price').predict(when={'number_of_rooms': 2,'number_of_bathrooms':1, 'sqft': 1190})\n\n# you can now print the results\nprint('The predicted price is ${price} with {conf} confidence'.format(price=result[0]['rental_price'], conf=result[0]['rental_price_confidence']))\n\nprint(\"done\")\n\n","repo_name":"abulka/mindsdb-play","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23631078681","text":"import 
sys\nimport re\n\ndict = {}\ndict['a'] = 'y'\ndict['b'] = 'h'\ndict['c'] = 'e'\ndict['d'] = 's'\ndict['e'] = 'o'\ndict['f'] = 'c'\ndict['g'] = 'v'\ndict['h'] = 'x'\ndict['i'] = 'd'\ndict['j'] = 'u'\ndict['k'] = 'i'\ndict['l'] = 'g'\ndict['m'] = 'l'\ndict['n'] = 'b'\ndict['o'] = 'k'\ndict['p'] = 'r'\ndict['q'] = 'z'\ndict['r'] = 't'\ndict['s'] = 'n'\ndict['t'] = 'w'\ndict['u'] = 'j'\ndict['v'] = 'p'\ndict['w'] = 'f'\ndict['x'] = 'm'\ndict['y'] = 'a'\ndict['z'] = 'q'\ndict[' '] = ' '\ndict['\\n'] = '\\n'\n\ndef main(args):\n    n = int(sys.stdin.readline())\n    s = set()\n    for _ in range(n):\n        sys.stdout.write('Case #{0}: '.format(_ + 1))\n        line = sys.stdin.readline()\n        for c in line:\n            sys.stdout.write(dict[c])\n\n\nmain(sys.argv)\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_95/1569.py","file_name":"1569.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18211393119","text":"from sqlalchemy import and_\nfrom datetime import datetime\nfrom http.client import OK\nfrom flask import jsonify, request\nfrom flask.views import MethodView\nfrom marshmallow import ValidationError\nfrom api import log, db\nfrom api.v1.schema import (\n    InternalServerErrorSchema,\n    CategoriaSchema, NoticiaSchema,\n    CategoriaValidationErrorSchema,\n    CategoriaNotFoundSchema,\n    NoticiaNotFoundSchema,\n    NoticiaValidationErrorSchema,\n    EmptyDataSchema,\n)\nfrom api.models import Categoria, Noticia\nfrom api.utils import restrict_access\nfrom flask_jwt_extended import (\n    jwt_required, jwt_optional, get_jwt_identity\n)\n\n\nclass CategoriasView(MethodView):\n    def get(self, categoria_id=None):\n        page = request.args.get('page', 1, type=int)\n        offset = request.args.get('offset', 10, type=int)\n\n        if categoria_id:\n            categoria = Categoria.query.get(categoria_id)\n            return jsonify(CategoriaSchema().dump(categoria))\n\n        categorias = Categoria.query.paginate(page, offset, False).items\n        return jsonify(CategoriaSchema(many=True).dump(categorias)), OK.value\n\n    @jwt_required\n    @restrict_access(['admin', 'editor'])\n    def post(self):\n        data = request.get_json()\n        if not data:\n            return EmptyDataSchema().build()\n\n        try:\n            categoria = CategoriaSchema().load(data)\n        except ValidationError as err:\n            return CategoriaValidationErrorSchema().build(err.messages)\n\n        try:\n            db.session.add(Categoria(**categoria))\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            log.error(\"Error during add categoria: {}\".format(e))\n            return InternalServerErrorSchema().build(\"Database Error\")\n\n        return CategoriaSchema().created(categoria)\n\n    @jwt_required\n    @restrict_access(['admin', 'editor'])\n    def put(self, categoria_id):\n        categoria_id = int(categoria_id)\n        data = request.get_json()\n        if not data or categoria_id == 0:\n            return EmptyDataSchema().build()\n\n        categoria = Categoria.query.get(categoria_id)\n        if not categoria:\n            log.error('Categoria not found')\n            return CategoriaNotFoundSchema().build()\n\n        try:\n            new_categoria = CategoriaSchema().load(data)\n        except ValidationError as err:\n            log.error('Error while validating Categoria: {}'.format(err))\n            return CategoriaValidationErrorSchema().build(err.messages)\n\n        categoria.update(**new_categoria)\n\n        try:\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            log.error(\"Error during update categoria: {}\".format(e))\n            return InternalServerErrorSchema().build()\n\n        return CategoriaSchema().dump(categoria)\n\n    @jwt_required\n    
@restrict_access(['admin', 'editor'])\n def delete(self, categoria_id):\n categoria_id = int(categoria_id)\n\n categoria = Categoria.query.get(categoria_id)\n if not categoria:\n return CategoriaNotFoundSchema().build()\n\n try:\n db.session.delete(categoria)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n log.error(\"Error during delete categoria: {}\".format(e))\n return InternalServerErrorSchema().build()\n\n return jsonify({}), OK.value\n\n\nclass CategoriaNoticiaView(MethodView):\n def get(self, categoria_slug):\n categoria = {}\n noticias = []\n pagination = []\n page = request.args.get('page', 1, type=int)\n offset = request.args.get('offset', 10, type=int)\n\n try:\n categoria = CategoriaSchema().dump(\n Categoria.query.filter(\n Categoria.slug == categoria_slug).first()\n )\n except Exception as e:\n log.error(\"Error during find categoria: {}\".format(e))\n\n log.info(\"categoria: {}\".format(categoria))\n\n try:\n noticias = Noticia.query.filter(\n Noticia.categoria_id == categoria.get('id')\n ).paginate(page, offset, False)\n except Exception as e:\n log.error(\"Error during find noticia: {}\".format(e))\n\n pagination = {\n 'page': noticias.page,\n 'per_page': noticias.per_page,\n 'total': noticias.total,\n 'data': NoticiaSchema(many=True).dump(noticias.items)\n }\n\n return jsonify(pagination), OK.value\n\n\nclass NoticiasView(MethodView):\n @jwt_optional\n def get(self, noticia_slug=None):\n filters = []\n noticias = []\n noticia = None\n pagination = {}\n page = request.args.get('page', 1, type=int)\n offset = request.args.get('offset', 10, type=int)\n\n current_user = get_jwt_identity()\n if current_user is None:\n filters.append(Noticia.publicado.is_(True))\n filters.append(Noticia.data_publicacao <= datetime.now())\n\n if noticia_slug:\n filters.append(Noticia.slug == noticia_slug)\n noticia = Noticia.query.filter(and_(*filters)).first()\n if noticia is None:\n return NoticiaNotFoundSchema().build()\n\n return jsonify(NoticiaSchema().dump(noticia)), OK.value\n\n noticias = Noticia.query.filter(\n and_(*filters)\n ).paginate(page, offset, False)\n\n pagination = {\n 'page': noticias.page,\n 'per_page': noticias.per_page,\n 'total': noticias.total,\n 'data': NoticiaSchema(many=True).dump(noticias.items)\n }\n\n return jsonify(pagination), OK.value\n\n @jwt_required\n @restrict_access(['admin', 'editor', 'jornalista'])\n def post(self):\n data = request.get_json()\n categoria = None\n if not data:\n return EmptyDataSchema().build()\n\n try:\n loaded_data = NoticiaSchema().load(data)\n except ValidationError as err:\n return NoticiaValidationErrorSchema().build(err.messages)\n\n try:\n categoria = Categoria.query.get(\n loaded_data['categoria_id']\n )\n except Exception as e:\n log.error(\"Error during get categoria: {}\".format(e))\n\n if 'publicado' in loaded_data and loaded_data['publicado'] is True:\n loaded_data['data_publicacao'] = datetime.now()\n\n noticia = Noticia(**loaded_data)\n noticia.categoria = categoria\n\n # save noticia\n try:\n db.session.add(noticia)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n log.error(\"Error during add noticia: {}\".format(e))\n return InternalServerErrorSchema().build(\"Database Error\")\n\n # generate slug from noticia\n try:\n noticia.generate_slug()\n db.session.flush()\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n log.error(\"Error during generate slug from noticia: {}\".format(e))\n return InternalServerErrorSchema().build(\"Database Error\")\n\n 
return NoticiaSchema().created(noticia)\n\n    @jwt_required\n    @restrict_access(['admin', 'editor', 'jornalista'])\n    def put(self, noticia_slug=None):\n        categoria_id = None\n        data = request.get_json()\n        if not data or noticia_slug is None:\n            return EmptyDataSchema().build()\n\n        noticia = Noticia.query.filter(\n            Noticia.slug == noticia_slug\n        ).first()\n        if not noticia:\n            log.error('Noticia not found')\n            return NoticiaNotFoundSchema().build()\n\n        # todo: jornalista may only edit their own noticia\n\n        try:\n            new_noticia = NoticiaSchema().load(data)\n        except ValidationError as err:\n            log.error(\"Error while validating noticia: {}\".format(err))\n            return NoticiaValidationErrorSchema().build(err.messages)\n\n        categoria_id = new_noticia.get('categoria_id', None)\n\n        if categoria_id:\n            noticia.categoria = Categoria.query.get(categoria_id)\n\n        noticia.update(**new_noticia)\n\n        try:\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            log.error(\"Error during update noticia: {}\".format(e))\n            return InternalServerErrorSchema().build()\n\n        try:\n            noticia.generate_slug()\n            db.session.flush()\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            log.error(\"Error during generate slug from noticia: {}\".format(e))\n            return InternalServerErrorSchema().build(\"Database Error\")\n\n        return NoticiaSchema().dump(noticia)\n\n    @jwt_required\n    @restrict_access(['admin', 'editor', 'jornalista'])\n    def delete(self, noticia_slug=None):\n        noticia = Noticia.query.filter(\n            Noticia.slug == noticia_slug\n        ).first()\n        if not noticia:\n            return NoticiaNotFoundSchema().build()\n\n        # @TODO: jornalista may only delete their own noticia\n\n        try:\n            db.session.delete(noticia)\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            log.error(\"Error during delete noticia: {}\".format(e))\n            return InternalServerErrorSchema().build()\n\n        return jsonify({}), OK.value\n","repo_name":"leandrocorreasantos/portalnoticias","sub_path":"api/v1/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":9484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9592907494","text":"from utils_plus.router import url\nfrom utils_plus.views import return_path_view\nfrom .views import CreateUpdateAuthorView\n\nurlpatterns = list(\n    url('blog')[\n        url.slug(view=return_path_view, name='blog-slug'),\n        url(\"author\")[\n            CreateUpdateAuthorView.urls(name_prefix='author')\n        ],\n    ]\n)\n","repo_name":"jnoortheen/django-utils-plus","sub_path":"tests/test_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"71537978754","text":"import serial\nimport os\n\nport = serial.Serial(\"/dev/ttyUSB0\", baudrate=19200, timeout=3.0)\nsend_port = serial.Serial(\"/dev/ttyAMA0\", baudrate=19200, timeout=3.0)\n\ndef process_packet(packet):\n    packet[1], packet[2] = packet[2], packet[1]\n    asstring = ''\n    for data in packet:\n        asstring = asstring + data\n        port.write(str.encode(data))\n    print(asstring)\n    port.write(str.encode(asstring))\n    os.system(\"record.py --fun motion-detection -x 0 1024 -y 0 768\")\n    \nindex = 0\n\nwhile True:\n    rcv = (port.read())\n    comp_str = b''\n    \n    received = False\n    if rcv == comp_str:\n        if index > 0:\n            process_packet(received_signal)\n        index = 0\n        rcv = ' '\n        \n    else:\n        received = True\n        if index == 0:\n            received_signal = []\n    if received:\n        if index == 0:\n            print(\"Start received bin data\")\n\n        
print(\"It is part number: \" + str(index + 1))\n index = index + 1\n print(bin(ord(rcv))[2:].zfill(8))\n print(hex(ord(rcv))[2:].zfill(2))\n received_signal.append(bin(ord(rcv))[2:].zfill(8))\n \n \n else:\n print(\"No data received\")\n ","repo_name":"cegielskir/RaspberryPi-Smart-Monitoring-with-SPPoB-Communication","sub_path":"video_cap/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16045953749","text":"from ml import *\r\nfrom ItemNameMap import *\r\n\r\nscena_path = 'D:\\\\Game\\\\Falcom\\\\ED_AO\\\\data\\\\scena\\\\'\r\n\r\nclass ItemInfo:\r\n def __init__(self):\r\n self.Offset = 0\r\n self.Bit = 0\r\n self.Item = ''\r\n self.Line = 0\r\n self.File = ''\r\n\r\n self.TriggerX = 0\r\n self.TriggerZ = 0\r\n self.TriggerY = 0\r\n self.TriggerRange = 0\r\n self.ActorX = 0\r\n self.ActorZ = 0\r\n self.ActorY = 0\r\n self.TalkScenaIndex = 0\r\n self.TalkFunctionIndex = 0\r\n\r\n\r\nclass ScenaInfo:\r\n def __init__(self):\r\n self.MapName = ''\r\n self.Items = []\r\n\r\n\r\ndef MakeScenarioFlags(offset, bit):\r\n return (offset << 3) | (bit & 7)\r\n\r\n\r\ndef getscenaname(file):\r\n file = os.path.basename(file)\r\n return file.split('.')[0]\r\n\r\ndef getparam(line):\r\n line = line.replace(' ', '')\r\n index = line.find('(')\r\n return line[index + 1 : -1].split(',')\r\n\r\ndef lookupactor(actor, func_index):\r\n for act in actor:\r\n TriggerX, TriggerZ, TriggerY, TriggerRange, ActorX, ActorZ, ActorY, Flags, TalkScenaIndex, TalkFunctionIndex, Unknown_22 = act\r\n TalkFunctionIndex = int(TalkFunctionIndex)\r\n TalkScenaIndex = int(TalkScenaIndex)\r\n\r\n if TalkScenaIndex != 0:\r\n raise Exception('what the fuck')\r\n\r\n if TalkFunctionIndex == func_index:\r\n return act\r\n\r\n raise Exception('can not find actor')\r\n\r\n\r\ndef main():\r\n scena = fileio.getDirectoryFiles(scena_path, '*.py')\r\n\r\n scena_info = {}\r\n\r\n global_flags_map = {}\r\n\r\n for file in scena:\r\n console.setTitle(file)\r\n lines = fileio.readLines(file)\r\n\r\n file = getscenaname(file)\r\n\r\n mapname = lines[7].split('# ')[1]\r\n if mapname == 'MapIndex':\r\n mapname = file\r\n\r\n info = ScenaInfo()\r\n scena_info[file] = info\r\n\r\n info.MapName = mapname\r\n\r\n actor = []\r\n scenaflags = None\r\n func = []\r\n\r\n current_func = ''\r\n current_item = ''\r\n\r\n for i in range(len(lines)):\r\n l = lines[i].strip()\r\n\r\n if l.startswith('DeclActor('):\r\n\r\n p = getparam(l)\r\n actor.append(p)\r\n\r\n elif l.startswith('\"Function_'):\r\n\r\n f = l.split(',')[0][1:-1]\r\n func.append(f)\r\n\r\n elif l.startswith('def Function_'):\r\n\r\n current_func = l[l.find('Function_'):l.find('(): pass')]\r\n\r\n elif l.startswith('Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags'):\r\n\r\n current_item = ''\r\n\r\n elif l.find('AddItemNumber(') != -1:\r\n\r\n prefix = 'AddItemNumber('\r\n idx = l.find(prefix)\r\n item = l[idx + len(prefix):]\r\n item = item[:item.find(', ')]\r\n\r\n #if not eval('%s in ItemTrueNameMap' % item):\r\n # continue\r\n #current_item = eval('ItemTrueNameMap[%s]' % item)\r\n current_item = eval(item)\r\n\r\n elif l.startswith('SetScenarioFlags'):\r\n\r\n scenaflags = getparam(l)\r\n\r\n elif l.find('\"宝箱里什么') != -1:\r\n\r\n if current_item == '':\r\n current_item = '耀晶片'\r\n\r\n TriggerX, TriggerZ, TriggerY, TriggerRange, ActorX, ActorZ, ActorY, Flags, TalkScenaIndex, TalkFunctionIndex, Unknown_22 = lookupactor(actor, 
func.index(current_func))\r\n\r\n item = ItemInfo()\r\n item.Offset = int(scenaflags[0], 16)\r\n item.Bit = int(scenaflags[1], 10)\r\n item.Item = current_item\r\n item.Line = i + 1\r\n item.File = file\r\n\r\n item.TriggerX = int(TriggerX)\r\n item.TriggerZ = int(TriggerZ)\r\n item.TriggerY = int(TriggerY)\r\n item.TriggerRange = int(TriggerRange)\r\n item.ActorX = int(ActorX)\r\n item.ActorZ = int(ActorZ)\r\n item.ActorY = int(ActorY)\r\n item.TalkScenaIndex = int(TalkScenaIndex)\r\n item.TalkFunctionIndex = int(TalkFunctionIndex)\r\n\r\n\r\n if not MakeScenarioFlags(item.Offset, item.Bit) in global_flags_map:\r\n info.Items.append(item)\r\n global_flags_map[MakeScenarioFlags(item.Offset, item.Bit)] = True\r\n else:\r\n print('0x%X, %d, %s: %s' % (item.Offset, item.Bit, mapname, current_item))\r\n\r\n current_item = ''\r\n scenaflags = None\r\n\r\n items = []\r\n\r\n for file in sorted(scena_info):\r\n info = scena_info[file]\r\n for item in info.Items:\r\n items.append((info, item))\r\n\r\n items = sorted(items, key = lambda item : MakeScenarioFlags(item[1].Offset, item[1].Bit))\r\n\r\n lines = []\r\n num = 0\r\n\r\n lines.append('[')\r\n\r\n lines.append(' {')\r\n lines.append(' \"SavePath\" : \"J:\\\\\\\\Falcom\\\\\\\\ED_AO\\\\\\\\savedata\"')\r\n lines.append(' },')\r\n lines.append('')\r\n\r\n for info, item in items:\r\n #lines.append(' \"item_%X_%d\" : {' % (item.Offset, item.Bit))\r\n lines.append(' {')\r\n\r\n itemid = 'item_%X_%d' % (item.Offset, item.Bit)\r\n\r\n lines.append(' \"ID\" : \"%s\",' % itemid)\r\n lines.append(' \"Map\" : \"%s\",' % info.MapName)\r\n lines.append(' \"Offset\" : \"0x%X\",' % item.Offset)\r\n lines.append(' \"Bit\" : %s,' % item.Bit)\r\n lines.append(' \"Item\" : \"%s\",' % item.Item)\r\n lines.append(' \"Line\" : %s,' % item.Line)\r\n lines.append(' \"File\" : \"%s\",' % item.File)\r\n\r\n lines.append(' \"TriggerX\" : %s,' % item.TriggerX)\r\n lines.append(' \"TriggerZ\" : %s,' % item.TriggerZ)\r\n lines.append(' \"TriggerY\" : %s,' % item.TriggerY)\r\n lines.append(' \"TriggerRange\" : %s,' % item.TriggerRange)\r\n lines.append(' \"ActorX\" : %s,' % item.ActorX)\r\n lines.append(' \"ActorZ\" : %s,' % item.ActorZ)\r\n lines.append(' \"ActorY\" : %s,' % item.ActorY)\r\n lines.append(' \"TalkScenaIndex\" : %s,' % item.TalkScenaIndex)\r\n lines.append(' \"TalkFunctionIndex\" : %s,' % item.TalkFunctionIndex)\r\n\r\n lines.append(' \"Description\" : \"%s\",' % '')\r\n lines.append(' \"Screenshot\" : \"Screenshot\\\\\\\\%s.png\",' % itemid)\r\n\r\n\r\n lines[-1] = lines[-1][:-1]\r\n lines.append(' },')\r\n lines.append('')\r\n\r\n lines.pop(-1)\r\n lines[-1] = lines[-1][:-1]\r\n lines.append(']')\r\n lines.append('')\r\n\r\n #lines.insert(1, ' \"NumberOfItems\" : %d,\\r\\n' % len(items))\r\n\r\n open('box.json', 'wb').write('\\r\\n'.join(lines).encode('U16'))\r\n\r\nTry(main)\r\n","repo_name":"Ouroboros/Falcom","sub_path":"ED7/Decompiler/GameData/Gen_BonusBox.py","file_name":"Gen_BonusBox.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"61"} +{"seq_id":"1048692777","text":"''' Libraries '''\nimport os\nimport time\nimport random\nimport logging\nrobot_logger = logging.getLogger(name=\"robot\")\nimport winsound\nfrom datetime import datetime\nfrom colorama import init\ninit(convert=True)\n\nfrom utils.exceptions import *\nfrom ntnu.model import Agent\nfrom database.model import UserObject, CourseObject, OrderObject\nfrom database.model import db\n\n\n\n''' Functions 
'''\ndef SLEEP_TIME():\n return 6 + (random.random()**1.5)*4\n\n\ndef beep_sound():\n for _ in range(5):\n winsound.Beep(800, 800)\n time.sleep(0.2)\n return\n\n\ndef get_user_based_orders():\n db.session.commit() # Use to refresh session\n orders_all = OrderObject.query.filter_by(status=\"activate\").all()\n course_ids = list(set([ order.course_id for order in orders_all ]))\n user_id2level = { user.id: user.level for user in UserObject.query.all() }\n orders_first = [ sorted(\n sorted(\n list(filter(lambda o: o.course_id == cid, orders_all)),\n key=lambda o: o.activate_time\n ),\n key=lambda o: user_id2level[o.user_id], reverse=True\n )[0]\n for cid in course_ids ]\n users = UserObject.query.all()\n user_based_orders = [{\n \"user\" : user,\n \"orders\": list(filter(lambda o: o.user_id == user.id, orders_first)),\n } for user in users ]\n return user_based_orders\n\n\ndef main_controller():\n\n robot_logger.info(\"Main controller thread starts.\")\n\n main_agent_user_id = int(os.environ.get(\"MAIN_AGENT_USER_ID\"))\n main_agent = UserObject.query.filter_by(id=main_agent_user_id).first()\n main_agent = Agent(main_agent.student_id, main_agent.original_password)\n\n user_id2level = { user.id: user.level for user in UserObject.query.all() }\n search_turn = 0\n user_counter = 0\n while True:\n\n if int(datetime.now().strftime(\"%H\")) >= 9:\n\n user_based_orders = get_user_based_orders()\n user_order = user_based_orders[user_counter % len(user_based_orders)]\n\n if len(user_order[\"orders\"]) > 0:\n\n robot_logger.info(f\"Now search_turn: {search_turn}, user_counter: {user_counter},\" + \\\n f\"total user amount: {len(user_based_orders)},\" + \\\n f\"user_based_orders amount: {len(list(filter(lambda o: len(o['orders']) != 0, user_based_orders)))}\")\n\n order = user_order[\"orders\"][search_turn % len(user_order[\"orders\"])]\n user = user_order[\"user\"]\n course = CourseObject.query.filter_by(id=order.course_id).first()\n robot_logger.info(f\"Main agent: Checking vacancy of order from user '{user.student_id}' ({user.name}) of course {course.course_no}.\")\n try:\n vacant = main_agent.check_course(course.course_no)\n except Exception as ex:\n robot_logger.error(f\"Main agent: Error while checking vacancy of course {course.course_no}: {str(ex)}\")\n vacant = False\n\n while True:\n if vacant:\n print('\\033[1;32m', end='')\n robot_logger.info(f\"Main agent: Order from user '{user.student_id}' ({user.name}) of course {course.course_no} has vacancy!\")\n sub_agent = Agent(user.student_id, user.original_password)\n robot_logger.info(f\"Sub agent '{user.student_id}' ({user.name}): Taking course {course.course_no}!\")\n try:\n result = sub_agent.take_course(course.course_no, order.domain, sub_agent.user.year)\n robot_logger.info(f\"Sub agent '{user.student_id}' ({user.name}): Result of taking course {course.course_no}: {result}.\")\n except PasswordWrongException:\n result = \"密碼錯誤,加選失敗\"\n robot_logger.info(f\"Sub agent '{user.student_id}' / '{user.original_password}' ({user.name}): Error while taking course {course.course_no}: Password wrong.\")\n except Exception as ex:\n result = ''\n robot_logger.error(f\"Sub agent '{user.student_id}' / '{user.original_password}' ({user.name}): Error while taking course {course.course_no}: {str(ex)}\")\n beep_sound()\n\n if \"成功\" in result:\n order.update_status(\"successful\")\n if sub_agent.user.line_uid is not None:\n response = sub_agent.line_notify(course)\n if response.ok:\n robot_logger.info(f\"Sending LINE Notification to User '{user.student_id}' 
({user.name}): Success.\")\n                                else:\n                                    robot_logger.warning(f\"Sending LINE Notification to User '{user.student_id}' ({user.name}): Failure because of {response.text}.\")\n                            else:\n                                robot_logger.info(f\"User '{user.student_id}' ({user.name}) did not link LINE Notification.\")\n\n                        elif \"錯誤\" in result or \"失敗\" in result:\n                            order.update_status(\"pause\", reason=result)\n                            if sub_agent.user.line_uid is not None:\n                                response = sub_agent.line_notify(course, successful=False, message=result)\n                                if response.ok:\n                                    robot_logger.info(f\"Sending LINE Notification to User '{user.student_id}' ({user.name}): Success.\")\n                                else:\n                                    robot_logger.warning(f\"Sending LINE Notification to User '{user.student_id}' ({user.name}): Failure because of {response.text}.\")\n                            else:\n                                robot_logger.info(f\"User '{user.student_id}' ({user.name}) did not link LINE Notification.\")\n\n                        # Still vacant --> Give to next user who wants this course\n                        db.session.commit() # Use to refresh session\n                        orders = OrderObject.query.filter_by(status=\"activate\").filter_by(course_id=order.course_id).all()\n                        if len(orders) == 0:\n                            break\n                        orders = sorted(orders, key=lambda o: o.activate_time)\n                        order = sorted(orders, key=lambda o: user_id2level[o.user_id])[0]\n                        user = UserObject.query.filter_by(id=order.user_id).first()\n                        \n                        sleep_time = SLEEP_TIME()\n                        # print(f\"Sleep for {sleep_time} seconds.\")\n                        time.sleep(sleep_time)\n                        continue\n\n                    break\n\n            else:\n                break\n\n            print('\\033[1;37m', end='')\n            sleep_time = SLEEP_TIME()\n            # print(f\"Sleep for {sleep_time} seconds.\")\n            time.sleep(sleep_time)\n\n            user_counter += 1\n            \n            if user_counter == len(user_based_orders):\n                user_counter = 0\n                search_turn += 1\n        \n        else:\n            time.sleep(60)","repo_name":"aisu-programming/NTNU-Course-Queuing-Website-BackEnd","sub_path":"ntnu/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"23605803441","text":"import collections\r\n\r\nsrc = open(\"C-small-attempt0.in\", \"r\")\r\nout = open(\"C-small-results.in\", \"w\")\r\n\r\nt = int(src.readline())\r\n\r\nfor i in range(1, t + 1):\r\n    r, k, n = list(map(int, src.readline().split()))\r\n    gg = list(reversed(list(map(int, src.readline().split()))))\r\n    q = collections.deque(gg, maxlen=n)\r\n    euros = 0\r\n\r\n    for rr in range(r):\r\n        fill = 0\r\n        rotations = 0\r\n        while fill + q[-1] <= k and rotations < n:\r\n            fill += q[-1]\r\n            rotations += 1\r\n            q.rotate()\r\n        euros += fill\r\n\r\n    ans = \"Case #{0}: {1}\".format(i, euros)\r\n    if i == t:\r\n        out.write(ans)\r\n    else:\r\n        out.write(ans+'\\n')\r\n\r\nout.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_55/560.py","file_name":"560.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30557325525","text":"from selenium import webdriver\n\n# Get the PhantomJS driver\nbrowser = webdriver.PhantomJS()\n# Wait for 3 seconds\nbrowser.implicitly_wait(3)\n\n# Load the url\nbrowser.get(\"http://nid.naver.com/nidlogin.login\")\n# Log in\nelement_id = browser.find_element_by_id(\"id\")\nelement_id.clear()\nelement_id.send_keys(\"ID\")\nelement_pw = browser.find_element_by_id(\"pw\")\nelement_pw.clear()\nelement_pw.send_keys(\"PW\")\n\n# Capture the screen and save it\nbrowser.save_screenshot(\"Website_C.png\")\n\nbutton = browser.find_element_by_css_selector(\"input.btn_global[type=submit]\")\nbutton.submit()\n\n# Open the mail
page\nbrowser.get(\"https://mail.naver.com/\")\nbrowser.save_screenshot(\"Website_D.png\")\ntitles = browser.find_elements_by_css_selector(\"strong.mail_title\")\n\nfor title in titles:\n    print(title.text)\n\n# Quit the browser\nbrowser.quit()","repo_name":"hanwjdgh/Docker-M-D-learning","sub_path":"web/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72906778435","text":"from typing import Optional\nfrom pydantic import BaseModel, validator\n\n\nclass Curso(BaseModel):\n    id: Optional[int] = None\n    titulo: str\n    aulas: int\n    horas: int\n\n    @validator('titulo')\n    def validar_titulo(cls, value: str) -> str:\n        if len(value.split(' ')) < 3:\n            raise ValueError('O título deve ter pelo menos 3 palavras')\n\n        if value.islower():\n            raise ValueError('O título deve ser capitalizado')\n\n        return value\n\n    @validator('aulas')\n    def validar_aulas(cls, value: int) -> int:\n        if value < 12:\n            raise ValueError('O curso deve ter um mínimo de 12 aulas')\n\n        return value\n    \n    @validator('horas')\n    def validar_horas(cls, value: int) -> int:\n        if value < 10:\n            raise ValueError('O curso deve ter pelo menos 10 horas')\n        \n        return value\n\n\ncursos = [\n    Curso(id=1, titulo='Programação para Leigos', aulas=42, horas=56),\n    Curso(id=2, titulo='Algoritmos e Lógica de Programação', aulas=52, horas=66),\n]\n","repo_name":"RogerioLimas/FastAPI_-_APIs_Modernas_e_Assincronas_com_Python","sub_path":"secao03/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15645730169","text":"\"\"\"Centrality based source detection methods.\"\"\"\nfrom collections import Counter\nfrom typing import Dict, Optional, Union\n\nfrom networkx import Graph\n\nfrom rpasdt.algorithm.centralities import (\n    compute_centrality,\n    compute_unbiased_centrality,\n)\nfrom rpasdt.algorithm.models import (\n    CentralityBasedSourceDetectionConfig,\n    CentralityCommunityBasedSourceDetectionConfig,\n    MultipleCentralityBasedSourceDetectionConfig,\n    UnbiasedCentralityBasedSourceDetectionConfig,\n    
UnbiasedCentralityCommunityBasedSourceDetectionConfig,\n)\nfrom rpasdt.algorithm.source_detectors.common import (\n    CommunityBasedSourceDetector,\n    SourceDetector,\n)\n\n\nclass CentralityBasedSourceDetector(SourceDetector):\n    CONFIG_CLASS = CentralityBasedSourceDetectionConfig\n\n    def estimate_sources(self) -> Dict[int, Union[float, Dict[int, float]]]:\n        return compute_centrality(type=self.config.centrality_algorithm, graph=self.IG)\n\n\nclass MultipleCentralityBasedSourceDetector(SourceDetector):\n    CONFIG_CLASS = MultipleCentralityBasedSourceDetectionConfig\n\n    def estimate_sources(self) -> Dict[int, Union[float, Dict[int, float]]]:\n        sums = Counter()\n        for alg in self.config.centrality_algorithms:\n            sums.update(compute_centrality(type=alg, graph=self.IG))\n        return {\n            node: value / len(self.config.centrality_algorithms)\n            for node, value in sums.items()\n        }\n\n\nclass UnbiasedCentralityBasedSourceDetector(SourceDetector):\n    CONFIG_CLASS = UnbiasedCentralityBasedSourceDetectionConfig\n\n    def estimate_sources(self) -> Dict[int, Union[float, Dict[int, float]]]:\n        return compute_unbiased_centrality(\n            type=self.config.centrality_algorithm, r=self.config.r, graph=self.IG\n        )\n\n\nclass CentralityCommunityBasedSourceDetector(CommunityBasedSourceDetector):\n    CONFIG_CLASS = CentralityCommunityBasedSourceDetectionConfig\n\n    def __init__(\n        self,\n        G: Graph,\n        IG: Graph,\n        config: Optional[CentralityCommunityBasedSourceDetectionConfig] = None,\n    ):\n        super().__init__(G, IG, config)\n\n    def find_sources_in_community(self, graph: Graph):\n        return compute_centrality(type=self.config.centrality_algorithm, graph=graph)\n\n    def __str__(self) -> str:\n        return f\"Centrality based source detector: {self.config.centrality_algorithm}\"\n\n\nclass UnbiasedCentralityCommunityBasedSourceDetector(\n    CentralityCommunityBasedSourceDetector\n):\n    CONFIG_CLASS = UnbiasedCentralityCommunityBasedSourceDetectionConfig\n\n    def find_sources_in_community(self, graph: Graph):\n        return compute_unbiased_centrality(\n            type=self.config.centrality_algorithm, graph=graph, r=self.config.r\n        )\n","repo_name":"damianfraszczak/rpasdt","sub_path":"src/rpasdt/algorithm/source_detectors/centrality.py","file_name":"centrality.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"23587202091","text":"T = int(input())\nfor case in range(1, T+1):\n    ac, aj = map(int, input().split())\n    tc = [list(map(int, input().split())) for _ in range(ac)]\n    tj = [list(map(int, input().split())) for _ in range(aj)]\n    tj.sort()\n    tc.sort()\n    if ac > aj:\n        ac, aj = aj, ac\n        tc, tj = tj, tc\n    if [ac, aj] in ([0, 0], [0, 1], [1, 0], [1, 1]):\n        result = 2\n    elif ac == 0 and aj == 2:\n        # print(tj)\n        if (tj[1][0] - tj[0][1]) >= 24*30 or (tj[0][0] - tj[1][1]+24*60) >= 24*30:\n            result = 2\n        else:\n            result = 4\n    elif ac == 1 and aj == 2:\n        if tj[0][1] <= tc[1][0] <= tj[1][0]:\n\n            if (tj[1][0] - tj[0][1]) >= 24*30:\n                result = 2\n            else:\n                result = 4\n        elif (tj[0][0] - tj[1][1]+24*60) >= 24*30:\n            result = 2\n        else:\n            result = 4\n        result = \"?\"\n    elif ac == aj == 2:\n        result = \"?\"\n    print(f\"Case #{case}: {result}\")\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/121.py","file_name":"121.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41147002861","text":"from objects.modulebase import ModuleBase\nfrom 
objects.permissions import (\n PermissionBotOwner, PermissionAddReactions, PermissionReadMessageHistory)\nfrom objects.paginators import Paginator, SelectionPaginator\n\nfrom discord import Activity, Status\n\nimport asyncio\n\n\nclass Module(ModuleBase):\n\n usage_doc = '{prefix}{aliases} [args...]'\n short_doc = 'Bot presence utils'\n long_doc = (\n 'Subcommands:\\n'\n '\\tplaying - set playing activity\\n'\n '\\tstreaming - set streaming activity\\n'\n '\\tlistening - set listening activity\\n'\n '\\twatching - set watching activity\\n'\n '\\tlist - show all activities\\n'\n '\\tremove - remove activity from list\\n\\n'\n 'Flags:\\n'\n '\\t[--status|-s] : select status (online, dnd, etc)\\n'\n '\\t[--interval|-i]