diff --git "a/1988.jsonl" "b/1988.jsonl"
new file mode 100644
--- /dev/null
+++ "b/1988.jsonl"
@@ -0,0 +1,901 @@
+{"seq_id":"75035580502","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\n\nfrom modules.common import Common, GameStatus\nfrom modules.game import Game\nfrom modules.team import Team\nfrom modules.player import Player\n\n\nclass EspnScraper(object):\n    BASE_NBA_LINK = 'https://www.espn.com/nba/'\n    CHROME_HEADERS = {\n        'User-Agent': \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Mobile Safari/537.36\"}\n\n    def __init__(self, link=None):\n        self.scoreboards_link = link if link else EspnScraper.BASE_NBA_LINK + 'scoreboard/'\n        self.boxscore_link = EspnScraper.BASE_NBA_LINK + 'boxscore'\n        self.games = []\n        self.game_ids = []\n\n    def populate_games(self, print_summary=False, include_completed=True):\n        scores = requests.get(\n            self.scoreboards_link, headers=EspnScraper.CHROME_HEADERS).text\n\n        soup = BeautifulSoup(scores, 'lxml')\n        for scoreboard in soup.find_all(class_='scoreboard'):\n            game_id = scoreboard.get('id')\n            if game_id:\n                self.game_ids.append(game_id)\n\n        for idx, game_id in enumerate(self.game_ids):\n            print(\"Collecting game: \" + str(idx+1))\n            game, status = self.parse_game(self.get_game(\n                game_id), include_completed=include_completed)\n            if game:\n                self.games.append(game)\n                if print_summary:\n                    game.summarize_points()\n            elif status == GameStatus.unknown:\n                print(\"Game \" + str(idx+1) +\n                      \" has not started\\n\")\n            elif status == GameStatus.ended:\n                print(\"Game \" + str(idx+1) +\n                      \" has ended\\n\")\n            elif status == GameStatus.postponed:\n                print(\"Game \" + str(idx+1) +\n                      \" has been postponed\\n\")\n\n    def get_game(self, id) -> BeautifulSoup:\n        game = requests.get(\n            (self.boxscore_link + \"?gameId={}\".format(id)), headers=EspnScraper.CHROME_HEADERS).text\n\n        soup = BeautifulSoup(game, 'lxml')\n        return soup\n\n    def get_teams(self, game) -> list:\n        return game.find_all(\"div\", {\"class\": re.compile('.*gamepackage-.*-wrap.*')})\n\n    def parse_player(self, player, team_name) -> Player:\n        name = player.find(class_=\"abbr\")\n        points = player.find(class_=\"pts\")\n        assists = player.find(class_=\"ast\")\n        rebounds = player.find(class_=\"reb\")\n        minutes = player.find(class_=\"min\")\n        fg = player.find(class_=\"fg\")\n        three_pt = player.find(class_=\"3pt\")\n        return Player({\n            \"team\": team_name,\n            \"name\": name.text if name else \"\",\n            \"points\": int(points.text) if points and points.text.isnumeric() else 0,\n            \"assists\": int(assists.text) if assists and assists.text.isnumeric() else 0,\n            \"rebounds\": int(rebounds.text) if rebounds and rebounds.text.isnumeric() else 0,\n            \"minutes\": int(minutes.text) if minutes and minutes.text.isnumeric() else 0,\n            \"fg\": fg.text if fg and re.match(\"[0-9]+-[0-9]+\", fg.text) else \"0-0\",\n            \"3pt\": three_pt.text if three_pt and re.match(\"[0-9]+-[0-9]+\", three_pt.text) else \"0-0\"\n        })\n\n    def parse_team(self, team) -> Team:\n        team_name = team.find(\n            \"div\", {\"class\": re.compile('.*team-name.*')}).text\n        tbl_bodies = team.find(\n            \"table\", {\"class\": re.compile('.*mod-data.*')}).find_all(\"tbody\")\n        team_highlights = tbl_bodies[-1].find(\"tr\", {\"class\": \"highlight\"})\n        points = int(team_highlights.find(class_='pts').text)\n        starters = [self.parse_player(player, team_name)\n                    for player in tbl_bodies[0].findChildren(\"tr\")]\n        bench_players = [self.parse_player(player, team_name)\n                         for player in tbl_bodies[-1].findChildren(\"tr\", 
class_=lambda x: x != \"highlight\")]\n        players = starters + bench_players\n\n        return Team({\n            \"name\": team_name,\n            \"points\": points,\n            \"players\": players\n        })\n\n    def parse_game(self, game, include_completed=True) -> (Game, GameStatus):\n        game_details = game.find(class_='status-detail').text\n        if not game_details:\n            return None, GameStatus.unknown\n        elif game_details.startswith(\"End of\"):\n            game_details = game_details.replace(\"End of\", \"0:00 -\")\n        elif game_details.startswith(\"Start of\"):\n            game_details = game_details.replace(\"Start of\", \"12:00 -\")\n        elif game_details == \"Halftime\":\n            game_details = \"0:00 - 2nd\"\n        elif game_details == \"Postponed\":\n            return None, GameStatus.postponed\n        elif game_details == \"Final\":\n            if not include_completed:\n                return None, GameStatus.ended\n            game_details = \"0:00 - 4th\"\n        game_details = game_details.split(\" - \")\n        time_left = game_details[0]\n        quarter = game_details[1]\n        teams = [self.parse_team(team) for team in self.get_teams(game)]\n        return Game({\n            \"teams\": teams,\n            \"quarter\": quarter,\n            \"time_left\": time_left\n        }), GameStatus.running\n","repo_name":"GurmeharS/WhenToWatch","sub_path":"modules/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"20643587452","text":"import cv2 # for computer vision.\r\nimport numpy as np # for scientific computing with Python\r\nimport pandas as pd # for data analysis\r\nimport requests\r\nimport json\r\nimport os\r\nimport smtplib # for sending emails\r\nfrom email import encoders # for encoding email attachments\r\nfrom email.mime.base import MIMEBase # for the implementation of MIME (Multipurpose Internet Mail Extensions)\r\nfrom email.mime.multipart import MIMEMultipart # A class to represent a MIME Multipart message, as used in email\r\nfrom email.mime.text import MIMEText # A class to represent plain text in email messages\r\n\r\ndef recognize_plate(image_path):\r\n    # Set your API key and endpoint URL\r\n    API_KEY = 'API KEY'\r\n    API_URL = 'https://api.platerecognizer.com/v1/plate-reader/'\r\n\r\n    # Read the image file as binary data\r\n    with open(image_path, 'rb') as image_file:\r\n        image_data = image_file.read()\r\n\r\n    # Make the API request\r\n    response = requests.post(API_URL,\r\n                             headers={'Authorization': f'Token {API_KEY}'},\r\n                             files={'upload': image_data})\r\n\r\n    try:\r\n        # Parse the JSON response\r\n        results = json.loads(response.text)\r\n\r\n        # Extract license plate information if available\r\n        if 'results' in results:\r\n            plate_info = results['results'][0]\r\n            plate = plate_info['plate'] # the recognized plate text\r\n            confidence = plate_info['score'] # the detection confidence score\r\n            #return f'License plate: {plate} (confidence {confidence})'\r\n            return plate\r\n\r\n        else:\r\n            return 'No license plates found in image.'\r\n    except:\r\n        if response.status_code == 201:\r\n            return 'Number Plate Text Not Recognized'\r\n        else:\r\n            # Handle any errors that occurred during the API request or response parsing\r\n            return 'Error: Unknown Error'\r\n\r\ndef send_email(to_email, subject, body, attachment):\r\n\r\n    from_email = \"EMAIL\" # Defining the sender email\r\n    password = \"PASSWORD\" # Defining the sender password\r\n\r\n    msg = MIMEMultipart() # Creating a MIME object\r\n    msg['From'] = from_email # Adding the sender address to the message\r\n    msg['To'] = to_email # Adding the recipient address to the message\r\n    msg['Subject'] = subject # Adding the subject of the email to the message\r\n    msg.attach(MIMEText(body, 'plain')) # Adding the body of the email to the message\r\n\r\n    # Opening the attachment to be sent\r\n    with open(attachment, \"rb\") as f:\r\n        # Creating a MIME object for the attachment\r\n        attach = MIMEBase('application', 'octet-stream', Name=attachment)\r\n\r\n        # Setting the payload of the attachment\r\n        attach.set_payload(f.read())\r\n\r\n    encoders.encode_base64(attach) # Encoding the attachment\r\n\r\n    # Adding headers to the attachment\r\n    attach.add_header('Content-Disposition', 'attachment', filename=attachment)\r\n    msg.attach(attach) # Attaching the attachment to the message\r\n\r\n    try:\r\n        # Connecting to the Gmail SMTP server\r\n        server = smtplib.SMTP('smtp.gmail.com', 587)\r\n\r\n        # Starting the encrypted connection\r\n        server.starttls()\r\n        # Login to the email account\r\n        server.login(from_email, password)\r\n\r\n        # Sending the email\r\n        server.sendmail(from_email, to_email, msg.as_string())\r\n        # Quitting the SMTP server\r\n        server.quit()\r\n\r\n        # Printing a success message\r\n        print(\"Email sent to \" + to_email)\r\n    except Exception as e:\r\n        # Returning an error message if an error occurred while sending the email\r\n        return f\"An error occurred while sending the email: {e}\"\r\n","repo_name":"rahulmuggalla/Helmet-Detection-with-License-Plate-Recognition-and-Email-Notification-System","sub_path":"mail_db_loop.py","file_name":"mail_db_loop.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"}
+{"seq_id":"17505909410","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom django.http import HttpResponse\nfrom django.template import loader\nfrom plan.models import Schedule\n\n\nclass PageHandler:\n\n    @staticmethod\n    def get_home_view(request):\n        template = loader.get_template('base_home.html')\n        http_response = HttpResponse(template.render({}, request))\n        return http_response\n\n    @staticmethod\n    def get_program_view(request):\n        template = loader.get_template('base_program.html')\n        http_response = HttpResponse(template.render({}, request))\n        return http_response\n\n    @staticmethod\n    def get_schedule_view(request):\n        template = loader.get_template('base_schedule.html')\n\n        schedule_id = request.GET.get(\"id\")\n        context = {\"schedule_id\": schedule_id}\n        if request.user.is_authenticated:\n            schedules = Schedule.objects.filter(user=request.user)\n            context[\"schedules\"] = schedules\n\n            if schedule_id is not None:\n                try:\n                    context[\"requested_schedule\"] = schedules.get(id=schedule_id)\n                except ObjectDoesNotExist:\n                    context[\"requested_schedule\"] = \"Not found\"\n\n        http_response = HttpResponse(template.render(context, request))\n        return http_response\n\n    @staticmethod\n    def get_schedule_add_view(request):\n        template = loader.get_template('base_schedule_add.html')\n        http_response = HttpResponse(template.render({}, request))\n        return http_response\n\n    @staticmethod\n    def get_account_view(request):\n        template = loader.get_template('base_account.html')\n        http_response = HttpResponse(template.render({}, request))\n        return http_response\n","repo_name":"AntonNiko/ProgramPlanner","sub_path":"plan/handlers/page_handler.py","file_name":"page_handler.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"86580190323","text":"from collections import deque\n\ndef solution(_a, _b) -> int:\n    queue = deque()\n    queue.append((_a, 1))\n    while queue:\n        num, count = queue.popleft()\n        next_num = [10*num+1, 2*num]\n        for _next_n in next_num:\n            if _next_n == _b : return count + 1\n            if _next_n < _b :\n                queue.append((_next_n, count+1))\n    return -1\n\n\n\na, b = 
map(int, input().split())\nanswer = solution(a, b)\nprint(answer)\n","repo_name":"SAlgorithmStudy6/AlgorithmStudy","sub_path":"2022/08.11/이지윤/[BOJ_16953]A_B.py","file_name":"[BOJ_16953]A_B.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"74989676819","text":"import tkinter\nfrom tkinter import *\nfrom typing import Callable, List\nfrom PIL import Image, ImageTk\nimport os\nfrom abc import ABC, abstractmethod\n\nfrom loss_counter import read_record\n\n\ndef load_img(label: tkinter.Label, img_path: str):\n image = Image.open(img_path)\n img = image.resize((350, 290))\n photo = ImageTk.PhotoImage(img)\n label.config(image=photo)\n label.image = photo\n\n\nclass EpochVis(ABC):\n @abstractmethod\n def epoch_update(self, epoch: int):\n pass\n\n\nclass ImgRecordVis(EpochVis):\n def __init__(self,\n frame_win: Tk,\n vis_name: str,\n img_dir: str,\n name_filter: Callable[[str], bool],\n name2epoch: Callable[[str], int]\n ):\n self.img_dir = img_dir\n self.name_list = list(filter(name_filter, os.listdir(img_dir)))\n self.available_epoch_list = [name2epoch(name) for name in self.name_list]\n info_frame = Frame(frame_win)\n info_frame.grid(row=0, column=0)\n name = Label(info_frame, text=vis_name)\n name.grid(row=0, column=0)\n self.label_var = StringVar()\n self.epoch_label = Label(info_frame, textvariable=self.label_var)\n self.epoch_label.grid(row=0, column=1)\n self.img_label = Label(frame_win)\n self.img_label.grid(row=1, column=0)\n\n def epoch_update(self, epoch: int):\n if epoch not in self.available_epoch_list:\n return\n self.label_var.set(f'Epoch {epoch}')\n idx = self.available_epoch_list.index(epoch)\n img_name = self.name_list[idx]\n img_path = os.path.join(self.img_dir, img_name)\n load_img(self.img_label, img_path)\n\n\nclass TextRecordVis(EpochVis):\n def __init__(self,\n frame_win: Frame,\n vis_name: str,\n record_dir: str,\n keys: List[str],\n alias: List[str],\n ):\n assert len(keys) == len(alias), \"Error: len(keys) != len(alias)\"\n self.keys = keys\n self.records = read_record(record_dir)\n self.available_epoch_list = self.records[keys[0]].X\n name = Label(frame_win, text=vis_name)\n name.pack(side=TOP)\n\n epoch_frame = Frame(frame_win)\n epoch_frame.pack(side=TOP)\n self.epoch_var = StringVar()\n self.epoch_label = Label(frame_win, textvariable=self.epoch_var)\n\n self.index_list_frame = Frame(frame_win)\n self.index_list_frame.pack(side=TOP)\n l_name_list, l_value_list, self.var_list = self.init_index_list(self.index_list_frame, alias)\n\n def init_index_list(self, frame_win, alias):\n l_name_list = []\n l_value_list = []\n var_list = []\n for i in range(len(alias)):\n l_name = Label(frame_win, text=f'{alias[i]}: ')\n l_name.grid(row=i, column=0)\n l_name_list.append(l_name)\n\n var = StringVar(value=str(0.0))\n var_list.append(var)\n\n l_value = Label(frame_win, textvariable=var)\n l_value.grid(row=i, column=1)\n l_value_list.append(l_value)\n return l_name_list, l_value_list, var_list\n\n def epoch_update(self, epoch: int):\n if epoch not in self.available_epoch_list:\n return\n idx = self.available_epoch_list.index(epoch)\n self.epoch_var.set(str(epoch))\n for i in range(len(self.keys)):\n key = self.keys[i]\n value = self.records[key].Y[idx]\n self.var_list[i].set(str(value))\n\n\nclass EpochBar:\n def __init__(self,\n frame_win,\n epoch_start: int,\n epoch_end: int,\n epoch_tick: int,\n on_epoch_change: Callable):\n self.epoch_var = IntVar(value=0)\n self.epoch_bar = 
Scale(\n            frame_win,\n            orient=HORIZONTAL,\n            from_=epoch_start,\n            to=epoch_end,\n            resolution=epoch_tick,\n            tickinterval=epoch_tick*10,\n            command=on_epoch_change,\n            length=1500,\n        )\n        self.epoch_bar.pack(side=LEFT)\n\n\nclass NamePanel:\n    def __init__(self,\n                 win,\n                 names: List[str],\n                 ):\n        self.name_labels = []\n        for text in names:\n            label = Label(win, text=text, width=20, wraplength=100)\n            label.pack(side=TOP)\n            self.name_labels.append(label)\n\n\nclass DisplayPanel:\n    def __init__(self,\n                 win: Tk,\n                 exp_name_list: List[List[str]],\n                 epoch_vis_creator_list: List[List[Callable[[Frame], EpochVis]]],\n                 epoch_bar_creator: Callable[[Frame, Callable], EpochBar],\n                 ):\n        self.exp_name_list = exp_name_list\n        self.epoch_vis_creator_list = epoch_vis_creator_list\n        self.epoch_vis_frame = Frame(win)\n        self.epoch_vis_frame.pack(side=TOP)\n        self.widget_list = self.make_rows()\n        epoch_bar_frame = Frame(win)\n        epoch_bar_frame.pack(side=TOP)\n        self.epoch_bar = epoch_bar_creator(epoch_bar_frame, self.on_epoch_change)\n\n    def make_rows(self):\n        widget_list = []\n        for i in range(0, len(self.epoch_vis_creator_list)):\n            name_frame = Frame(self.epoch_vis_frame)\n            name_frame.grid(row=i, column=0)\n            name_panel = NamePanel(name_frame, self.exp_name_list[i])\n            for j in range(0, len(self.epoch_vis_creator_list[i])):\n                epoch_vis_creator = self.epoch_vis_creator_list[i][j]\n                widget_frame = Frame(self.epoch_vis_frame)\n                widget_frame.grid(row=i, column=j+1)\n                widget = epoch_vis_creator(widget_frame)\n                widget_list.append(widget)\n        return widget_list\n\n    def on_epoch_change(self, epoch):\n        int_epoch = int(epoch)\n        for widget in self.widget_list:\n            widget.epoch_update(int_epoch)\n\n\n\n\nif __name__ == '__main__':\n    dir = \"D:/Projects/Gus Xia/S3Plus/VQ/exp/2023.04.20_100vq_Zc[1]_Zs[0]_edim8_plusUnit128.1_encFc128.1_singleS_plusOnE/1/EvalResults\"\n\n    win = Tk()\n\n\n    def name_filter(name: str):\n        return name.split('.')[0].isdigit()\n\n\n    def name2epoch(name: str):\n        return int(name.split('.')[0])\n\n\n    irv = ImgRecordVis(win, 'aa', dir, name_filter, name2epoch)\n    print(irv.name_list)\n    irv.epoch_update(11800)\n","repo_name":"XuanjieLiu/S3Plus","sub_path":"eval/record_vis.py","file_name":"record_vis.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"22792469745","text":"import requests\nimport csv\nimport sys\nimport os\n\nimport time\n\n\ndef collect_data(symbol, interval):\n    base_url = \"https://api.binance.com/api/v3/klines\"\n\n    params = {\n        \"symbol\": symbol,\n        \"interval\": interval,\n        \"limit\": 1000\n    }\n    response = requests.get(base_url, params=params)\n    if response.status_code == 200:\n        data = response.json()\n\n        # Format data and save to CSV\n        headers = [\"Open_Time\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Close Time\", \"Quote Asset Volume\",\n                   \"Number_of_Trades\", \"Taker_buy_volume\", \"Taker_Buy_Quote_Volume\", \"Ignore\"]\n\n        directory = \"data\"\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n\n        filename = os.path.join(directory, f\"{symbol}_{interval}.csv\") # save inside the data directory\n\n        with open(filename, \"w\", newline=\"\") as file:\n            writer = csv.writer(file)\n            writer.writerow(headers)\n            writer.writerows(data)\n\n        print(f\"Data collected and saved for {symbol} at {interval}\")\n    else:\n        print(\"Failed to fetch data from the Binance API.\")\n\n\nif __name__ == \"__main__\":\n    symbol = input(\"Enter the symbol (e.g., BTCUSDT): \")\n    interval = input(\"Enter the interval (e.g., 4h): \")\n    collect_data(symbol, 
interval)\n","repo_name":"oluwasube/technical-challenge","sub_path":"data/collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34772626761","text":"from supervised.classification.decisionTreeClassifier import DecisionTreeClassifier\nfrom visualisations.color_palette import two_colors, two_colors_map\nfrom utils.metrics import accuracy\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.model_selection import train_test_split\n\nfrom matplotlib.animation import FuncAnimation, PillowWriter\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nimport numpy as np\nimport argparse\n\n\nmatplotlib.use(\"TkAgg\")\n\nif __name__ == '__main__':\n\n # Argument parsing.\n parser = argparse.ArgumentParser(description='Visualise a custom Decision Tree Classifier model in training.')\n parser.add_argument('--max_depth', type=int, help='Maximum depth of the tree.', default=15)\n parser.add_argument('--random_state', type=int, help='Random state for data generation.', default=42)\n parser.add_argument('--n_samples', type=int, help='Number of data points.', default=1000)\n parser.add_argument('--test_size', type=float, help='Test set size.', default=.2)\n args = parser.parse_args()\n\n # Parameters\n max_depth = args.max_depth\n\n X, y = make_classification(n_samples=args.n_samples, n_features=2, n_informative=2, n_redundant=0,\n random_state=args.random_state)\n\n # Split train and test data\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=args.test_size, random_state=42)\n\n # Model\n classifiers = [DecisionTreeClassifier(max_depth=i + 1, minimum_sample_leaf=1) for i in range(max_depth)]\n\n # Create decision boundary data\n h = 2\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n area_data = np.c_[xx.ravel(), yy.ravel()]\n\n # Fitting the models\n predictions_test = []\n predictions_train = []\n area_pred = []\n for clf in classifiers:\n clf.fit(X_train, y_train)\n predictions_test.append(clf.predict(X_test))\n predictions_train.append(clf.predict(X_train))\n area = np.array(clf.predict(area_data))\n area_pred.append(area.reshape(xx.shape))\n\n # Create the update function for the graph\n def update(i):\n plt.clf()\n plt.xlabel('Feature 1')\n plt.ylabel('Feature 2')\n fig.suptitle('Decision Tree Classifier', fontsize=20)\n plt.title(f'Decision Tree Depth: {i + 1} - '\n f'Accuracy Test: {round(100 * accuracy(y_test, predictions_test[i]), 2)}% '\n f'Accuracy Train: {round(100 * accuracy(y_train, predictions_train[i]), 2)}% '\n )\n plt.scatter(X[:, 0], X[:, 1], c=[two_colors[k] for k in y])\n plt.imshow(area_pred[i], interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n cmap=two_colors_map, aspect='auto', origin='lower', alpha=.4)\n\n # Plotting and saving the gif\n fig, ax = plt.subplots(figsize=(15, 6), dpi=80)\n animation = FuncAnimation(fig, update, frames=max_depth, interval=800, repeat=False)\n plt.xlabel('Feature 1')\n plt.ylabel('Feature 2')\n plt.show()","repo_name":"SaadChaouki/ml-eli5-cli5","sub_path":"visualisations/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"21243952330","text":"import os\nimport 
shutil\nimport time\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torchvision.models.optical_flow import raft_large\nfrom torchvision.models.optical_flow import Raft_Large_Weights\nfrom torchvision.utils import flow_to_image\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom src.agent.unit import UNIT\nfrom src.model.unet import UNet\nfrom src.utils.train import read_config\nfrom src.utils.datasets import get_dataloaders, denormalize\n\n# Read config, make log dirs etc.\nconf, uneasy_conf = read_config('src/config/OF_UNIT_CATARACTS_cataract101.yml')\nconf.device = 'cuda:2'\nconf.data.seq_frames_train = 2\nconf.training.batch_size = 4\nroot = '/gris/gris-f/homestud/yfrisch/swc/Temporal-Consistent-CycleGAN/'\n# root = '/home/yannik/Temporal-Consistent-CycleGAN/'\nif os.path.isdir(root + 'of_test/'):\n shutil.rmtree(root + 'of_test/')\nos.makedirs(root + 'of_test/', exist_ok=False)\n\ntrain_dl, test_dl = get_dataloaders(conf, shuffle_test=True)\n\nagent = UNIT(conf)\nagent.gen_A.load_state_dict(torch.load(\n root + 'results/UNIT_CATARACTS_Cataract101/2022_09_30-21_30_35/checkpoints/gen_A_epoch499.PTH', map_location='cpu'))\nagent.gen_B.load_state_dict(torch.load(\n root + 'results/UNIT_CATARACTS_Cataract101/2022_09_30-21_30_35/checkpoints/gen_B_epoch499.PTH', map_location='cpu'))\n\nunet = UNet(n_channels=3+3+3+3+2, n_classes=2).to(conf.device)\noptim = torch.optim.AdamW(list(unet.parameters()) +\n list(agent.gen_A.parameters()) +\n list(agent.gen_B.parameters()), lr=0.01)\nweights = Raft_Large_Weights.DEFAULT\ntransforms = weights.transforms()\nflo_model = raft_large(weights=weights, progress=False).to(conf.device)\nflo_model = flo_model.eval()\n\nfor epoch in range(10):\n\n loss_per_sample = []\n for id, sample in enumerate(tqdm(train_dl)):\n\n a1 = sample['A'][:, -2].to(conf.device)\n a2 = sample['A'][:, -1].to(conf.device)\n b1 = sample['B'][:, -2].to(conf.device)\n b2 = sample['B'][:, -1].to(conf.device)\n N, C, H, W = b2.shape\n\n optim.zero_grad()\n\n # Estimate flow a1 -> a2\n with torch.no_grad():\n a_flow = flo_model(a1, a2)[-1]\n\n # Translate a1 and a2\n h_a1, n_a1 = agent.gen_A.encode(a1)\n h_a2, n_a2 = agent.gen_A.encode(a2)\n ab1 = agent.gen_B.decode(h_a1 + n_a1)\n ab2 = agent.gen_B.decode(h_a2 + n_a2)\n\n # Translate motion\n translated_flow = unet(torch.cat([\n a1, a2, ab1, ab2, a_flow\n ], dim=1))\n\n # Warp translated image according to translated motion\n xx = torch.arange(0, W).view(1, -1).repeat(H, 1)\n yy = torch.arange(0, H).view(-1, 1).repeat(1, W)\n xx = xx.view(1, 1, H, W).repeat(N, 1, 1, 1)\n yy = yy.view(1, 1, H, W).repeat(N, 1, 1, 1)\n grid = torch.cat((xx, yy), 1).float().to(conf.device)\n b_vgrid = Variable(grid, requires_grad=True) - a_flow\n b_vgrid[:, 0, :, :] = 2.0 * b_vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0\n b_vgrid[:, 1, :, :] = 2.0 * b_vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0\n b_vgrid = b_vgrid.permute(0, 2, 3, 1)\n ab2_warped = F.grid_sample(ab1, b_vgrid, align_corners=True)\n ab_mask = Variable(torch.ones(ab1.size()), requires_grad=True).to(conf.device)\n ab_mask = F.grid_sample(ab_mask, b_vgrid)\n ab_mask[ab_mask < 0.9999] = 0\n ab_mask[ab_mask > 0] = 1\n ab2_warped = ab_mask * ab2_warped\n\n loss = F.mse_loss(ab2_warped, ab2)\n loss.backward()\n loss_per_sample.append(loss.item())\n optim.step()\n\n if id == 10:\n fig, ax = plt.subplots(N, 5)\n for n in range(N):\n ax[n, 0].imshow(denormalize(a1[n]).permute(1, 2, 0).detach().cpu())\n ax[n, 0].axis('off')\n 
ax[n, 1].imshow(flow_to_image(a_flow[n]).permute(1, 2, 0).detach().cpu())\n                ax[n, 1].axis('off')\n                ax[n, 2].imshow(denormalize(a2[n]).permute(1, 2, 0).detach().cpu())\n                ax[n, 2].axis('off')\n                ax[n, 3].imshow(denormalize(ab1[n]).permute(1, 2, 0).detach().cpu())\n                ax[n, 3].axis('off')\n                ax[n, 4].imshow(denormalize(ab2_warped[n]).permute(1, 2, 0).detach().cpu())\n                ax[n, 4].axis('off')\n            plt.savefig(root + f'of_test/ep{epoch}.png')\n            break\n\n    time.sleep(0.2)\n    print(f\"Epoch {epoch} Avg. loss {np.mean(loss_per_sample)}\")\n    time.sleep(0.2)\n","repo_name":"MECLabTUDA/TC-Seq2Seq","sub_path":"scripts/train/train_motion_translator.py","file_name":"train_motion_translator.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"70055093781","text":"import constants\nfrom game.scripting.action import Action\nfrom game.shared.coordinate import Coordinate\n\n\nclass ControlPlayers(Action):\n    # Controls the method of action of the players.\n\n    def __init__(self, keyboard_service):\n\n        self._keyboard_service = keyboard_service\n        \n        self._player_one_direction = Coordinate(0, -constants.CELL_SIZE)\n        self._player_two_direction = Coordinate(0, -constants.CELL_SIZE)\n\n    def execute(self, cast):\n\n        # Player one keys\n\n        # left\n        if self._keyboard_service.press_key_down('a'):\n            self._player_one_direction = Coordinate(-constants.CELL_SIZE, 0)\n\n        # right\n        if self._keyboard_service.press_key_down('d'):\n            self._player_one_direction = Coordinate(constants.CELL_SIZE, 0)\n\n        # up\n        if self._keyboard_service.press_key_down('w'):\n            self._player_one_direction = Coordinate(0, -constants.CELL_SIZE)\n\n        # down\n        if self._keyboard_service.press_key_down('s'):\n            self._player_one_direction = Coordinate(0, constants.CELL_SIZE)\n\n        cycle_one = cast.get_first_player(\"player_one\")\n        cycle_one.turn_cycle(self._player_one_direction)\n\n        # Player two keys\n\n        # left\n        if self._keyboard_service.press_key_down('j'):\n            self._player_two_direction = Coordinate(-constants.CELL_SIZE, 0)\n\n        # right\n        if self._keyboard_service.press_key_down('l'):\n            self._player_two_direction = Coordinate(constants.CELL_SIZE, 0)\n\n        # up\n        if self._keyboard_service.press_key_down('i'):\n            self._player_two_direction = Coordinate(0, -constants.CELL_SIZE)\n\n        # down\n        if self._keyboard_service.press_key_down('k'):\n            self._player_two_direction = Coordinate(0, constants.CELL_SIZE)\n\n        cycle_two = cast.get_second_player(\"player_two\")\n        cycle_two.turn_cycle(self._player_two_direction)\n","repo_name":"CalebHatch/Cycle-Game","sub_path":"cycle/game/scripting/control_players.py","file_name":"control_players.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"1486230595","text":"#!/usr/bin/python3\n\"\"\"\nThis module contains the BaseModel class, that all other classes inherit from\n\"\"\"\nfrom datetime import datetime\nimport models\nfrom os import getenv\nfrom sqlalchemy import Column, Integer, String, Table, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nimport uuid\n\nBase = declarative_base()\n\nclass BaseModel:\n    \"\"\"BaseModel to be inherited for all objects\"\"\"\n    id = Column(String(60), primary_key=True, nullable=False)\n    created_at = Column(DateTime(timezone=True), default=datetime.now,\n                        nullable=False)\n    updated_at = Column(DateTime(timezone=True), default=datetime.now,\n                        nullable=False, onupdate=datetime.now)\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"\n        Initialize class\n        \"\"\"\n        if args:\n            kwargs = args[0]\n        if kwargs:\n            flag_id = False\n            flag_created_at = False\n            for k in kwargs.keys():\n                if k == \"created_at\" or k == \"updated_at\":\n                    if k == \"created_at\":\n                        flag_created_at = True\n                    if not isinstance(kwargs[k], datetime):\n                        kwargs[k] = datetime(*self.__str_to_numbers(kwargs[k]))\n                elif k == \"id\":\n                    flag_id = True\n                setattr(self, k, kwargs[k])\n            if not flag_created_at:\n                self.created_at = datetime.now()\n            if not flag_id:\n                self.id = str(uuid.uuid4())\n        elif not args:\n            self.created_at = datetime.now()\n            self.id = str(uuid.uuid4())\n\n    def __str_to_numbers(self, s):\n        \"\"\"\n        Prepares a string for datetime\n        \"\"\"\n        tmp = ''.join([o if o not in \"T;:.,-_\" else \" \" for o in s]).split()\n        res = [int(i) for i in tmp]\n        return res\n\n    def save(self):\n        \"\"\"\n        method to save model\n        \"\"\"\n        self.__dict__[\"updated_at\"] = datetime.now()\n        models.storage.new(self)\n        models.storage.save()\n\n    def __str__(self):\n        \"\"\"\n        format string representation\n        \"\"\"\n        return \"[{}] ({}) {}\".format(type(self).__name__, self.id,\n                                     self.__dict__)\n\n    def to_json(self, saving=False):\n        \"\"\"\n        convert file to json\n        \"\"\"\n        dupe = self.__dict__.copy()\n        dupe.pop('_sa_instance_state', None)\n\n        dupe[\"created_at\"] = dupe[\"created_at\"].isoformat()\n        if (\"updated_at\" in dupe):\n            dupe[\"updated_at\"] = dupe[\"updated_at\"].isoformat()\n        dupe[\"__class__\"] = type(self).__name__\n        if not saving:\n            dupe.pop(\"password\", None)\n            dupe.pop(\"amenities\", None)\n            dupe.pop(\"amenities_id\", None)\n        return dupe\n","repo_name":"JennieChu/text_meme","sub_path":"models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"}
+{"seq_id":"21906894847","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\nfrom rest_framework import status\nfrom django.db.models import Q\nfrom rest_framework import viewsets\n\nfrom rest_framework.filters import (\n    SearchFilter,\n    OrderingFilter,\n    )\nfrom rest_framework.generics import (\n    CreateAPIView,\n    DestroyAPIView,\n    ListAPIView, \n    UpdateAPIView,\n    RetrieveAPIView,\n    RetrieveUpdateAPIView\n    )\n\n\nfrom .paginators import ContainerPaginator\nfrom .serializers import ContainerViewSerializer\nfrom dependantmodels.serializers import OrderedListProjectGroupByGroupSerializer\nfrom dependantmodels.utils import OrganizationMethods,OrganizationGroupMethods,OrganizationProjectMethods\nfrom .utils import ContainerViewMethods\nfrom accounts.permissions import SuperUserPermission\nfrom accounts.authentication import UserAuthentication\nfrom accounts.utils import UserClass\nfrom dependantmodels.serializers import OrganizationGroupSerializerProjectsPerGroup, OrganizationGroupSerializer,OrderedListProjectSerializer\nfrom dependantmodels.models import Organization,OrganizationGroup,ContainerView, OrganizationProject\nfrom features.models import FeatureTypeGroup,Feature\nfrom containers.permissions import CanUpdateContainer,CanReadContainer\n\n\n# Create the Container\nclass ContainerViewCreateAPIView(CreateAPIView):\n\n    \"\"\" Creates the Container \"\"\"\n    queryset = ContainerView.objects.all()\n    serializer_class = ContainerViewSerializer\n    permission_classes = [CanUpdateContainer]\n    
authentication_classes = (UserAuthentication,)\n\n    # def create(self, request, *args, **kwargs):\n    #     \"\"\"Overriding the create function\"\"\"\n    #     serializer = self.get_serializer(data=request.data)\n    #     serializer.is_valid(raise_exception=True)\n    #     self.perform_create(serializer)\n    #     headers = self.get_success_headers(serializer.data)\n    #     return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n    def perform_create(self, serializer):\n        serializer.save(owner=self.request.user)\n\n\n\nclass ContainerViewListAPIView(ListAPIView):\n    \"\"\" \n    List all the containers of the organization \n    \"\"\"\n    queryset = ContainerView.objects.filter(active = True)\n    serializer_class = ContainerViewSerializer\n    filter_backends= [SearchFilter, OrderingFilter]\n    authentication_classes = [UserAuthentication]\n    permission_classes = []\n    search_fields = []\n    \n    def get_queryset(self, *args, **kwargs):\n        filters = {}\n        queryset = self.queryset.order_by('organization')\n        user = self.request.user\n        if UserClass.UserIsSuperUser(user):\n            return queryset\n        else:\n            orgObjs = Organization.objects.filter(Q(uid = user.get('organization')))\n            if orgObjs.exists():\n                filters['organization'] = orgObjs.first()\n                return self.queryset.filter(**filters)\n            return ContainerView.objects.none() \n\n    def list(self, request, *args, **kwargs):\n        queryset = self.filter_queryset(self.get_queryset())\n        userUid = request.user.get('uid')\n\n        if UserClass.UserIsSuperUser(request.user) or request.user.get('is_manager') or request.user.get('is_owner') :\n            filteredQueryset = queryset\n        else:\n            filterDict = [{'uid': userUid}]\n            filteredQueryset_1 = queryset.filter(Q(readLabels__contains = filterDict) | Q(readUsers__contains = filterDict)| Q(writeUsers__contains = filterDict) | Q(writeLabels__contains = filterDict) | Q(owner__uid = userUid))\n            filteredQueryset_2 = queryset.filter(Q(organization__readLabels__contains = filterDict) | Q(organization__readUsers__contains = filterDict)| Q(organization__writeUsers__contains = filterDict) | Q(organization__writeLabels__contains = filterDict))\n            filteredQueryset = filteredQueryset_1 | filteredQueryset_2\n        serializer = self.get_serializer(filteredQueryset, many=True)\n        return Response(serializer.data)\n\n\n    \nclass AddFTGToContainerView(APIView):\n    \"\"\"\n    Add FTG To the Container View\n    \"\"\"\n    authentication_classes = [UserAuthentication]\n    permission_classes = [CanUpdateContainer]\n\n    def get(self, request,uid=None, format=None):\n        return Response(\"Add FTG To the Container View\")\n\n    def post(self,request,uid,format=None):\n\n        \n        FTGUid = request.data.get('featureTypeGroup', None)\n        try: \n            containerView = ContainerView.objects.get(uid = uid)\n            # May raise a permission denied\n            self.check_object_permissions(self.request, containerView)\n        except ContainerView.DoesNotExist:\n            return Response({'error': True, 'errorList': 'Container with uid does not exist'}, status = status.HTTP_404_NOT_FOUND)\n        try: \n            featureTypeGroup = FeatureTypeGroup.objects.get(uid = FTGUid)\n        except FeatureTypeGroup.DoesNotExist:\n            return Response({'error': True, 'errorList': 'FTG with uid does not exist'}, status = status.HTTP_404_NOT_FOUND)\n        \n        if featureTypeGroup:\n            if featureTypeGroup.org == containerView.organization:\n                containerView.featureTypeGroups.add(featureTypeGroup)\n            else:\n                return Response({'error': True, 'errorList' : \"feature type group belongs to a different org\"},status = status.HTTP_400_BAD_REQUEST)\n\n\n        message = { 'featureTypeGroup':FTGUid,'containerView': uid }\n        return Response({'success': True, 'data' : message},status = status.HTTP_200_OK)\n\n\nclass RemoveFTGToContainerView(APIView):\n    \"\"\"\n    Remove FTG To the Container View\n    \"\"\"\n    authentication_classes = [UserAuthentication]\n    permission_classes = [CanUpdateContainer]\n\n    def get(self, request,uid=None, format=None):\n        return Response(\"Remove FTG To the Container View\")\n\n    def post(self, request,uid, format=None):\n\n        FTGUid = request.data.get('featureTypeGroup', None)\n        try: \n            containerView = ContainerView.objects.get(uid = uid)\n        except ContainerView.DoesNotExist:\n            return Response({'error': True, 'errorList': 'Container with uid does not exist', 'data':[]}, status = status.HTTP_404_NOT_FOUND)\n        try: \n            featureTypeGroup = FeatureTypeGroup.objects.get(uid = FTGUid)\n        except FeatureTypeGroup.DoesNotExist:\n            return Response({'error': True, 'errorList': 'FTG with uid does not exist'}, status = status.HTTP_404_NOT_FOUND)\n        \n        # Remove FTG from the CV\n        if featureTypeGroup:\n            containerView.featureTypeGroups.remove(featureTypeGroup )\n        \n        # Get all the Features associated with the CV and dereference FT from them\n        message = {'featureTypeGroup':FTGUid,'containerView': uid }\n        featureTypes = featureTypeGroup.featureTypes.all()\n        features = Feature.objects.filter(project__group__containerView = containerView,featureType__in = featureTypes)\n        message['features'] = list(features.values_list('uid', flat = True)) \n        features.update(featureType = None) \n\n        message['featureTypes'] = featureTypes.values_list('uid', flat = True)\n        return Response({'success': True,'data':message},status = status.HTTP_200_OK)\n\nclass ContainerViewRetrieveAPIView(RetrieveAPIView):\n    \"\"\" \n    Get the container of the organization \n    \"\"\"\n    queryset = ContainerView.objects.filter(active = True)\n    serializer_class = ContainerViewSerializer\n    authentication_classes = [UserAuthentication]\n    permission_classes =[CanReadContainer]\n    lookup_field = 'uid'\n\n    def retrieve(self, request, *args, **kwargs):\n        try:\n            uid = kwargs.get('uid', None)\n            instance = self.get_queryset().get(uid =uid )\n\n            # May raise a permission denied\n            self.check_object_permissions(self.request, instance)\n\n        except self.get_queryset().model.DoesNotExist:\n            return Response({'error': True, 'errorList': 'object with uid does not exist' }, status= status.HTTP_404_NOT_FOUND)\n        \n        serializer = self.get_serializer(instance)\n        data = self.get_serializer(instance).data \n\n        # get all the active groups of the container view \n        groups = instance.organizationgroup_set.filter(active = True) \n        data['groups'] = OrganizationGroupSerializerProjectsPerGroup(groups, many = True).data\n        return Response(data)\n\n    \n    \nclass CreateOrUpdateOrgContainerAndAttachGroupsFromJson(APIView):\n\n    authentication_classes = [UserAuthentication]\n    permission_classes = [SuperUserPermission]\n\n    def get(self, request):\n        data = {\"message\": \"This request used to create or update containers and attach groups to the container \"}\n        return Response(data)\n\n    def post(self, request):\n        data = request.data\n        returnData = self.CreateOrUpdateOrgContainerAndAttachGroups(data)\n        return Response(returnData)\n\n    @staticmethod\n    def popDataAttributes(data):\n        popKeys = [\n            'read_labels',\n            'read_users',\n            'write_labels',\n            'write_users'\n        ]\n        for key in popKeys:\n            data.pop(key)\n\n        return data\n\n    @staticmethod\n    def removeDuplicates(data):\n        from iteration_utilities import unique_everseen \n        permissionkeys = [\"readLabels\", \"readUsers\", \"writeLabels\", \"writeUsers\"]\n        for key in 
permissionkeys:\n if data.get(key):\n data[key] = list(unique_everseen(data.get(key)))\n\n return data\n\n @staticmethod\n def mapPermissionsBasedOnModels(data,model = None):\n from iteration_utilities import unique_everseen \n permisionData = {}\n permKeyValues = {\n 'read_labels': 'readLabels',\n 'read_users': 'readUsers',\n 'write_labels': 'writeLabels',\n 'write_users': 'writeUsers'\n }\n\n for key, value in permKeyValues.items():\n if data.get(key):\n permisionData[value] = data[key]\n else:\n permisionData[value] = []\n\n if model in ['asset', 'entity'] and data.get('owner',None):\n permisionData['readUsers'].append(data.get('owner'))\n permisionData['writeUsers'].append(data.get('owner'))\n\n permisionData = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.removeDuplicates(permisionData)\n return permisionData\n\n\n @staticmethod\n def updateContainerPermissions(containerData, permissions):\n keys = [\"readLabels\", \"readUsers\", \"writeLabels\", \"writeUsers\"]\n \n containerPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(containerData)\n containerData.update(containerPermissions)\n containerData = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.popDataAttributes(containerData)\n for key in keys:\n if permissions.get(key):\n if containerData.get(key):\n containerData[key].extend(permissions.get(key))\n else:\n containerData[key] = permissions.get(key)\n containerData = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.removeDuplicates(containerData)\n return containerData\n \n\n @staticmethod\n def getReadAndWriteUsersAndLabelsFromEntityAsset(data):\n permissions = {'readUsers': [], 'readLabels': [], 'writeLabels': [], 'writeUsers': []}\n\n # from entity\n if data.get('entity'): \n entityPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(data.get('entity'), 'entity')\n for key , value in entityPermissions.items():\n if key in permissions:\n permissions[key].extend(value)\n\n if data.get('asset'): \n assetPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(data.get('asset'), 'asset')\n for key , value in assetPermissions.items():\n if key in permissions:\n permissions[key].extend(value)\n\n return permissions\n\n @staticmethod\n def CreateOrUpdateOrgContainerAndAttachGroups(data): \n\n dataDict = {}\n if data.get('organization', None) and data.get('container', None) and 'groups' in data:\n \n # organization Creation\n orgPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(data.get('organization'))\n data.get('organization').update(orgPermissions)\n returnOrgData = OrganizationMethods.CreateOrUpdateOrg(data.get('organization'))\n if isinstance(returnOrgData,Organization):\n dataDict['org'] = returnOrgData.uid\n else:\n errorList = returnOrgData.get('error')\n errorList['org_uid'] = data.get('organization').get('uid')\n return {'error': True, 'errorList':errorList, 'data': dataDict}\n\n containerPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.getReadAndWriteUsersAndLabelsFromEntityAsset(data)\n # creation of container \n containerData = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.updateContainerPermissions(data.get('container'), containerPermissions)\n containerData['organization'] = returnOrgData.pk\n returnContainerData = ContainerViewMethods.CreateorUpdateContainerView(containerData)\n \n if isinstance(returnContainerData,ContainerView):\n containerView = returnContainerData\n 
dataDict['containerView'] = containerView.uid\n\n                # clear all the groups\n                containerView.organizationgroup_set.clear() \n\n            elif returnContainerData.get('error', None):\n                returnContainerData['error']['container_uid'] = data.get('container').get('uid')\n                return {'error': True, 'errorList':returnContainerData.get('error'),'data': dataDict}\n            \n            # creation of groups\n            groupsData = data.get('groups', None)\n            groupErrorList = []; dataDict['groups'] = []; \n            projectErrorList = []; \n\n\n            for groupData in groupsData:\n\n                eachGroupSuccessData = {'group' : None , 'projects': []}\n\n                groupPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(groupData)\n                groupData.update(groupPermissions)\n                groupData = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.popDataAttributes(groupData)\n                \n                groupData['containerView'] = containerView.pk\n                groupData['organization'] = returnOrgData.pk\n                returnGroupData = OrganizationGroupMethods.CreateOrUpdateGroup(groupData)\n\n                if isinstance(returnGroupData, OrganizationGroup):\n\n                    eachGroupSuccessData['group'] = returnGroupData.pk\n\n                    # clear all the projects\n                    returnGroupData.organizationproject_set.all().update(active = False) \n                    returnGroupData.organizationproject_set.clear() \n\n                    # create Project\n                    projectDataDictList = groupData.get('projects', [])\n\n                    for projectDataDict in projectDataDictList:\n\n                        projectPermissions = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.mapPermissionsBasedOnModels(projectDataDict)\n                        projectDataDict.update(projectPermissions)\n                        projectDataDict = CreateOrUpdateOrgContainerAndAttachGroupsFromJson.popDataAttributes(projectDataDict)\n\n                        projectDataDict['group'] = returnGroupData.pk\n\n                        if 'reports' in projectDataDict:\n                            projectDataDict['data'] = projectDataDict['reports']\n\n                        projectData = OrganizationProjectMethods.CreateOrUpdateProject(projectDataDict)\n                        if not isinstance(projectData, OrganizationProject):\n                            projectError = projectData.get('error')\n                            projectError['project_uid'] = projectDataDict.get('uid')\n                            projectErrorList.append(projectError)\n                        else:\n                            eachGroupSuccessData['projects'].append(projectData.uid)\n                    \n                    dataDict['groups'].append(eachGroupSuccessData)\n                    \n                elif returnGroupData.get('error'):\n                    returnGroupData['error']['group_uid'] = groupData.get('uid')\n                    groupErrorList.append(returnGroupData.get('error'))\n\n            \n            if groupErrorList or projectErrorList:\n                errorList = {}\n                errorList['groups'] = groupErrorList\n                errorList['projects'] = projectErrorList\n                return {'error': True, 'errorList':errorList, 'data': dataDict}\n            else:\n                return {'error': False, 'success' : True, 'data': dataDict, 'errorList': []}\n        else:\n            return {'error' : True, 'errorList': 'The JSON does not contain one of the attributes \"organization,container,groups\" '}\n\n\n    \n\n\n    \n\n    \n\n\n\n\n","repo_name":"Poori19/sensehawkTerra","sub_path":"containers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"74106811541","text":"#Phone.py\n\n#Kyle Nabors\n\n#Gets phone number from user and checks if it is in the right format\n\n#global variables\nlength = 0\ndigit = 0\nspecial = 0\n\nnumber = input(\"Enter number as ###-###-####:\")#gets input from user\n\nlength = len(number)#gets the length of the number to know for step one of checking input\n\nif length == 12:#checks if the user input is the correct length \n    for letter in number:\n        if letter.isdigit():#checks how many numbers 
are in the phone number\n            digit = digit + 1\n        if letter in '-': #checks how many \"-\" are in the phone number\n            special = special + 1\n    if digit == 10: #checks if the number of digits is correct for a valid phone number\n        if special == 2:#checks if the number of \"-\" is correct for a valid phone number\n            print(\"Valid\")\n        else:\n            print(\"Invalid\") \n    else:\n        print(\"Invalid\")\nelse:\n    print(\"Invalid\")\n    \n","repo_name":"KyleNabors/ESC-32A","sub_path":"phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"36857417111","text":"import os.path\nimport sys\nimport string\n\ndef prepare_gcc(env):\n    env['CC'] = 'gcc'\n    env['TOOLS'] = ['default', 'gcc']\n    env['CPPPATH'].extend([])\n    env['CPPDEFINES'].extend([])\n    if env['PLATFORM'] == 'x86':\n        env['CCFLAGS'].extend(['-m32','-fpic','-Werror'])\n        env['LINKFLAGS'].extend(['-m32'])\n        env['LIBS'].extend([])#'stdc++'\n        env['LIBPATH'].extend(['/usr/lib32'])\n    elif env['PLATFORM'] == 'x64':\n        env['CCFLAGS'].extend(['-m64','-fpic','-Werror'])\n        env['LINKFLAGS'].extend(['-m64'])\n        env['LIBPATH'].extend(['/usr/local/lib64'])\n    else:\n        print(\"Unknown platform: \"+env['PLATFORM'])\n        sys.exit()\n    if env['CONFIGURATION'] == 'Debug':\n        env['CCFLAGS'].extend(['-g'])\n    return env\n","repo_name":"osblinnikov/snocs","sub_path":"gcc.py","file_name":"gcc.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"}
+{"seq_id":"39608396628","text":"items = [\n    ('Product1', 13),\n    ('Product2', 18),\n    ('Product3', 11)\n]\n\nprices = []\n\n# for item in items:\n#     prices.append(item[1])\n\n# print(prices)\n\nx = map(lambda item: item[1], items) # returns map object\nprint(x) # we can iterate using loop\nprint(list(x)) # make a list\n","repo_name":"Ichigo-lab/PythonBegins","sub_path":"Data Structure/map_function.py","file_name":"map_function.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"14130981782","text":"import requests\r\nfrom tkinter import *\r\nimport customtkinter\r\nfrom PIL import Image\r\n#window\r\nwindow = customtkinter.CTk()\r\nwindow.resizable(False, False)\r\nwindow.geometry(\"700x400\")\r\nwindow.title(\"ISS\")\r\nwindow.iconbitmap(\"icon.ico\")\r\ncustomtkinter.set_appearance_mode(\"dark\")\r\n\r\n#Functions\r\ndef iss_coordinates():\r\n    response = requests.get(\"http://api.open-notify.org/iss-now.json\")\r\n    response.raise_for_status()\r\n    data = response.json()\r\n    latitude = data[\"iss_position\"][\"latitude\"]\r\n    longitude = data[\"iss_position\"][\"longitude\"]\r\n\r\n    latitude_label.configure(text=f\"The latitude of the ISS is {latitude}\")\r\n    longitude_label.configure(text=f\"The longitude of the ISS is {longitude}\")\r\n\r\n#Canva\r\nmy_image = customtkinter.CTkImage(dark_image=Image.open(\"iss_gif.gif\"),\r\n                                  size=(480,270))\r\nimage_label = customtkinter.CTkLabel(window, image=my_image, text=\"\")\r\nimage_label.pack()\r\n\r\n#Frame \r\ncoordinates_frame = customtkinter.CTkFrame(window)\r\ncoordinates_frame.pack()\r\n\r\n#Button\r\nrecount_button = customtkinter.CTkButton(coordinates_frame, text=\"Current ISS coordinates\", command=iss_coordinates)\r\nrecount_button.pack()\r\n\r\n#Labels\r\nlatitude_label = customtkinter.CTkLabel(window, text=\"\")\r\nlatitude_label.pack()\r\n\r\nlongitude_label = 
customtkinter.CTkLabel(window, text=\"\")\r\nlongitude_label.pack()\r\n\r\n\r\n#mainloop\r\nwindow.mainloop()","repo_name":"tastefulblatant/ISSAPI","sub_path":"API-ISS.py","file_name":"API-ISS.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34393403926","text":"import json\n\nfrom odoo.addons.mail_plugin.tests.common import TestMailPluginControllerCommon, mock_auth_method_outlook\nfrom odoo.addons.mail.tests.common import MailCase\n\n\nclass TestHelpdeskClient(TestMailPluginControllerCommon, MailCase):\n @mock_auth_method_outlook('employee')\n def test_ticket_creation_notification(self):\n \"\"\"Test the ticket creation using the mail plugin endpoint.\n\n Test that the ticket is created, with the\n - name set with the email subject\n - description set with the email body\n - user set with the current logged user\n\n Check also that the acknowledgement email has been sent.\n \"\"\"\n self.user_test.groups_id |= self.env.ref('helpdesk.group_helpdesk_user')\n customer = self.env['res.partner'].create({'name': 'Customer', 'email': 'customer@example.com'})\n\n email_body = 'Test email body'\n email_subject = 'Test email subject'\n\n data = {\n 'id': 0,\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': {\n 'email_body': email_body,\n 'email_subject': email_subject,\n 'partner_id': customer.id,\n },\n }\n\n messages_info = [{\n 'content': 'The reference of your ticket is',\n 'message_type': 'notification',\n 'subtype': 'mail.mt_note',\n 'email_values': {\n 'email_from': self.env.company.email_formatted,\n },\n 'notif': [{'partner': customer, 'type': 'email', 'status': 'sent'}],\n }, {\n 'content': '',\n 'message_type': 'notification',\n 'email_values': {\n 'email_from': self.env.company.email_formatted,\n },\n 'subtype': 'helpdesk.mt_ticket_new',\n 'notif': [],\n }]\n\n with self.assertPostNotifications(messages_info):\n response = self.url_open(\n url='/mail_plugin/ticket/create',\n data=json.dumps(data),\n headers={'Content-Type': 'application/json'},\n ).json()\n\n self.env['mail.mail'].process_email_queue()\n\n ticket_id = response.get('result', {}).get('ticket_id')\n\n self.assertTrue(bool(ticket_id))\n\n ticket = self.env['helpdesk.ticket'].browse(ticket_id)\n\n self.assertTrue(bool(ticket))\n self.assertIn(email_body, ticket.description)\n self.assertEqual(ticket.name, email_subject)\n self.assertEqual(ticket.user_id, self.user_test)\n","repo_name":"dinar-it/odoo_16_enter","sub_path":"helpdesk_mail_plugin/tests/test_helpdesk_client.py","file_name":"test_helpdesk_client.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"6960087213","text":"import os\nclass Path:\n def templateWay(self):\n template_dir = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n template_dir = os.path.join(template_dir, 'View')\n template_dir = os.path.join(template_dir, 'templates')\n\n return template_dir\n\n def staticWay(self):\n static_dir = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\n static_dir = os.path.join(static_dir, 'View')\n static_dir = os.path.join(static_dir, 'static')\n\n return static_dir\n\ns = 
Path()\nprint(s.templateWay())","repo_name":"Robis123/Modelo-Curriculo-Digital","sub_path":"Utilitarios/pathing.py","file_name":"pathing.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5994014061","text":"#https://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srsearch=poop\n\nimport urllib.request\nimport urllib.parse\nfrom urllib.error import *\nimport json\nfrom .searchprovider import SearchProvider\nfrom .searchresult import SearchResult\n\nclass WikipediaSearchProvider(SearchProvider):\n \"\"\"Search for info about topics on wikipedia sets.\"\"\"\n\n @property\n def title(self):\n return \"Wikipedia Results\"\n\n def routine(self, query):\n \"\"\"Overiding the parents routine to search using the wikipedia.org\"\"\"\n\n success = True\n reply = \"\"\n\n try:\n url = \"https://en.wikipedia.org/w/api.php?action=query&list=search&format=json&srsearch=\"+urllib.parse.quote_plus(query)\n page = urllib.request.urlopen(url, timeout=2.5)\n jdata = json.loads(page.read().decode('utf-8'))\n for i in jdata[\"query\"][\"search\"]:\n reply += i[\"title\"] + \" : \"+ i[\"snippet\"] + \"
\"\n\n except (URLError,KeyError) as e:\n reply = \"Failed to connect: \" + str(e)\n success = False\n\n if reply == \"\":\n success = False\n\n self.result = SearchResult(reply, confidence=success)\n","repo_name":"micahh2/marvin","sub_path":"searchproviders/wikipedia.py","file_name":"wikipedia.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4801773952","text":"from rest_framework import serializers\n\nfrom scrum.models import Project, Task, Question\n\n\nclass ProjectSerializer(serializers.HyperlinkedModelSerializer):\n tasks_count = serializers.SerializerMethodField()\n\n class Meta:\n model = Project\n fields = ['id', 'name', 'description', 'tasks_count']\n\n def get_tasks_count(self, obj):\n tasks_ids = Task.objects.filter(project=obj).values_list('id', flat=True)\n return Question.objects.filter(task__id__in=tasks_ids).count()\n","repo_name":"MiraBerkobich/simulation-project-master","sub_path":"api/serializers/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21140384554","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def recoverTree(self, root: Optional[TreeNode]) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n l = []\n def inorder(node):\n if node is None:\n return \n inorder(node.left)\n l.append(node)\n inorder(node.right)\n inorder(root)\n a = l[0] \n for i in range(1,len(l)):\n if l[i].val < l[i-1].val:\n a = l[i-1]\n break\n b = l[-1] \n for i in range(len(l)-2,-1,-1):\n if l[i].val > l[i+1].val:\n b = l[i+1]\n break\n a.val,b.val = b.val,a.val \n\n\n ","repo_name":"Hacker-KM/Leetcode_Hacker_KM","sub_path":"0099-recover-binary-search-tree/0099-recover-binary-search-tree.py","file_name":"0099-recover-binary-search-tree.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2575029438","text":"import requests\nimport json\nimport pytz\nimport tzlocal\nimport os\nfrom datetime import datetime, timedelta\nfrom django.core.management.base import BaseCommand\nfrom honeybadger import honeybadger\nfrom rating.models import *\n\nhoneybadger.configure(api_key=os.environ.get('HONEYBADGER_API_KEY'))\n\n\ndef get_town_venue(url):\n t_response = requests.get(url, timeout=10, headers={\"accept\":\"application/json\"})\n t_data = json.loads(t_response.text)['town']\n\n # проверяем есть ли у нас в базе страна команды\n country = Country.objects.filter(id=t_data['country']['id']).first()\n if not country:\n country, is_updated = Country.objects.update_or_create(\n id=t_data['country']['id'],\n defaults={\n 'title': t_data['country']['name']\n },\n )\n\n # регион\n try:\n region = Region.objects.filter(id=t_data['region']['id']).first()\n if not region:\n region, is_updated = Region.objects.update_or_create(\n id=t_data['region']['id'],\n defaults={\n 'title': t_data['region']['name'],\n 'country': country\n },\n )\n except TypeError:\n region = None\n\n # город\n town = Town.objects.filter(id=t_data['id']).first()\n if not town:\n town, is_updated = Town.objects.update_or_create(\n id=t_data['id'],\n defaults={\n 'title': t_data['name'],\n 'country': country,\n 'region': 
\ndef get_town(url):\n t_response = requests.get(url, timeout=10, headers={\"accept\":\"application/json\"})\n t_data = json.loads(t_response.text)\n\n # check whether the team's country is already in our database\n country = Country.objects.filter(id=t_data['country']['id']).first()\n if not country:\n country, is_updated = Country.objects.update_or_create(\n id=t_data['country']['id'],\n defaults={\n 'title': t_data['country']['name']\n },\n )\n\n # region\n try:\n region = Region.objects.filter(id=t_data['region']['id']).first()\n if not region:\n region, is_updated = Region.objects.update_or_create(\n id=t_data['region']['id'],\n defaults={\n 'title': t_data['region']['name'],\n 'country': country\n },\n )\n except TypeError:\n region = None\n\n # town\n town = Town.objects.filter(id=t_data['id']).first()\n if not town:\n town, is_updated = Town.objects.update_or_create(\n id=t_data['id'],\n defaults={\n 'title': t_data['name'],\n 'country': country,\n 'region': region,\n },\n )\n return town\n\n\n\ndef parse_tournaments(t_id, t_id_end, maii=False, force=False, date_diff=0):\n if maii:\n parse_range = []\n for j in range(1, 21, 1):\n maii_tournament_url = \"http://api.rating.chgk.net/tournaments?properties.maiiRating=true&page=\" + str(j)\n maii_tournament_response = requests.get(maii_tournament_url, timeout=10, headers={\"accept\":\"application/json\"})\n maii_tournament_data = json.loads(maii_tournament_response.text)\n for tournament in maii_tournament_data:\n parse_range.append(tournament['id'])\n elif date_diff > 0:\n time_from = (datetime.now()-timedelta(days=date_diff)).strftime(\"%Y-%m-%d\")\n parse_range = []\n for j in range(1, 21, 1):\n tournament_url = \"http://api.rating.chgk.net/tournaments?lastEditDate%5Bafter%5D=\"+time_from+\"&page=\" + str(j)\n tournament_response = requests.get(tournament_url, timeout=10, headers={\"accept\":\"application/json\"})\n tournament_data = json.loads(tournament_response.text)\n for tournament in tournament_data:\n parse_range.append(tournament['id'])\n else:\n parse_range = [*range(t_id, t_id_end+1, 1)]\n\n for i in parse_range:\n # parse tournament data\n print(\"Parsing tournament:\", i)\n tournament_url = \"http://api.rating.chgk.net/tournaments/\" + str(i)\n tournament_response = requests.get(tournament_url, timeout=10, headers={\"accept\":\"application/json\"})\n tournament_data = json.loads(tournament_response.text)\n\n # take the tournament type; if it is not in the database yet, create it\n try:\n typeoft = Typeoft.objects.filter(id=tournament_data['type']['id']).first()\n except TypeError:\n # catch tournaments that do not exist on the rating site when parsing an id range, and skip the missing id\n continue\n\n # compare the update date on the rating site with the date we have in the database\n local_timezone = tzlocal.get_localzone()\n end_datetime_to_compare = datetime.strptime(\n tournament_data['lastEditDate'], '%Y-%m-%dT%H:%M:%S%z'\n ).astimezone(local_timezone).replace(tzinfo=None)\n\n try:\n t_end_datetime_to_compare = Tournament.objects.filter(id=tournament_data['id']).first().edit_datetime\n if end_datetime_to_compare == t_end_datetime_to_compare and not force:\n # if the tournament has not been updated, skip it\n print(tournament_data['name'], \"- tournament has not been updated\")\n continue\n except AttributeError:\n # if the tournament is new and not in our database yet, continue the flow\n pass\n
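 # past this point the tournament is new, changed upstream, or being force-refreshed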
\n if not typeoft:\n typeoft, is_updated = Typeoft.objects.update_or_create(\n id=tournament_data['type']['id'],\n defaults={\n 'title': tournament_data['type']['name'],\n },\n )\n\n # update the tournament itself, except the update date, so we do not keep partial data if the parser is interrupted\n try:\n tournament, is_updated = Tournament.objects.update_or_create(\n id=tournament_data['id'],\n defaults={\n 'title': tournament_data['name'],\n 'start_datetime': tournament_data['dateStart'],\n 'end_datetime': tournament_data['dateEnd'],\n 'questionQty': json.dumps(tournament_data['questionQty']),\n 'typeoft': typeoft,\n 'maiiAegis': tournament_data['maiiAegis'],\n 'maii_rating': tournament_data['maiiRating'],\n 'maiiAegisUpdatedAt': tournament_data['maiiAegisUpdatedAt'],\n 'maiiRatingUpdatedAt': tournament_data['maiiRatingUpdatedAt']\n },\n )\n except Exception as e:\n continue\n # parse the tournament's organizing committee and link it\n tournament.orgcommittee.clear()\n for org in tournament_data['orgcommittee']:\n player, is_updated = Player.objects.update_or_create(\n id=org['id'],\n defaults={\n 'first_name': org['name'],\n 'last_name': org['surname'],\n 'patronymic': org['patronymic'],\n },\n )\n tournament.orgcommittee.add(player)\n\n print(\"Tournament saved\")\n\n # parse tournament results\n url = \"http://api.rating.chgk.net/tournaments/\" + str(i) +\"/results?includeTeamMembers=1&includeMasksAndControversials=1&includeTeamFlags=1&includeRatingB=1\"\n response = requests.get(url, timeout=20, headers={\"accept\":\"application/json\"})\n data = json.loads(response.text)\n\n # check whether some teams' results were deleted from the rating site\n # take the results from our own database\n db_results = list(Result.objects.filter(tournament=tournament).values_list('team__id',flat=True))\n # take the results returned by the API\n t_site_results = []\n for result in data:\n t_site_results.append(result['team']['id'])\n # find what we have that is now extra\n diff_results = list(set(db_results) - set(t_site_results))\n # delete the extra results from the database\n Result.objects.filter(tournament=tournament, team__id__in=diff_results).delete()\n
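 # upsert every remaining result: team, venue, sync request, mask and roster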
\n for result in data:\n # pull the town info\n try:\n t_url = \"http://api.rating.chgk.net/towns/\" + str(result['team']['town']['id'])\n town = get_town(t_url)\n except TypeError:\n town = None\n # create a team for every result\n team, is_updated = Team.objects.update_or_create(\n id=result['team']['id'],\n defaults={\n 'title': result['team']['name'],\n 'town': town\n },\n )\n\n # venue data\n try:\n v_url = \"http://api.rating.chgk.net/venues/\" + str(result['synchRequest']['venue']['id'])\n town = get_town_venue(v_url)\n venue = Venue.objects.filter(id=result['synchRequest']['venue']['id']).first()\n if not venue:\n venue, is_updated = Venue.objects.update_or_create(\n id=result['synchRequest']['venue']['id'],\n defaults={\n 'title': result['synchRequest']['venue']['name'],\n 'town': town,\n },\n )\n except TypeError:\n venue = None\n\n try:\n syncrequest = Syncrequest.objects.filter(id=result['synchRequest']['id']).first()\n if not syncrequest:\n syncrequest, is_updated = Syncrequest.objects.update_or_create(\n id=result['synchRequest']['id'],\n defaults={\n 'venue': venue,\n },\n )\n except TypeError:\n syncrequest = None\n\n # update the result\n try:\n mask = list(result['mask'])\n except TypeError:\n mask = []\n try:\n total = int(result['questionsTotal'])\n except TypeError:\n total = 0\n try:\n position = float(result['position'])\n except TypeError:\n position = 0\n db_result, is_updated = Result.objects.update_or_create(\n team=team, tournament=tournament,\n defaults={\n 'mask': mask,\n 'team_title': result['current']['name'],\n 'total': total,\n 'syncrequest': syncrequest,\n 'position': position,\n },\n )\n \n # write the flags\n db_result.flags.clear()\n for flag in result['flags']:\n db_flag, is_updated = Flag.objects.update_or_create(\n id=flag['id'],\n defaults={\n 'shortName': flag['shortName'],\n 'longName': flag['longName'],\n },\n )\n db_result.flags.add(db_flag)\n\n # create or update players, and store their old individual ratings to compare rating models later\n db_result.teamMembers.clear()\n for teammember in result['teamMembers']:\n player, is_updated = Player.objects.update_or_create(\n id=teammember['player']['id'],\n defaults={\n 'first_name': teammember['player']['name'],\n 'last_name': teammember['player']['surname'],\n 'patronymic': teammember['player']['patronymic'],\n },\n )\n db_result.teamMembers.add(player)\n\n oldrating, is_updated = Oldrating.objects.update_or_create(\n player=player, result=db_result,\n defaults={\n 'rating': teammember['rating'],\n 'usedRating': teammember['usedRating'],\n 'flag': teammember['flag'],\n },\n )\n\n # delete oldrating rows for players removed from team rosters on the rating site\n for oldrating in Oldrating.objects.filter(result=db_result):\n if oldrating.player not in db_result.teamMembers.all():\n oldrating.delete()\n \n # record the old team rating to compare rating models later\n inRating = False\n try:\n if result['rating']['inRating']:\n inRating = True\n except TypeError:\n print(db_result.team_title, db_result.mask,db_result.total,db_result.position, \"|\", tournament, tournament.id)\n continue\n rating, is_updated = Oldteamrating.objects.update_or_create(\n result=db_result,\n defaults={\n 'inRating': inRating,\n 'b': result['rating']['b'],\n 'predictedPosition': result['rating']['predictedPosition'],\n 'rt': result['rating']['rt'],\n 'rb': result['rating']['rb'],\n 'rg': result['rating']['rg'],\n 'r': result['rating']['r'],\n 'bp': result['rating']['bp'],\n 'd1': result['rating']['d1'],\n 'd2': result['rating']['d2'],\n 'd': result['rating']['d'],\n },\n )\n\n tournament.edit_datetime = tournament_data['lastEditDate']\n tournament.save()\n print(\"done: \", i)\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('-i', '--t_id', type=int)\n parser.add_argument('-e', '--t_id_end', type=int)\n parser.add_argument('--maii', action='store_true')\n parser.add_argument('--force', action='store_true')\n parser.add_argument('-d', '--date_diff', type=int)\n\n def handle(self, *args, **kwargs):\n maii = False\n force = False\n if kwargs[\"maii\"]:\n maii = True\n if kwargs[\"force\"]:\n force = True\n t_id = kwargs[\"t_id\"]\n t_id_end = kwargs[\"t_id_end\"]\n date_diff = kwargs[\"date_diff\"]\n if not date_diff:\n date_diff = 0\n\n parse_tournaments(t_id, t_id_end, maii, force, date_diff)\n","repo_name":"maii-chgk/rating-scraper","sub_path":"src/rating/management/commands/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":15682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"72130723222","text":"from django.contrib import admin\nfrom . 
models import Issue, IssueAttachment, List,Todo,FAQ,Feedback,ActivityLog,News,Updates,Notification,Project,TaskAttachment,SubTask\n\n\n# Register your models here.\n\n# class ProjectAdmin(admin.ModelAdmin) : \n# list_display = ['project_title','project_description','assignees','start_date','end_date','project_type','project_status','duration','estimated_hours','created_at','updated_at']\n# list_per_page = 20\n \nadmin.site.register(Project)\n\nclass TaskAdmin(admin.ModelAdmin) : \n list_display = ['user','task_title','task_description','task_duedate','task_priority','task_status','created_date','updated_date']\n list_per_page = 10\n \nadmin.site.register(Todo,TaskAdmin)\n\nclass FaqAdmin(admin.ModelAdmin) : \n list_display = ['question','answer','created_at','updated_at']\n list_per_page = 10\n \nadmin.site.register(FAQ,FaqAdmin) \n\n\nclass FeedbackAdmin(admin.ModelAdmin) : \n list_display = ['user_name','user_email','subject','message','message_date']\n list_per_page = 20\n \nadmin.site.register(Feedback,FeedbackAdmin)\n\n#activity log admin \nclass ActivityLogAdmin(admin.ModelAdmin) : \n list_display = ['user','activity','activity_date','activity_time']\n list_per_page = 20\n \nadmin.site.register(ActivityLog,ActivityLogAdmin)\n\n#news model admin\nclass NewsAdmin(admin.ModelAdmin) : \n list_display = ['user','title','category','description','slug','published_date','published_time']\n list_per_page = 20\n prepopulated_fields = {'slug':('title',)}\nadmin.site.register(News,NewsAdmin)\n\n#updates model admin\nclass UpdatesAdmin(admin.ModelAdmin) : \n list_display = ['user','title','description','slug','published_date','published_time']\n list_per_page = 20\n prepopulated_fields = {'slug':('title',)}\nadmin.site.register(Updates,UpdatesAdmin)\n \n\nclass NotificationAdmin(admin.ModelAdmin) : \n list_display = ['user','task_title','task_description','task_duedate']\n \nadmin.site.register(Notification,NotificationAdmin)\n\nclass TaskAttachmentAdmin(admin.ModelAdmin):\n list_display = ['user','task','attachment']\nadmin.site.register(TaskAttachment,TaskAttachmentAdmin)\n\nclass IssueAttachmentAdmin(admin.ModelAdmin):\n list_display = ['user','issue','attachment']\nadmin.site.register(IssueAttachment,IssueAttachmentAdmin)\n\nclass SubTaskAdmin(admin.ModelAdmin):\n list_display = ['user','task','sub_task_title','sub_task_priority','sub_task_status','created_at','updated_at']\n list_per_page = 20\n \nadmin.site.register(SubTask, SubTaskAdmin)\n\nclass ListAdmin(admin.ModelAdmin) : \n list_display = ['user','project','list_name','list_description','created_at','updated_at']\n list_per_page = 20\nadmin.site.register(List,ListAdmin)\n\nclass IssueAdmin(admin.ModelAdmin):\n list_display = ['assignee','project','list','issue_title','issue_description','issue_priority','issue_status']\n list_per_page = 20\n \nadmin.site.register(Issue,IssueAdmin)\n","repo_name":"Joshi-kv/TodoTracker","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26192898655","text":"from cms.plugin_base import CMSPluginBase\nfrom cms.plugin_pool import plugin_pool\nfrom cms.models.pluginmodel import CMSPlugin\nfrom django.utils.translation import ugettext_lazy as _\nfrom .models import DanceEvent, Dancer\nfrom .models import OwnCompetition\nfrom django.utils import timezone\nfrom django.db import models\nimport datetime\nfrom django.db.models import Count, Q, BooleanField, 
ExpressionWrapper\n\nclass PossibleEventsPlugin(CMSPluginBase):\n model = CMSPlugin\n render_template = \"danceclub/possible_events.html\"\n name = _(\"Possible Events\")\n cache = False\n\n def render(self, context, instance, placeholder):\n request = context['request']\n context['instance'] = instance\n dancer_filter = public_filter = Q(public_since__lte=timezone.now())\n if request.user.is_authenticated():\n try:\n dancer = Dancer.objects.get(user=request.user)\n dancer_filter = Q(start__lte=timezone.now()+datetime.timedelta(days=4)) | Q(public=True)\n except Dancer.DoesNotExist:\n pass\n \n context['events'] = events = DanceEvent.objects.annotate(\n part_count=Count('participations'),\n public=ExpressionWrapper(public_filter, output_field=BooleanField())\n ).filter(\n end__gte=timezone.now(),\n deadline__gte=timezone.now(),\n part_count=0\n ).filter(dancer_filter).order_by('start')\n return context\n\nplugin_pool.register_plugin(PossibleEventsPlugin)\n\nfrom cms.models.fields import PlaceholderField\n\nclass NextCompetitionPlugin(CMSPlugin):\n description = PlaceholderField('description', related_name=\"nextcompetition_description\")\n competition = models.ForeignKey(OwnCompetition)\n\nclass NextCompetitionLink(CMSPluginBase):\n model = NextCompetitionPlugin\n render_template = \"danceclub/cmsplugins/next_competition.html\"\n name = _(\"Next Competition\")\n cache = False\n\n def render(self, context, instance, placeholder):\n context[\"own\"] = instance.competition\n context[\"instance\"] = instance\n \n return context\n\nplugin_pool.register_plugin(NextCompetitionLink)","repo_name":"jrutila/dancing","sub_path":"danceclub/cms_plugins.py","file_name":"cms_plugins.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27563173988","text":"from django import forms\n\nfrom .models import Subscribers\n\n\nclass SubscribeForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"destination\"].empty_label = \"Choose your destination\"\n\n class Meta:\n model = Subscribers\n fields = \"__all__\"\n","repo_name":"OleksiiKukla/ferry_site_2","sub_path":"subscribe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5180071175","text":"from send_email import send_email\nimport requests\n\ntopic = 'tesla'\napi_key = '5803574c55b04535837cc22cefbcc1ce'\nurl = f'https://newsapi.org/v2/everything?q={topic}&from=2022-11-07&\\\n sortBy=publishedAt&apiKey=5803574c55b04535837cc22cefbcc1ce&\\\n language=en'\n\n# Make request\nrequest = requests.get(url)\n\n# Get a dictionary with data\ncontent = request.json()\n\nmessage = 'Subject: News for you!\\n'\n\n# Access the article titles and description\nfor article in content['articles'][:20]:\n if article['title'] is not None:\n message += f\"{article['title']}\\n{article['description']}\\n{article['url']}\\n\\n\"\n # message = message.replace(\"\\u2026\", \" \")\n # print(message)\n\n# Encode message\nmessage = message.encode(\"utf-8\")\n\n# Send email\nsend_email(message)","repo_name":"DominicanCodes/News-API","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12437562051","text":"import pytest\nfrom search import *\nimport random\nfrom 
termcolor import colored\nimport time\n\nos.system('color')\nrandom.seed(\"aima-python\")\n\nromania_problem = GraphProblem('Arad', 'Bucharest', romania_map)\nbackwards_romania = GraphProblem('Bucharest', 'Arad', romania_map)\ndont_move = GraphProblem('Arad', 'Arad', romania_map)\n\n\n\ndef test_best_first_graph_search():\n # uniform_cost_search and astar_search test it indirectly\n assert best_first_graph_search(\n romania_problem,\n lambda node: node.state).solution() == ['Sibiu', 'Fagaras', 'Bucharest']\n assert best_first_graph_search(\n romania_problem,\n lambda node: node.state[::-1]).solution() == ['Timisoara',\n 'Lugoj',\n 'Mehadia',\n 'Drobeta',\n 'Craiova',\n 'Pitesti',\n 'Bucharest']\n\n\n\n\n\n\n\ndef test_simulated_annealing(prob: GraphProblem):\n return simulated_annealing(prob)\n\ndef test_astar_search(prob: GraphProblem):\n return astar_search(prob).solution()\n # assert astar_search(eight_puzzle).solution() == ['LEFT', 'LEFT', 'UP', 'RIGHT', 'RIGHT', 'DOWN', 'LEFT', 'UP',\n # 'LEFT', 'DOWN', 'RIGHT', 'RIGHT']\n # assert astar_search(EightPuzzle((1, 2, 3, 4, 5, 6, 0, 7, 8))).solution() == ['RIGHT', 'RIGHT']\n # assert astar_search(nqueens).solution() == [7, 1, 3, 0, 6, 4, 2, 5]\n\ndef line_spacing():\n print(\"\\n\")\n\n\nif __name__ == '__main__':\n start = time.time()\n print(colored(\"FIRST TEST. ARAD TO BUCHAREST\", 'green'))\n print(colored(\"Romania Map A Star path: \", 'blue'), test_astar_search(romania_problem))\n end1 = time.time()\n print(\"Time taken: \", str(end1 - start))\n print(colored(\"Romania Map Simulated Annealing path:\", 'blue'), test_simulated_annealing(romania_problem))\n end2 = time.time()\n print(\"Time taken: \", str(end2 - start))\n line_spacing()\n\n print(colored(\"SECOND TEST. BUCHAREST TO ARAD\", 'green'))\n print(colored(\"Backwards Romania Map A Star path: \", 'blue'), test_astar_search(backwards_romania))\n end3 = time.time()\n print(\"Time taken: \", str(end3 - start))\n print(colored(\"Backwards Romania Map Simulated Annealing path:\", 'blue'), test_simulated_annealing(backwards_romania))\n end4 = time.time()\n print(\"Time taken: \", str(end4 - start))\n\n line_spacing()\n\n
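 # third block below: start equals goal, so the A Star solution should be an empty action list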
 print(colored(\"THIRD TEST. ARAD TO ARAD\", 'green'))\n print(colored(\"Dont Move Map A Star path: \", 'blue'), test_astar_search(dont_move))\n end5 = time.time()\n print(\"Time taken: \", str(end5 - start))\n print(colored(\"Dont Move Map Simulated Annealing path:\", 'blue'), test_simulated_annealing(dont_move))\n end6 = time.time()\n print(\"Time taken: \", str(end6 - start))\n\n\n\n\n\n","repo_name":"GrandDad7/Artificial-Int-Proj","sub_path":"test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"31188965628","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n# import scipy as sp\n# from numpy import linalg as LA\nimport matplotlib\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n# import statsmodels.formula.api as smf\nimport pandas as pd\n# import pyodbc\n# import glob\nfrom datetime import datetime\nimport glob\nimport os\nimport time\n\nimport seaborn as sns\nsns.set_style(\"whitegrid\")\n\nfrom scipy.stats import wilcoxon, mannwhitneyu\nfrom scipy import stats\nfrom collections import Counter\nimport statsmodels.api as sm\nfrom random import sample\nimport pickle\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.model_selection import train_test_split\n\n\n# In[2]:\n\n\npath = \"/local/aling\"\ntestweights2 = pd.read_pickle(path +'/testweightsbiweek.pkl')\ncontrolweights2 = pd.read_pickle(path +'/controlweightsbiweek.pkl')\n\ntest_weight_workouts = pd.read_pickle('/local/aling/test_weight_workouts.pkl')\ncontrol_weight_workouts = pd.read_pickle('/local/aling/control_weight_workouts.pkl')\n\n\n# In[5]:\n\n\ntest_weight_workouts.head()\n\n\n# In[6]:\n\n\ncontrol_weight_workouts.head()\n\n\n# In[7]:\n\n\ncontroldf1 = control_weight_workouts[['bmicat','time_taken','freq','calories_burned']]\ncontroldf1.head()\n\n\n# In[8]:\n\n\ntestdf1 = test_weight_workouts[['bmicat','time_taken','freq','calories_burned']]\ntestdf1.head()\n\n\n# In[24]:\n\n\nfulldf = testdf1.append(controldf1)\nfulldf.loc[fulldf['bmicat'] ==1, 'bmicat'] = 0\nfulldf.loc[fulldf['bmicat'] ==2, 'bmicat'] = 1\nfulldf.head()\n\n\n# In[25]:\n\n\nCounter(fulldf.bmicat)\n\n\n# In[26]:\n\n\narrayframe = fulldf.values\narrayframe\n\n\n# In[19]:\n\n\narrayframe[:,0]\n\n\n# In[27]:\n\n\nX = arrayframe[:,1:4]\nX = X.astype('int')\nY = arrayframe[:,0]\nY = Y.astype('int')\n\n\n# In[28]:\n\n\nX[0]\n\n\n# In[29]:\n\n\nX_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.25,random_state=0)\n\n\n# In[30]:\n\n\nlogreg = LogisticRegressionCV(cv = 10,random_state=0, solver = 'lbfgs', multi_class = 'multinomial', max_iter=10000)\n\n\n# In[31]:\n\n\nlogreg.fit(X_train,y_train)\n\n\n# In[32]:\n\n\nprint(logreg.coef_)\nlogreg.fit(X / np.std(X, 0), Y)\n\n\n# In[33]:\n\n\ny_pred = logreg.predict(X_test)\n\n\n# In[34]:\n\n\nfrom sklearn import metrics\ncnf_matrix = metrics.confusion_matrix(y_test, y_pred)\ncnf_matrix\n\n\n# In[35]:\n\n\nclass_names=[0,1] # name of classes\nfig, ax = plt.subplots()\ntick_marks = np.arange(len(class_names))\nplt.xticks(tick_marks, class_names)\nplt.yticks(tick_marks, class_names)\n# create heatmap\nsns.heatmap(pd.DataFrame(cnf_matrix), annot=True, cmap=\"YlGnBu\" ,fmt='g')\nax.xaxis.set_label_position(\"top\")\nplt.tight_layout()\nplt.title('Confusion matrix', y=1.1)\nplt.ylabel('Actual label')\nplt.xlabel('Predicted label')\n\n\n# In[36]:\n\n\nprint(\"Accuracy:\",metrics.accuracy_score(y_test, 
y_pred))\nprint(\"Precision:\",metrics.precision_score(y_test, y_pred))\nprint(\"Recall:\",metrics.recall_score(y_test, y_pred))\n\n\n# In[37]:\n\n\ny_pred_proba = logreg.predict_proba(X_test)[::,1]\nfpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)\nauc = metrics.roc_auc_score(y_test, y_pred_proba)\nplt.plot(fpr,tpr,label=\"data 1, auc=\"+str(auc))\nplt.legend(loc=4)\nplt.show()\n\n\n# In[38]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport pandas as pd\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 12})\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\n\n\n#print type(newY)# pandas core frame\nX_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=3)\nprint(len(X_test))\nprint(len(y_test))\n\n\n# In[39]:\n\n\nfrom sklearn.linear_model import Lasso\nlassoReg = Lasso(alpha = 0.3, normalize = True)\nlassoReg.fit(X_train,y_train)\npred = lassoReg.predict(X_test)\nmse = np.mean((pred - y_test)**2)\nprint(lassoReg.score(X_test,y_test))\nprint(mse)\nx_plot = plt.scatter(pred,(pred-y_test), c='b')\nplt.hlines(y=0,xmin = -1000, xmax = 5000)\nplt.title('Residual plot')\n#coef = Series(lreg.coef_,predictors).sort_values()\n#coef.plot(kind='bar', title='Modal Coefficients')\n\n\n# In[40]:\n\n\nlr = LinearRegression(normalize=True)\nlr.fit(X_train, y_train)\nrr = Ridge(alpha=0.01) # higher the alpha value, more restriction on the coefficients; low alpha > more generalization, coefficients are barely\n# restricted and in this case linear and ridge regression resembles\nrr.fit(X_train, y_train)\nrr100 = Ridge(alpha=100) # comparison with alpha value\nrr100.fit(X_train, y_train)\ntrain_score=lr.score(X_train, y_train)\ntest_score=lr.score(X_test, y_test)\nRidge_train_score = rr.score(X_train,y_train)\nRidge_test_score = rr.score(X_test, y_test)\nRidge_train_score100 = rr100.score(X_train,y_train)\nRidge_test_score100 = rr100.score(X_test, y_test)\n\n\n# In[41]:\n\n\nprint(\"linear regression train score: \" + str(train_score))\nprint(\"linear regression test score: \" + str(test_score))\nprint(\"ridge regression train score low alpha: \" + str(Ridge_train_score))\nprint(\"ridge regression test score low alpha: \"+ str(Ridge_test_score))\nprint(\"ridge regression train score high alpha: \" + str(Ridge_train_score100))\nprint(\"ridge regression test score high alpha: \" + str(Ridge_test_score100))\n\n\n# In[42]:\n\n\nplt.plot(rr.coef_,alpha=0.7,linestyle='none',marker='*',markersize=5,color='red',label=r'Ridge; $\\alpha = 0.01$',zorder=7) # zorder for ordering the markers\nplt.plot(rr100.coef_,alpha=0.5,linestyle='none',marker='d',markersize=6,color='blue',label=r'Ridge; $\\alpha = 100$') # alpha here is for transparency\nplt.plot(lr.coef_,alpha=0.4,linestyle='none',marker='o',markersize=7,color='green',label='Linear Regression')\nplt.xlabel('Coefficient Index',fontsize=16)\nplt.ylabel('Coefficient Magnitude',fontsize=16)\nplt.legend(fontsize=13,loc=4)\nplt.show()\n\n\n# In[43]:\n\n\nfrom sklearn.linear_model import Lasso\nX_train,X_test,y_train,y_test=train_test_split(X,Y, test_size=0.3, random_state=31)\nlasso = Lasso()\nlasso.fit(X_train,y_train)\ntrain_score=lasso.score(X_train,y_train)\ntest_score=lasso.score(X_test,y_test)\ncoeff_used = np.sum(lasso.coef_!=0)\n\n\n# In[44]:\n\n\nprint(train_score)\nprint(test_score)\nprint(coeff_used)\n\n\n# In[45]:\n\n\nlasso001 = Lasso(alpha=0.01, max_iter=10e5)\nlasso001.fit(X_train,y_train)\n\n\n# 
In[46]:\n\n\ntrain_score001=lasso001.score(X_train,y_train)\ntest_score001=lasso001.score(X_test,y_test)\ncoeff_used001 = np.sum(lasso001.coef_!=0)\n\n\n# In[47]:\n\n\nprint(train_score001)\nprint(test_score001)\nprint(coeff_used001)\n\n\n# In[48]:\n\n\nlasso00001 = Lasso(alpha=0.0001, max_iter=10e5)\nlasso00001.fit(X_train,y_train)\ntrain_score00001=lasso00001.score(X_train,y_train)\ntest_score00001=lasso00001.score(X_test,y_test)\ncoeff_used00001 = np.sum(lasso00001.coef_!=0)\n\n\n# In[49]:\n\n\nprint(train_score00001)\nprint(test_score00001)\nprint(coeff_used00001)\n\n\n# In[50]:\n\n\nlr = LinearRegression()\nlr.fit(X_train,y_train)\nlr_train_score=lr.score(X_train,y_train)\nlr_test_score=lr.score(X_test,y_test)\nprint(lr_train_score)\nprint(lr_test_score)\n\n\n# In[51]:\n\n\n\nplt.plot(lasso.coef_,alpha=0.7,linestyle='none',marker='*',markersize=5,color='red',label=r'Lasso; $\\alpha = 1$',zorder=7) # alpha here is for transparency\nplt.plot(lasso001.coef_,alpha=0.5,linestyle='none',marker='d',markersize=6,color='blue',label=r'Lasso; $\\alpha = 0.01$') # alpha here is for transparency\n\nplt.xlabel('Coefficient Index',fontsize=16)\nplt.ylabel('Coefficient Magnitude',fontsize=16)\nplt.legend(fontsize=13,loc=4)\n\nplt.plot(lasso.coef_,alpha=0.7,linestyle='none',marker='*',markersize=5,color='red',label=r'Lasso; $\\alpha = 1$',zorder=7) # alpha here is for transparency\nplt.plot(lasso001.coef_,alpha=0.5,linestyle='none',marker='d',markersize=6,color='blue',label=r'Lasso; $\\alpha = 0.01$') # alpha here is for transparency\nplt.plot(lasso00001.coef_,alpha=0.8,linestyle='none',marker='v',markersize=6,color='black',label=r'Lasso; $\\alpha = 0.00001$') # alpha here is for transparency\nplt.plot(lr.coef_,alpha=0.7,linestyle='none',marker='o',markersize=5,color='green',label='Linear Regression',zorder=2)\nplt.xlabel('Coefficient Index',fontsize=16)\nplt.ylabel('Coefficient Magnitude',fontsize=16)\nplt.legend(fontsize=13,loc=4)\nplt.tight_layout()\nplt.show()\n\n","repo_name":"aling96/UnderArmour","sub_path":"Regression Model.py","file_name":"Regression Model.py","file_ext":"py","file_size_in_byte":8027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"16096112773","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport argparse\nimport datetime\n\nimport easyts_lib.tasks\nimport easyts_lib.entries\nimport easyts_lib.entry\n\n\ndef handle_tasks(task_args):\n easyts_lib.tasks.get_tasks(task_args.api_key)\n\n\ndef handle_list(list_args):\n easyts_lib.entries.send_request(list_args.api_key,\n list_args.start,\n list_args.end)\n\n\ndef handle_entry(entry_args):\n easyts_lib.entry.add_time_entries(entry_args.api_key,\n entry_args.days,\n entry_args.minutes,\n entry_args.task,\n entry_args.comment)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-a\", \"--api_key\",\n help=\"Timestamp.io API Key, or set TIMESTAMP_IO_API_KEY environment variable\",\n default=os.getenv('TIMESTAMP_IO_API_KEY'))\nparser.add_argument('--version', action='version',\n version='%(prog)s 0.1')\n\nsubparsers = parser.add_subparsers(title=\"TimeStamp Commands\", help=\"TimeStamp Commands Help\")\n\nparser_tasks = subparsers.add_parser(\"tasks\", help=\"Show all the tasks available\")\nparser_tasks.set_defaults(func=handle_tasks)\n\nparser_list = subparsers.add_parser(\"list\", help=\"Show all timesheet entries\")\nparser_list.add_argument(\"-s\", \"--start\",\n help=\"Filter output to show everything after this date\",\n 
type=datetime.date.fromisoformat)\nparser_list.add_argument(\"-e\", \"--end\",\n help=\"Filter output to show everything before this date\",\n type=datetime.date.fromisoformat)\nparser_list.set_defaults(func=handle_list)\n\nparser_entry = subparsers.add_parser(\"entry\", help=\"Enter a timesheet entry\")\nparser_entry.add_argument(\"-d\", \"--days\",\n help=\"Days to create a timesheet entry\",\n type=datetime.date.fromisoformat,\n nargs='*')\nparser_entry.add_argument(\"-c\", \"--comment\",\n help=\"Comment for the entry\")\nparser_entry.add_argument(\"-m\", \"--minutes\",\n help=\"Minutes worked (default 480)\",\n default=480)\nparser_entry.add_argument(\"-t\", \"--task\",\n help=\"Task ID for the entry\",\n type=int)\nparser_entry.set_defaults(func=handle_entry)\n\nargs = parser.parse_args()\n\nif args.api_key is None:\n print(\"You must specify an api_key (ENV: TIMESTAMP_IO_API_KEY) to access the TimeStamp API\")\n sys.exit()\n\ntry:\n args.func(args)\nexcept AttributeError:\n parser.print_help()\n","repo_name":"tupps/easyts","sub_path":"easyts.py","file_name":"easyts.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"23015182826","text":"from xml.dom import minidom\ndoc = minidom.parse('/Users/sandy/Documents/GitHub/test/plant_catalog.xml')\n\n# doc.getElementsByTagName returns a NodeList\n\nstaffs = doc.getElementsByTagName(\"PLANT\")\nfor staff in staffs:\n sid = staff.firstChild.nodeValue\n #nickname = staff.getElementsByTagName(\"nickname\")[0]\n #salary = staff.getElementsByTagName(\"salary\")[0]\n print(sid)\n","repo_name":"SandyLinux/FileHandler","sub_path":"xmlreader.py","file_name":"xmlreader.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1314565555","text":"from .models import TagClass, Tag\n\n\ndef populate_new_database():\n \"\"\"\n The client asked to have some specific tags, but because of the generality of the tag systems, the specific tags\n have to be added manually. The function adds them programmatically.\n \"\"\"\n
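 # one TagClass row is created per group below, and one Tag row per entry; the 'Content' group starts empty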
 tags_groups = {\n 'Approaches': [\n 'Biological',\n 'Cognitive',\n 'Sociocultural',\n ],\n 'Topics': [\n 'Brain & Behavior',\n 'Hormones and pheromones and their effects on behavior',\n 'The relationship between genetics and behavior',\n 'Cognitive processes',\n 'The reliability of cognitive processes',\n 'Emotion and cognition',\n 'The individual and the group',\n 'Cultural origins of behavior and cognition',\n 'Cultural influences on behavior',\n ],\n 'Content': []\n }\n for tag_class, tags in tags_groups.items():\n tc = TagClass(name=tag_class)\n tc.save()\n for tag in tags:\n Tag(tag_class=tc, name=tag).save()\n\n","repo_name":"DE0CH/csia","sub_path":"codestudy/init_utils.py","file_name":"init_utils.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1409702785","text":"import boto3\r\n\"\"\" This code shows the difference\r\n between using the service session as a resource\r\n or using the service session as a client.\r\n Note: resources are only available for specific services (s3, ec2, cloudformation, ...);\r\n for other services you can only use a client.\r\n\"\"\"\r\n\r\naws_man_con=boto3.session.Session(profile_name='sunil-admin')\r\niam_con_resource=aws_man_con.resource(\"iam\")\r\n\r\n\r\nfor each_user in iam_con_resource.users.all():\r\n print(\"user : \", each_user.name)\r\n\r\n\r\niam_con_client=aws_man_con.client(\"iam\")\r\nfor usr_name in (iam_con_client.list_users()['Users']):\r\n print (\"User :\", usr_name['UserName'], \" Created : \", usr_name['CreateDate']\r\n )\r\n","repo_name":"ghsunil0/boto3-scripts","sub_path":"res_cli_diff.py","file_name":"res_cli_diff.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32974170852","text":"import random \r\n\r\nprint(\"Welcome to the Number Guessing Game!!!!!!!!\")\r\n\r\nn = random.randint(1,9)\r\n\r\nchances = 0\r\n\r\nprint(\"Guess a Number Between 1 and 9!!! \")\r\n\r\nwhile chances<5 :\r\n \r\n guess = int(input(\"Enter Your Guess!!!!! \"))\r\n\r\n if guess == n:\r\n print(\"Congrats! YOU GUESSED IT!!!!!\")\r\n break\r\n\r\n elif guess < n:\r\n print(\"Your GUESS IS VERY LOW!!!!!!!! Guess a higher number than \", guess)\r\n\r\n else:\r\n print(\"Your GUESS IS TOO HIGH!!!!!!! Guess a lower number than \", guess)\r\n\r\n chances += 1\r\n\r\n\r\n # out of chances: reveal the answer\r\n if chances >= 5:\r\n print(\"YOU DIDN'T GUESS IT LOSER!!!!! 
The answer was \" , n)\r\n\r\n\r\n\r\n","repo_name":"sakaiw/guessingnumbergame","sub_path":"guessinggame.py","file_name":"guessinggame.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26922119914","text":"#!/usr/bin/python\nfrom Bio import SeqIO\nimport sys\nimport os\n\n#usage: python long.seq.py in.fasta out.fasta 200\n\nfasta_file = snakemake.input[0]\nsize_limit = snakemake.params[0]\nout_file_1 = snakemake.output[0]\nout_file_2 = snakemake.output[1]\n\noutput_small = open(out_file_1, \"w\")\noutput_large = open(out_file_2, \"w\")\n\nsmall = []\nlarge = []\nfor record in SeqIO.parse(open(fasta_file, \"rU\"), \"fasta\"):\n if len(record.seq) <= size_limit:\n small.append(record)\n else:\n large.append(record)\n\nSeqIO.write(small, out_file_1, \"fasta\")\nSeqIO.write(large, out_file_2, \"fasta\")\noutput_small.close()\noutput_large.close()","repo_name":"metagenlab/diag_pipelines","sub_path":"rules/downloading/scripts/fasta_filter_size.py","file_name":"fasta_filter_size.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"1398337363","text":"#!/usr/bin/env -S python3 -i\n# pylint: disable=invalid-name\n\nimport typing;\n\nimport io;\n\nfrom .. import iters;\nfrom ..funcs import keyDispatch as kd;\nfrom ..funcs import typeDispatch as td;\n\nclass Matrix:\n __slots__: typing.Tuple[str] = (\n '_matrix',\n # '__dict__',\n #'__weakref__',\n );\n\n def __init__(\n self: 'Matrix', source: typing.Any = None,\n *args, **kwargs) -> None:\n '''\n Initialize a Matrix depending on the type of `source` or\n the use of keywords\n\n Keyword:\n Both `width` and `height` defined with int:\n\n\n `source` is `io.IOBase` (file):\n Read lines from file; Optionally supply the custom\n deliminator to `delim`; default to comma (`,`)\n\n `source` is `tuple`, specifically, `Tuple[int, int]`:\n Initialize matrix container with zeros, assuming\n `row, col` size.\n '''\n self._matrix: typing.List[typing.List[int]] = [];\n Matrix.__construct(self, source, *args, **kwargs);\n\n @kd.keywordPriorityDispatch\n # default func is methodDispatch\n @td.methodDispatch\n def __construct(self: 'Matrix', *_, **__) -> None:\n pass\n @__construct.__wrapped__.register\n def _(self, source: io.IOBase, *_, delim: str = ',', **__) -> None:\n self.__init__();\n for line in source:\n self._matrix.append(list(int(num) for num in line.split(delim)));\n @__construct.__wrapped__.register\n def _(self, source: tuple, *_, **__) -> None:\n 'Initialize with zeros, given width and height'\n if len(source) < 2:\n raise ValueError;\n if not all(isinstance(elem, int) for elem in source):\n raise TypeError;\n\n # initialize with keywords\n self.__init__(width=source[0], height=source[1]);\n\n @__construct.register('width', 'height')\n def _(\n self, *_,\n width: int, height: int,\n value: int = 0, **__) -> None:\n '''\n Initialize an empty matrix with given width and height;\n optionally take a value to initialize with\n '''\n # default init\n self.__init__();\n if not all(isinstance(elem, int) for elem in (width, height)):\n raise TypeError;\n # row\n for _ in range(width):\n # append\n self._matrix.append([]);\n # col\n for _ in range(height):\n self._matrix[-1].append(value);\n\n def __repr__(self: 'Matrix') -> str:\n 'Formal representation of matrix'\n # print(f'object: {object.__repr__(self)}');\n return (\n '<'\n 
f'{self.__class__.__module__}.'\n f'{self.__class__.__qualname__}'\n f' object at {hex(id(self))}'\n f'; width={self.width}, height={self.height}>'\n );\n\n def __str__(self: 'Matrix') -> str:\n 'Informal representation of a Matrix object'\n def _row(lst: typing.List[int]) -> str:\n return f'[{\",\".join(str(num) for num in lst)}]';\n return f'[{\";\".join(_row(row) for row in self._matrix)}]';\n\n @property\n def prettyStr(self: 'Matrix') -> str:\n 'A descriptor returning a somewhat prettier string representation'\n return str(self).replace(';', '\\n ').replace(',', ',\\t');\n\n def pretty(self: 'Matrix') -> None:\n 'Print the prettyStr'\n print(self.prettyStr);\n\n @td.methodDispatch\n def __getitem__(self: 'Matrix', index: typing.Any) -> int:\n '''\n Return item indicated by `index`\n\n If `index` is `tuple[int row, int col]`:\n Return item at `(row, col)` position\n\n If `index` is int:\n Not implemented\n '''\n raise NotImplementedError;\n @__getitem__.register\n def _(self, index: tuple) -> int:\n assert len(index) >= 2, f'Insufficient length (at least 2): {index}';\n assert all(isinstance(elem, int) for elem in index), \\\n 'All elements of `index` should be of index type';\n return self._matrix[index[0]][index[1]];\n\n @td.methodDispatch\n def __setitem__(self: 'Matrix', index: typing.Any, value: int) -> None:\n '''\n Set value of item indicated by `index`\n\n If `index` is `tuple[int row, int col]`:\n Set value at `(row, col)` position\n\n If `index` is int:\n Not implemented\n '''\n raise NotImplementedError;\n @__setitem__.register\n def _(self, index: tuple, value: int) -> None:\n assert len(index) >= 2, f'Insufficient length (at least 2): {index}';\n assert all(isinstance(elem, int) for elem in index), \\\n 'All elements of `index` should be of index type';\n self._matrix[index[0]][index[1]] = value;\n\n @property\n def height(self: 'Matrix') -> int:\n 'Return the height of the matrix'\n return len(self._matrix);\n\n @property\n def width(self: 'Matrix') -> int:\n 'Return the width of the matrix; if uneven, raise error'\n if not self.height:\n return 0;\n wid: int = len(self._matrix[0]);\n if all(wid == len(row) for row in self._matrix):\n return wid;\n raise ValueError('Uneven matrix encountered.');\n\n def __iter__(self: 'Matrix') -> typing.Iterator[int]:\n '''\n Return a flat iterator for all elements of the matrix;\n order of elements is not necessarily retained\n '''\n return iters.iterAppend(\n iter(row) for row in self._matrix\n );\n\ndef debug() -> None:\n # pylint: disable=unused-variable\n m1 = Matrix(width=3, height=3);\n m2 = Matrix(width=3, height=3, value=1);\n\n# debug();\n\n__all__: typing.Tuple[str, ...] 
= (\n 'Matrix',\n);\n","repo_name":"RuijieYu/algo","sub_path":"data/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38995324286","text":"import sys\n\nfor _ in range(int(input())):\n n = int(input())\n p, q = map(int,input().split())\n res = p/q\n pattern = ''\n ct = 0\n sumi = 0\n\n if(res==0):\n print(0)\n sys.exit(0)\n\n res = str(res)\n res = res[:-2]\n dec = res.split('.')[1]\n\n #print(res)\n #print(dec)\n #print(type(dec))\n\n for i in dec:\n if(i not in pattern):\n pattern += i\n\n #print(pattern)\n \n for j in pattern:\n sumi += int(j)\n\n if(n%len(pattern)==0):\n tot = sumi * n/len(pattern)\n else:\n tot = sumi * n//len(pattern)\n val = n * n//len(pattern)\n\n for k in range(val,n+1):\n for l in pattern:\n tot += int(l)\n\n print(int(tot))\n \n ","repo_name":"Dharm3438/Problem-Solving","sub_path":"2.Codechef/project_code_2.0/3.FRAKK2.py","file_name":"3.FRAKK2.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41267876633","text":"import numpy as np\n\nfrom cftool.misc import timeit\nfrom cftool.misc import allclose\n\nx = np.arange(4).reshape([2, 2])\nprint(\"x\", x)\nprint(\"ii->i\", np.einsum(\"ii->i\", x))\nprint(\"ij,ij->ij\", np.einsum(\"ij,ij->ij\", x, x))\nprint(\"ij,ij->i\", np.einsum(\"ij,ij->i\", x, x))\nprint(\"ij,jk->ik\", np.einsum(\"ij,jk->ik\", x, x))\n\n# B1, T1, D\nx1 = np.random.random([128, 32, 8])\n# B2, T2, D\nx2 = np.random.random([64, 16, 8])\n# inner products (B1, B2, T1, T2)\nwith timeit(\"naive\"):\n inner1 = (x1[..., None, :, None, :] * x2[None, :, None, ...]).sum(4)\nwith timeit(\"einsum\"):\n inner2 = np.einsum(\"ijm,klm->ikjl\", x1, x2)\nwith timeit(\"optimized\"):\n inner3 = np.einsum(\"ijm,klm->ikjl\", x1, x2, optimize=True)\nassert allclose(inner1, inner2, inner3)\n","repo_name":"pku-aiic/styles-and-tricks","sub_path":"tricks/einsum.py","file_name":"einsum.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5012745278","text":"# Exercise 2\n# A multimedia file download program has different download speeds\n# depending on the quality of the user's internet connection.\n# If the connection is above 20 Mbps, the download speed will be 10 Mbps;\n# if the connection is below 20 Mbps but above 5 Mbps,\n# the speed will be 5 Mbps; and if the connection is below 5 Mbps, the download speed will be 1 Mbps.\n# Write a program that computes the download time of a file\n# and the bandwidth used, according to the download speed.\n\ndef tiempo_de_descarga(tamaño_archivo, velocidad_conexion):\n if velocidad_conexion > 20:\n velocidad_descarga = 10\n elif velocidad_conexion > 5:\n velocidad_descarga = 5\n else:\n velocidad_descarga = 1\n \n tiempo_de_descarga = (tamaño_archivo * 8 * 1024 * 1024) / (velocidad_descarga * 1000000)\n banda_ancha = velocidad_descarga\n \n return tiempo_de_descarga, banda_ancha\n
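# Worked example (assumed figures): a 100 MB file on a 25 Mbps connection gets the 10 Mbps tier,\n# so it takes (100 * 8 * 1024 * 1024) / (10 * 1000000), which is about 83.9 seconds.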
\ntamaño_archivo = float(input(\"Enter the file size in MB: \"))\nvelocidad_conexion = float(input(\"Enter the internet speed in Mbps: \"))\n\ntiempo_de_descarga, banda_ancha = tiempo_de_descarga(tamaño_archivo,velocidad_conexion)\n\nprint (\"The download time of the file is: \", tiempo_de_descarga, \"seconds\")\nprint (\"The bandwidth used is:\", banda_ancha, \"Mbps\")\n","repo_name":"JuanArion/Python","sub_path":"apropiacion sesión 3/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38796506294","text":"import os\nfrom datetime import datetime\nfrom time import time as timestamp\n\nimport shutil\n\n\ndef convert_if_timestamp(time):\n if isinstance(time, int) or isinstance(time, float):\n return datetime.fromtimestamp(time)\n\n\n# https://github.com/notifico/notifico/blob/master/notifico/util/pretty.py\ndef pretty_date(time, now=None):\n \"\"\"\n Get a datetime object or a int() Epoch timestamp and return a\n pretty string like 'an hour ago', 'Yesterday', '3 months ago',\n 'just now', etc\n \"\"\"\n\n if now is None:\n now = timestamp()\n\n now = convert_if_timestamp(now)\n time = convert_if_timestamp(time)\n\n diff = now - time\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff // 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff // 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff // 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff // 30) + \" months ago\"\n return str(day_diff // 365) + \" years ago\"\n\n\ndef write_dir_to_zipfile(path, zipf, exclude=None):\n if exclude is None:\n exclude = []\n\n for root, dirs, files in os.walk(path):\n for file_ in files:\n if file_ in exclude:\n continue\n\n zipf.write(\n os.path.join(root, file_),\n os.path.relpath(os.path.join(root, file_), path)\n )\n\n\ndef remove_file_or_dir(path):\n if os.path.isfile(path) or os.path.islink(path):\n os.remove(path)\n else:\n shutil.rmtree(path)\n","repo_name":"Dav1dde/glad-web","sub_path":"gladweb/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"12"} +{"seq_id":"30761809772","text":"# coding: 
utf-8\n\nimport sys\nimport pymongo\n\nfrom django.conf import settings\n\nfrom apps.core.models import Document\n\n\ndef main():\n if len(sys.argv) != 2:\n exit(1)\n\n corpus_name = sys.argv[1]\n doc_ids = Document.objects.filter(corpus__name=corpus_name).values_list(\n 'id', flat=True)\n\n conn = pymongo.connection.Connection(host=settings.MONGODB_CONFIG['host'],\n port=settings.MONGODB_CONFIG['port'])\n collection = conn['pypln']['analysis']\n\n total_size = 0\n total_documents = 0\n size = sys.getsizeof\n tuple_size = size((None, None, None)) # 3-elements tuple size\n for doc_id in doc_ids:\n doc_entry = collection.find_one({u\"_id\": u\"id:{}:pos\".format(doc_id)},\n {\"value\": 1, \"_id\": 0})\n if doc_entry is None:\n continue\n total_documents += 1\n\n doc_pos = doc_entry[u'value']\n if doc_pos is None:\n total_size += size(None)\n else:\n for token, tag, offset in doc_pos:\n total_size += size(token) + size(tag) + size(offset) + \\\n tuple_size\n\n total_size += size([None for i in range(total_documents)])\n print(\"Total documents: {}\".format(total_documents))\n print(\"Total size: {}\".format(total_size))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"turicas/ptwp_tagger","sub_path":"calculate_pos_size.py","file_name":"calculate_pos_size.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"70325209942","text":"#!/usr/bin/python\n\n\ndef readc(filename, sep=\" \"):\n \"\"\"\n Read file with cardinality\n \"\"\"\n sl = {}\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.split()\n if tuple(line) not in sl:\n sl[tuple(line)] = 1\n else:\n sl[tuple(line)] += 1\n return sl\n\n\n\ndef read(filename, sep=\" \"):\n \"\"\"\n No cardinality version\n\n \"\"\"\n sl = []\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.split()\n if line not in sl: sl.append(line)\n return sl\n\n\n","repo_name":"tdi/pypm","sub_path":"simple_logs.py","file_name":"simple_logs.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"12"} +{"seq_id":"39153609090","text":"from __future__ import unicode_literals\n\nimport frappe\nimport unittest\nfrom frappe.utils import getdate, add_days\nfrom erpnext.hr.doctype.salary_structure.test_salary_structure import make_employee\n\nclass TestEmployeePromotion(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.employee = make_employee(\"employee@promotions.com\")\n\t\tfrappe.db.sql(\"\"\"delete from `tabEmployee Promotion`\"\"\")\n\n\tdef test_submit_before_promotion_date(self):\n\t\tpromotion_obj = frappe.get_doc({\n\t\t\t\"doctype\": \"Employee Promotion\",\n\t\t\t\"employee\": self.employee,\n\t\t\t\"promotion_details\" :[\n\t\t\t\t{\n\t\t\t\t\"property\": \"Designation\",\n\t\t\t\t\"current\": \"Software Developer\",\n\t\t\t\t\"new\": \"Project Manager\",\n\t\t\t\t\"fieldname\": \"designation\"\n\t\t\t\t}\n\t\t\t]\n\t\t})\n\t\tpromotion_obj.promotion_date = add_days(getdate(), 1)\n\t\tpromotion_obj.save()\n\t\tself.assertRaises(frappe.DocstatusTransitionError, promotion_obj.submit)\n\t\tpromotion = frappe.get_doc(\"Employee Promotion\", promotion_obj.name)\n\t\tpromotion.promotion_date = getdate()\n\t\tpromotion.submit()\n\t\tself.assertEqual(promotion.docstatus, 
1)\n","repo_name":"libracore/erpnext","sub_path":"erpnext/hr/doctype/employee_promotion/test_employee_promotion.py","file_name":"test_employee_promotion.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"12"} +{"seq_id":"21879222813","text":"#!/usr/bin/env python\n'''\nTo produce volumn data (like CHGCAR) to show \nthe lattice distortion bewteen too twin structures.\nby lmliu@mail.ustc.edu.cn\n'''\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import griddata\n\n# get lattice parameters\n# -----------------------------------------------------------\ndef read_poscar(file_name):\n poscar = open(file_name, 'r')\n for i in range(2): # skip 2 rows\n poscar.readline()\n\n latt = np.zeros((3,3))\n for i in range(3):\n latt[i] = np.array(poscar.readline().split(), dtype=float)\n\n poscar.readline() # skip elemental symbol line\n\n N = np.array(poscar.readline().split(), dtype=int).sum()\n poscar.close()\n #pos = np.loadtxt(file_name, skiprows=9, usecols=(0, 1, 2)[0:N, :]\n return latt, N\n\n# Get grid number\n# -------------------------------------------------------------\ndef get_gridnumber(OUTCAR):\n outcar = open('OUTCAR', 'r')\n for i in outcar.readlines():\n if 'NGXF= ' in i:\n NGX, NGY, NGZ = re.findall('(\\d+)', i)\n outcar.close() \n return NGX, NGY, NGZ\n\n# interpolation\n# -------------------------------------------------------------\ndef interpolator(latt, NGX, NGY, NGZ, ini_pos_cart, diff_s):\n grid = []\n for i in np.linspace(0, np.sqrt(np.dot(latt[2], latt[2].T)), NGZ):\n for j in np.linspace(0, np.sqrt(np.dot(latt[1], latt[1].T)), NGY):\n for k in np.linspace(0, np.sqrt(np.dot(latt[0], latt[0].T)),NGX):\n grid.append([k, j, i])\n grid_array = np.array(grid, dtype='float')\n np.savetxt('grid', grid) \n return griddata(ini_pos_cart, diff_s, grid_array, method='linear', fill_value=0.) 
# only give 0\n\n# MAIN\n# ============================================================\nini_latt, ini_N = read_poscar('POSCAR')\nini_pos = np.loadtxt('POSCAR', skiprows=9, usecols=(0, 1, 2))[0:ini_N, :]\nini_pos_cart = np.dot(ini_latt, ini_pos.T).T\n\nfin_latt, fin_N = read_poscar('CONTCAR')\nfin_pos = np.loadtxt('CONTCAR', skiprows=9, usecols=(0, 1, 2))[0:fin_N, :]\nfin_pos_cart = np.dot(fin_latt, fin_pos.T).T\n\ndiff_v = fin_pos_cart - ini_pos_cart\ndiff_s = np.sqrt(np.dot(diff_v, diff_v.T).diagonal())\nnp.savetxt('diff_s', diff_s)\n\n#NGX, NGY, NGZ = get_gridnumber('OUTCAR')\nNGX, NGY, NGZ = 20,30,50\n\ndata = interpolator(ini_latt, NGX, NGY, NGZ, ini_pos_cart, diff_s)\n\nnp.savetxt('volumn_data', data.reshape(-1,10))\n\n\n\n# plot\n# ------------------------------------------------------------\n#plt.scatter3D(ini[:,0], ini[:,1], ini[:,2])\n#plt.imshow(ip.T, origin='lower', extent=(0, lyz[0,0], 0, lyz[1,1]))\n#plt.colorbar()\n#plt.savefig('fig.png', dpi=300)\n#plt.show()\n","repo_name":"liming-liu/distortion_hotmap","sub_path":"distortion.py","file_name":"distortion.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29380634741","text":"from bs4 import BeautifulSoup\nimport requests\n\n\ndef getAnimeName(id):\n url = \"https://myanimelist.net/anime/\" + str(id)\n response = requests.get(url)\n if response.status_code != 200:\n return None\n soup = BeautifulSoup(response.text, 'html.parser')\n anime_name = soup.find('p', class_=\"title-english title-inherit\")\n if anime_name is None:\n anime_name = soup.find(\n 'h1', class_=\"title-name\").text.strip()\n else:\n anime_name = anime_name.text.strip()\n return anime_name\n\n\ndef getAnime(id):\n url = f'https://myanimelist.net/anime/{id}/'\n response = requests.get(url)\n\n if response.status_code != 200:\n return None\n\n soup = BeautifulSoup(response.text, 'html.parser')\n\n tags = soup('div', id=\"horiznav_nav\")\n for tag in tags:\n link = tag.find_all('li')\n s = link[2]\n link = s.find('a')\n episode_link = link.get('href')\n\n episode_link = requests.get(episode_link)\n\n soupy = BeautifulSoup(episode_link.text, 'html.parser')\n # get tbody\n table_body = soupy.find('tbody')\n # find all tr with class episode-list-data\n trs = table_body.find_all('tr', class_=\"episode-list-data\")\n\n episode_number = None\n episode_name = \"\"\n anime_name = \"\"\n image_url = \"\"\n\n for tr in trs:\n # td with class episode-number nowrap\n episode_number = tr.find('td', class_=\"episode-number nowrap\").text\n # td with class episode-title\n episode_name = tr.find('td', class_=\"episode-title\").text.strip()\n\n # h1 class title-name\n anime_name = soupy.find('p', class_=\"title-english title-inherit\")\n if anime_name is None:\n anime_name = soupy.find(\n 'h1', class_=\"title-name\").text.strip()\n else:\n anime_name = anime_name.text.strip()\n\n # img class lazyloaded\n image_url = soupy.find(\n 'div', style=\"text-align: center;\").find('a').find('img').get('data-src')\n\n data = {\"anime_id\": id, \"anime_name\": anime_name, \"episode_name\": episode_name,\n \"image_url\": image_url, \"episode_number\": episode_number}\n\n return data\n","repo_name":"Servatom/AnimeNotifierBot","sub_path":"bot/functionality/datascrape.py","file_name":"datascrape.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2798038596","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Thu Aug 11 17:40:13 2022\n\n@author: Rupesh Roy\n\"\"\"\n\n#from sklearn.datasets import fetch_openml\n\n# 1.0 Import libraries\n\nimport os\nimport pandas as pd\n\n# 1.1 Load data\n\nos.chdir(\"F:\\\\SPBT Training\\\\20200725-26\\\\Exercise\")\ndf = pd.read_csv(\"caravan-insurance-challenge.csv\")\n\n# 1.2 Analyse the dataset\n\ndf.info() \ndf.isnull().values.any() # There is no NULL value\ndf.isna().values.any() # There is no NA \n\n# 2.0 Seperate train test \n\ndf[\"ORIGIN\"].value_counts()\nX_train = df[df[\"ORIGIN\"]==\"train\"]\nX_test = df[df[\"ORIGIN\"]==\"test\"]\n\ny_train = X_train.pop(\"CARAVAN\")\n\nX_train.shape\nX_test.shape\n\n# 2.1 Check data is balanced or not\n\ny_train.value_counts()\n\n#X_train,X_val,y_train,y_val = train_test_split(X_train, y_train,test_size=0.2, shuffle=True,stratify = y_train,random_state = 42)\n\n# Feature Scaling\n# All features are categorical variables out of which Except MOSTYPE and MOSHOOFD all are\n# ordinal and represent correct order between categories, therefore we will encode only these\n# two features. We will use Binary Encoding.\n\n","repo_name":"rup0039/ML-CaravanInsurance","sub_path":"Caravan_Insurance.py","file_name":"Caravan_Insurance.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18536798282","text":"import sys\n\nfrom qcforever.gaussian_run import GaussianRunPack\n\n\ndef main():\n usage ='Usage; %s infile' % sys.argv[0]\n\n try:\n infilename = sys.argv[1]\n except:\n print (usage); sys.exit()\n\n #for stargint from gaussian calculation\n #option = \"opt pka symm\"\n option = \"freq opt homolumo dipole uv symm satkoopmans\"\n #option = \"symm opt freq nmr uv=/home/sumita/GaussianRun_2.2beta/Samples/UV_peak.dat\" \n #option = \"opt homolumo energy dipole deen stable2o2 uv=/home/sumita/GaussianRun_2.2beta/Samples/UV_peak.dat vip vea aip aea\" \n #option = \"opt homolumo energy dipole deen stable2o2 uv vip vea aip aea\" \n #option = \"opt homolumo energy dipole deen stable2o2 fluor=3\" \n #option = \"opt cden homolumo energy dipole deen stable2o2 tadf\" \n #option = \"opt nmr uv energy homolumo dipole deen stable2o2 vip vea cden symm\"\n\n #test = GaussianRunPack.GaussianDFTRun('B3LYP', 'STO-3G', 10, option, infilename, solvent='water', restart=False)\n test = GaussianRunPack.GaussianDFTRun('LC-BLYP', 'STO-3G', 10, option, infilename, restart=False)\n\n test.para_functional = [0.3]\n test.mem = '5GB'\n test.timexe = 60*60\n #for geometric constrain\n #test.geom_spec = { '1 2 3 14': [180.0, 'F'], '6 5 4 13': [180.0, 'F'], '2 1 7 12': [180.0, 'F'], '5 6 8 9' : [180.0, 'F']}\n #Specify spin multiplicity and charge of the target\n #test.SpecSpinMulti = 3\n #test.SpecTotalCharge = 3\n outdic = test.run_gaussian()\n\n print (outdic)\n\n\nif __name__=='__main__':\n main()\n","repo_name":"molecule-generator-collection/QCforever","sub_path":"gaussian_main.py","file_name":"gaussian_main.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"12"} +{"seq_id":"35592213262","text":"from app import Gtk\n\n\nclass EditWindow(Gtk.MessageDialog):\n def __init__(self, parent, toolbox):\n super().__init__(title=f\"Edit {toolbox}\", transient_for=parent, flags=0)\n\n self.add_buttons(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)\n\n save_btn = Gtk.Button(label=\"Save\")\n self.add_action_widget(save_btn, Gtk.ResponseType.OK)\n\n l1 = 
Gtk.Label(label=\"Toolbox Name:\")\n self.toolbox_name = Gtk.Entry(text=toolbox)\n self.toolbox_name.connect(\n \"activate\", lambda e: self.emit(\"response\", Gtk.ResponseType.OK)\n )\n\n box = self.get_content_area()\n\n spacer = Gtk.Box(spacing=10)\n spacer.set_border_width(10)\n\n spacer.add(l1)\n spacer.add(self.toolbox_name)\n\n box.add(spacer)\n\n self.show_all()\n\n def get_entered_name(self):\n return self.toolbox_name.get_text()\n","repo_name":"Dvlv/toolbox-gui","sub_path":"src/edit_window.py","file_name":"edit_window.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"12"} +{"seq_id":"71132845461","text":"from flask import Blueprint\nfrom flask import request\nfrom flask import make_response\nfrom flask import current_app\n\nfrom models.Signup_login_model import Signup_login_model\nfrom models.auth_model import auth_model\nfrom flask import jsonify\nfrom flask_cors import CORS\n\nsignup_login_model = Signup_login_model()\nauth_model_decorator = auth_model()\n\n\nsignup_login_bp = Blueprint(\"signup_login_bp\", __name__)\n# CORS(signup_login_bp, resources={\"*\": {\"origins\": \"http://localhost:3000\"}})\nCORS(signup_login_bp, supports_credentials=True)\n\n\n# HTTP METHODS\nall_methods = [\"GET\", \"POST\", \"PUT\"]\n\n\n@signup_login_bp.route(\"/signup\", methods=all_methods)\ndef signup_controller():\n method = request.method\n if method == \"POST\":\n json_data = request.json\n return signup_login_model.signup_model(json_data)\n else:\n pass\n\n\n@signup_login_bp.route(\"/login\", methods=all_methods)\ndef login():\n method = request.method\n if method == \"POST\":\n json_data = request.json\n if \"body\" in json_data.keys():\n json_data = json_data[\"body\"]\n print(\"............json_data\", json_data)\n acc_and_ref_token = signup_login_model.login_model(json_data).json\n # auth_model.access_token = acc_and_ref_token[\"access_token\"]\n # auth_model.refresh_token = acc_and_ref_token[\"refresh_token\"]\n return acc_and_ref_token\n\n else:\n pass\n\n\n@signup_login_bp.route(\"/protected\", methods=all_methods)\n@auth_model_decorator.token_auth_model(\"\")\ndef protected():\n method = request.method\n if method == \"POST\":\n return make_response({\"message\": \"post bharta\"})\n\n if method == \"GET\":\n return make_response({\"message\": \" getbharatdsdfsdf\"})\n return \"protected data received\"\n\n\n@signup_login_bp.route(\"/refresh_token\", methods=all_methods)\ndef refresh_token():\n if request.method == \"GET\":\n jwt_refresh_token = request.headers\n\n print(jwt_refresh_token)\n if \"headers\" in jwt_refresh_token.keys():\n jwt_refresh_token = jwt_refresh_token[\"headers\"]\n print(jwt_refresh_token)\n jwt_refresh_token = jwt_refresh_token[\"Authorization\"]\n\n # print(\"\\n \\n\",jwt_refresh_token)\n obj = auth_model()\n obj.refresh_token = jwt_refresh_token\n obj.refresh_token_model()\n jwt_access_token = obj.access_token\n # del obj\n res = make_response({\"access_token\": jwt_access_token}, 200)\n res.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n res.headers.add(\"Access-Control-Allow-Headers\", \"*\")\n res.headers.add(\"Access-Control-Allow-Methods\", \"*\")\n return res\n\n return \"something 
wrong\"\n","repo_name":"bharatadk/Image-Background-Eraser","sub_path":"server/controllers/signup_login_blueprint.py","file_name":"signup_login_blueprint.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"15502817137","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 24 15:29:05 2020\n\n@author: danielstilckfranca\n\"\"\"\n\nfrom scipy.sparse import csr_matrix, find\nimport random\nimport numpy as np\nimport time\nimport itertools\nimport pandas as pd\n\ndef import_instance(name_file):\n \n \n \n file1 = open(name_file, 'r') \n Lines = file1.readlines()\n line0=Lines[0].split()\n n=int(line0[0])\n m=len(Lines)\n rows=[]\n columns=[]\n entries=[]\n for k in range(1,m):\n info_entry=Lines[k].split()\n rows.append(int(info_entry[0])-1)\n rows.append(int(info_entry[1])-1)\n columns.append(int(info_entry[1])-1)\n columns.append(int(info_entry[0])-1)\n entries.append(0.5*int(info_entry[2]))\n entries.append(0.5*int(info_entry[2]))\n A=csr_matrix((entries, (rows, columns)))\n return A\n \n \n \n\n\n\n\n\n\n\n","repo_name":"dsfranca/limitations_optimization_plots","sub_path":"import_sparse_maxcut.py","file_name":"import_sparse_maxcut.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"19228875932","text":"#Dictionary\n#_________________________________________________\n\ncar_0 = {'make':'toyota','model':'highlander','colour':'black'}\nprint(car_0['make'])\nprint(car_0['model'])\nprint(car_0,\"\\n\\n\")\n\n#________________________________________________\n\n#adding extra keys and value pairs to the dictionary\nstudent_0 = {\n 'name':'john hopkins',\n 'age':28,\n 'b_year': 1995,\n 'body_fit': 'athletic'\n }\nprint(f\"{student_0['name'].title()},\\\n is a student and was born in {student_0['b_year']}, \\\n he is {student_0['age']} year old.\")\nstudent_0['location'] = \"Chicago IL\"\nstudent_0['major'] = \"Computer Science\"\nprint(student_0,\"\\n\\n\")\n#________________________________________________\n\n#Creating empty dictionary\nstudent_1 = {}\nstudent_1['name'] = \"adam smith\"\nstudent_1['major'] = \"Economics\"\nstudent_1['age'] = 25\nprint(f\"Student name is {student_1['name'].title()}, he studies {student_1['major']} and he is {student_1['age']} year old\")\ndel student_1['age']\nprint(student_1)\nstudent_1['gender'] = 'Male'\nprint(student_1,\"\\n\\n\")\n\n#__________________________________________________\n\n#Method--> get() \neng_uzb = {\n 'apple':\"olma\",\n 'peach':\"nok\",\n 'melon':\"qovun\",\n 'orange': \"apelsin\",\n 'mulberry':\"tut\"\n }\nprint(\"Here is the English - Uzbek fruit dictioanry: \\n\",eng_uzb)\nmeva = eng_uzb.get('carrot',\"There is no such fruit.\")\nprint(meva, \"\\n\\n\")\n\n#___________________________________________________\n\n#AMALIYOT\n#____________________________________________________\n#1\nbrother = {'name' : 'aaron',\n 'b_year': 2004,\n 'age' : 19,\n 'l_place': \"UZB\"\n }\n\nprint(f\"Brother`s name is {brother['name'].title()},\\\n He was born in {brother['b_year']}\\\n he is living in {brother['l_place']}\\\n he is currently {brother['age']} years old.\\n\\n\")\n#____________________________________________________\n\n#2\nfamily = {\n 'father':\"steak\",\n 'mother':\"palov\",\n 'brother_1':\"noodles\",\n 'brother_2':\"somsa\",\n 'sister':\"soup\"\n }\nprint(\"This is the list of family member`s favorite food: 
\\n\",family,\"\\nNow will print only 3 meals that are mostly consumed: >>> \")\nprint(family['father'], family['brother_2'], family['sister'],\"\\n\\n\")\n\n#____________________________________________________\n\n#3\nterms = {\n 'print': \"consolega chiqarish\",\n 'if': \"agar\",\n 'else': \"unday bolsa\",\n 'float':\"o`nlik son\",\n 'int':\"butun son\",\n 'del': \"delete qilish\",\n '.append()': \"element qoshish\",\n '.title()':\"bosh xarfi katta bulsin\",\n '.upper()': \"barcha xarflar katta bulsin\",\n '.lowee()':\"barcha xarflar kichik bulsin\"\n }\nprint(\"These the basics what have been covered during this Python course:\\n\", terms,\"\\n\\n\")\n\n#____________________________________________________\n\n#4\nen_uz = {\n 'apple':\"olma\",\n 'peach':\"nok\",\n 'melon':\"qovun\",\n 'orange': \"apelsin\",\n 'mulberry':\"tut\"\n }\nkey = input(\"Write a fruit name please:\").lower()\nprint(en_uz.get(key,\"There is no such fruit on our dictionary\"))\n\nkey = input(\"\\nWrite a fruit name please:\").lower()\ntranslation = en_uz.get(key)\nif translation == None:\n print(\"There is no such fruit on our dictionary\")\nelse:\n print(f\"The fruit {key.title()} is translated into uzbek as {translation}\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"abdulqodirgo/python_lessons","sub_path":"Sariq Dev Python/Dars_5.1_Dictionary.py","file_name":"Dars_5.1_Dictionary.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30701048056","text":"# library imports\r\nfrom select import select\r\nimport socket\r\nimport sys\r\nimport threading\r\nimport time\r\n# local file imports\r\nimport client_auth as auth\r\n\r\n# constants\r\nMAX_USERS = 10000\r\n\r\ndef recv_handler():\r\n while True:\r\n data = client_socket.recv(2048).decode()\r\n code, response = data.split(':', 1)\r\n\r\n # logout request\r\n if code == 'response 22':\r\n logout(response)\r\n break\r\n\r\n # server message\r\n if code.startswith('response 1'):\r\n print(f'> {response} < <\\n> ', end='')\r\n # private request\r\n elif code.startswith('response 5'):\r\n if code == 'response 50':\r\n p2p_setup_send(response)\r\n elif code == 'response 51':\r\n print(f'> {response} < <\\n> ', end='')\r\n\r\ndef send_handler():\r\n while True:\r\n command = input('> ')\r\n # send private message\r\n if command.startswith('private '):\r\n try:\r\n user, message = command.split(' ', 2)[1:]\r\n if connections.get(user):\r\n data = f'private 10:{username} (private): {message}'\r\n connections[user]['send'].send(data.encode())\r\n else:\r\n print(f'> > Error. Private messaging to {user} not enabled < <')\r\n except ValueError:\r\n print('> > Error. Invalid usage of private < <')\r\n # stop private messaging\r\n elif command.startswith('stopprivate '):\r\n try:\r\n user = command.split(' ', 1)[1]\r\n if connections.get(user):\r\n data = f'private 11:{username}'\r\n connections[user]['send'].send(data.encode())\r\n print(f'> > Private messaging with {user} has been stopped < <')\r\n del connections[user]\r\n else:\r\n print(f'> > Error. There is no private with {user} to stop < <')\r\n except ValueError:\r\n print('> > Error. 
Invalid usage of stopprivate < <')\r\n        # send server command\r\n        else:\r\n            data = f'request 20:{username}:{command}'\r\n            client_socket.send(data.encode())\r\n\r\ndef p2p_handler():\r\n    global connections\r\n\r\n    while True:\r\n        recv_conns = [conn['recv'] for conn in connections.values()]\r\n        read_sockets = select(recv_conns, [], [])[0]\r\n\r\n        for socket in read_sockets:\r\n            # handle new peer connection\r\n            if socket == p2p_socket:\r\n                peer_conn, peer_addr = p2p_socket.accept()\r\n                data = peer_conn.recv(2048).decode()\r\n\r\n                status, data = data.split('|', 1)\r\n                if status == 'response':\r\n                    connections[data.split('|')[0]] = {\r\n                        'recv': peer_conn,\r\n                        'send': socket_store\r\n                    }\r\n                else:\r\n                    p2p_setup_recv(peer_conn, data)\r\n            # peer sent data\r\n            else:\r\n                data = socket.recv(2048).decode()\r\n                # force logout\r\n                if not data:\r\n                    for user in list(connections):  # iterate over a copy; entries may be deleted\r\n                        if connections[user]['recv'] == socket:  # compare the stored socket, not the dict\r\n                            del connections[user]\r\n                            break\r\n                    continue\r\n\r\n                code, response = data.split(':', 1)\r\n                # data was message\r\n                if code == 'private 10':\r\n                    print(f'> {response} < <\\n> ', end='')\r\n                # data was stop request\r\n                elif code == 'private 11':\r\n                    message = f'Private messaging with {response} has been stopped'\r\n                    print(f'> {message} < <\\n> ', end='')\r\n                    del connections[response]\r\n\r\ndef p2p_setup_recv(conn, data):\r\n    global connections\r\n\r\n    peer_user, peer_ip, peer_port = data.split('|')\r\n    peer_socket = p2p_setup_port(peer_ip, peer_port)\r\n    connections[peer_user] = {\r\n        'recv': conn,\r\n        'send': peer_socket\r\n    }\r\n\r\n    data = f'response|{username}|{ip}|{assigned_port}'\r\n    peer_socket.send(data.encode())\r\n\r\n    print(f'> {peer_user} started private messaging with you < <\\n> ', end='')\r\n\r\ndef p2p_setup_send(data):\r\n    global connections\r\n    global socket_store\r\n\r\n    peer_user, peer_ip, peer_port = data.split('|')\r\n    socket_store = p2p_setup_port(peer_ip, peer_port)\r\n\r\n    data = f'request|{username}|{ip}|{assigned_port}'\r\n    socket_store.send(data.encode())\r\n\r\n    print(f'> You started private messaging with {peer_user} < <\\n> ', end='')\r\n\r\ndef p2p_setup_port(ip, port):\r\n    peer_port = int(port) + MAX_USERS\r\n    peer_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    peer_socket.connect((ip, peer_port))\r\n    return peer_socket\r\n\r\ndef logout(message):\r\n    global connections\r\n\r\n    data = f'private 11:{username}'\r\n    for user in [*connections.keys()]:\r\n        if user != '!p2p':\r\n            connections[user]['send'].send(data.encode())\r\n            del connections[user]\r\n    print(f'> {message} < <')\r\n    client_socket.close()\r\n    p2p_socket.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    # setup client to connect with server\r\n    ip, port = sys.argv[1], int(sys.argv[2])\r\n    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    client_socket.connect((ip, port))\r\n    assigned_port = int(client_socket.recv(2048).decode())\r\n\r\n    # authenticate with server\r\n    username = auth.username(client_socket, assigned_port)\r\n    if not username and not auth.password(client_socket, assigned_port):\r\n        client_socket.close()\r\n        exit()\r\n\r\n    # setup socket to connect with peers\r\n    p2p_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n    p2p_socket.bind((ip, assigned_port + MAX_USERS))\r\n    p2p_socket.listen(1)\r\n\r\n    # global data\r\n    connections = {'!p2p': {'recv': p2p_socket}}\r\n    socket_store = None\r\n\r\n    send_thread = threading.Thread(name='SendHandler', target=send_handler)\r\n    send_thread.daemon = True\r\n    send_thread.start()\r\n\r\n    
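# note: unlike the send/p2p threads this one is not a daemon, so the process\r\n    # stays alive until recv_handler returns after the server's logout response\r\n    # start main receiving 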
thread\r\n recv_thread = threading.Thread(name='RecvHandler', target=recv_handler)\r\n recv_thread.start()\r\n\r\n p2p_thread = threading.Thread(name='p2pHandler', target=p2p_handler)\r\n p2p_thread.daemon = True\r\n p2p_thread.start()\r\n","repo_name":"evan-t-lee/cs3331","sub_path":"assignment/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"1397091976","text":"from currency_converter import CurrencyConverter\r\nimport tkinter as tk\r\na = CurrencyConverter()\r\nroot = tk.Tk()\r\nroot.geometry(\"500x300\")\r\n\r\ndef clicked():\r\n amount = int(entry_1.get())\r\n currency_1 = entry_2.get()\r\n currency_2 = entry_3.get()\r\n data = a.convert(amount,currency_1,currency_2)\r\n label_5 = tk.Label(root,text=data).place(x = 230,y = 250)\r\n\r\nlabel_1 = tk.Label(root,text=\"CURRENCY CONVERTER\",font = \"calibri 20 bold\").place(x = 120,y = 10)\r\nlabel_2 = tk.Label(root,text=\"Enter amount: \",font = \"calibri\").place(x = 80,y = 60)\r\nentry_1 = tk.Entry(root)\r\n\r\nlabel_3 = tk.Label(root,text=\"Enter currency: \",font = \"calibri\").place(x = 80,y = 110)\r\nentry_2 = tk.Entry(root)\r\nlabel_4 = tk.Label(root,text=\"Enter required currency: \",font = \"calibri\").place(x = 80,y = 160)\r\nentry_3 = tk.Entry(root)\r\n\r\nbutton = tk.Button(root,text=\"click\",command=clicked).place(x = 230,y = 210)\r\nentry_1.place(x = 300,y = 65)\r\nentry_2.place(x = 300,y = 115)\r\nentry_3.place(x = 300,y = 165)\r\n\r\nroot.mainloop()\r\n","repo_name":"keerthi132000/currency-converter","sub_path":"Assignment.py","file_name":"Assignment.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5345573594","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 1 11:34:45 2018\n\n@author: munyeen.chong\n\"\"\"\n\nguests = ['first', 'second','third']\n#print(guests[2])\n\n#count from right (the first one)\n#print(guests[-1])\n\n#print('First value default :' + guests[0])\n\nguests[0] = 'Steve'\n#print('First value is now :' + guests[0])\n\nguests.append('New Guy')\n\n#print('New value is now :' + guests[-1])\n\n#print('2nd Element is :' + guests[1])\n\n#guests.remove('second')\n#del guests[1]\n\n#print('2nd Element After remove is :' + guests[1])\n\n#check the index position\n#print(guests.index('third'))\n\n#for step in range(len(guests)):\n# print(guests[step])\n \n#scores = [78,68,88,98,24]\n#print(scores[3])\n\n#count from right (the 2nd one)\n#print(scores[-2])\n\n\"\"\"\n#guests.sort()\n#\n#for guest in guests:\n# print(guest)\n# \n#print(\"Done\") \n\"\"\"\n\nscores = [78,68,88,98,24]\nscores.sort()\nfor score in scores:\n print(score)\n\n\n","repo_name":"munyeenC/munyeen-kl-oct18","sub_path":"arrays.py","file_name":"arrays.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38349955625","text":"import os\nimport re\nimport unicodedata\nimport numpy as np\nimport wisardpkg as wsd\nimport spacy\nfrom collections import defaultdict\nfrom scipy.stats import rankdata\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom sklearn.ensemble import 
GradientBoostingClassifier, RandomForestClassifier\n\ndef std_text(text):\n    txt = unicodedata.normalize('NFD', text)\n    return u''.join(ch for ch in txt if unicodedata.category(ch) != 'Mn')\n\ndef genThermometer(db):\n    therm = []\n    # bits per feature: enough thermometer levels to separate its distinct rank values\n    for cid in range(np.shape(db)[1]):  # use the argument, not the global dbmatrix\n        column = db[:, cid].todense()\n        ranking = rankdata(column, method=\"min\")\n        unique_values = np.unique(ranking).tolist()\n        therm.append(int(np.ceil(np.log2(len(unique_values)))))\n    return therm\n\ndirectory = [\"Fake.br-Corpus/full_texts/fake/\", \"Fake.br-Corpus/full_texts/true/\"]\n\ndocuments = []\nlabels = []\ndoc_ent = []\ndicio = defaultdict(set)\nnlp = spacy.load(\"pt_core_news_sm\")\nfor dir_ in directory:\n    for filename in os.listdir(dir_):\n        if \".txt\" in filename:\n            with open(dir_+filename, \"r\", encoding=\"utf-8\") as rdb: \n                d = rdb.read()\n                dner = nlp(d)\n                #for ent in dner.ents:\n                #    dicio[ent.label_].add(ent.text)\n                #doc_ent.append(\" \".join([ent.text for ent in dner.ents]))\n                labels.append(dir_.split(\"/\")[-2])\n                documents.append(dner)\n\n# words = [\" \".join([en.ent_type_ if en.ent_type_ in [\"PER\", \"ORG\", \"LOC\", \"MISC\"] else \"Other\" for en in d]) for d in documents]\nwords = [\" \".join([en.ent_type_ if en.ent_type_ in [\"PER\", \"ORG\", \"LOC\", \"MISC\"] else en.pos_ for en in d]) for d in documents]\n\n\nregex = re.compile('[^a-zA-Z]')\ncntvec = CountVectorizer(ngram_range=((1,5)))\ndbmatrix = cntvec.fit_transform(words)\nfeatures = cntvec.get_feature_names()\n\nprint(words[0])\nprint(len(features))\n\nskf = StratifiedKFold(n_splits=10, shuffle=True)\n\ndensemtx = dbmatrix.todense()\ntherm = genThermometer(dbmatrix)\n\nmins = np.min(densemtx, axis=0).squeeze().tolist()[0]\nmaxs = np.max(densemtx, axis=0).squeeze().tolist()[0]\n\ndtherm = wsd.DynamicThermometer(therm, mins, maxs)\n\nbinX = [dtherm.transform(densemtx[i].tolist()[0]) for i in range(dbmatrix.shape[0])]\nfor train_index, test_index in skf.split(dbmatrix, labels):\n    for n in range(1, 11):\n        win = n * 3\n        print(\"TRAIN - \"+str(len(train_index)))\n        print(\"TEST - \"+str(len(test_index)))\n        ds_train = wsd.DataSet([binX[ix] for ix in train_index],[labels[ix] for ix in train_index])\n        ds_test = wsd.DataSet([binX[ix] for ix in test_index], [labels[ix] for ix in test_index])\n\n        wisard = wsd.ClusWisard(win, 0.7, 20, 10)\n        wisard.train(ds_train)\n        outTrain = np.array(wisard.classify(ds_train))\n        outTest = np.array(wisard.classify(ds_test))\n        print('Train accuracy:', accuracy_score(outTrain, [labels[ix] for ix in train_index]))\n        print('Test accuracy:', accuracy_score(outTest, [labels[ix] for ix in test_index]))\n\n        wisard = wsd.ClusWisard(win, 0.7, 20, 10)\n        wisard.train(ds_test)\n        outTrain = np.array(wisard.classify(ds_train))\n        outTest = np.array(wisard.classify(ds_test))\n        print('Train accuracy:', accuracy_score(outTrain, [labels[ix] for ix in train_index]))\n        print('Test accuracy:', accuracy_score(outTest, [labels[ix] for ix in test_index]))\n\n        svm = SVC()\n        svm.fit(dbmatrix[train_index], [labels[ix] for ix in train_index])\n        res = svm.predict(dbmatrix[test_index])\n        print(\"SVM-TRAIN\")\n        print(accuracy_score(res, [labels[ix] for ix in test_index]))\n        svm = SVC()\n        svm.fit(dbmatrix[test_index], [labels[ix] for ix in test_index])\n        res = svm.predict(dbmatrix[train_index])\n        print(\"SVM-TEST\")\n        print(accuracy_score(res, [labels[ix] for ix in train_index]))\n\n        gbc = GradientBoostingClassifier()\n        gbc.fit(dbmatrix[train_index], [labels[ix] for ix in train_index])\n        res = gbc.predict(dbmatrix[test_index])\n        print(\"GBC-TRAIN\")\n    
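# held-out accuracy of the fold-trained GBC, mirroring the SVM block above\n    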
print(accuracy_score(res, [labels[ix] for ix in test_index]))\n gbc = GradientBoostingClassifier()\n gbc.fit(dbmatrix[test_index], [labels[ix] for ix in test_index])\n res = gbc.predict(dbmatrix[train_index])\n print(\"GBC-TEST\")\n print(accuracy_score(res, [labels[ix] for ix in train_index]))\n \n rdf = RandomForestClassifier()\n rdf.fit(dbmatrix[train_index], [labels[ix] for ix in train_index])\n res = rdf.predict(dbmatrix[test_index])\n print(\"RDF-TRAIN\")\n print(accuracy_score(res, [labels[ix] for ix in test_index]))\n\n rdf = RandomForestClassifier()\n rdf.fit(dbmatrix[test_index], [labels[ix] for ix in test_index])\n res = rdf.predict(dbmatrix[train_index])\n print(\"RDF-TEST\")\n print(accuracy_score(res, [labels[ix] for ix in train_index]))\n","repo_name":"marcosspalenza/FakeNewsDetection","sub_path":"WNN-UFRJ/fakeBR.py","file_name":"fakeBR.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"35702312272","text":"from django.urls import path\nfrom .views import HomePageView, InfoPageView, AboutMenuPageView, ProjectsMenuPageView\nfrom . import views\n\nurlpatterns = [\n path('', HomePageView.as_view(), name=\"home\"),\n path('info/', InfoPageView.as_view(), name=\"info\"),\n path('menu_about/', AboutMenuPageView.as_view(), name=\"menu_about\"),\n path('menu_projects/', ProjectsMenuPageView.as_view(), name=\"menu_projects\"),\n path('cmd_display/', views.cmdDisplayView, name=\"cmd_display\"),\n path('city3D_display/', views.city3DDisplayView, name=\"city3D_display\"),\n path('timeLine_display/', views.timeLineDisplayView, name=\"timeLine_display\"),\n path('techTree_display/', views.techTreeDisplayView, name=\"techTree_display\"),\n]","repo_name":"Jotamontiel/mysocialdistanceworkdir","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30890537031","text":"import os\nimport torch\nfrom torch import nn\nimport torch_mlu.core.mlu_model as ct\nos.environ['TORCH_MIN_CNLOG_LEVEL'] = '-1'\n\n# cnnl test\nx = torch.randn((1,2,3,3), dtype=torch.float, requires_grad=True)\ny = torch.randn((1,2,3,3), dtype=torch.float, requires_grad=True)\nx_mlu = x.to(ct.mlu_device())\ny_mlu = y.to(ct.mlu_device())\nres = x_mlu + y_mlu\n\nmodel = nn.AvgPool2d(2).train().float()\nmodel.to(ct.mlu_device())\nres = model(x_mlu)\ngrad_mlu = torch.randn((res.shape), dtype=torch.float).to(ct.mlu_device())\nres.backward(grad_mlu)\n\nres = torch.clamp(x_mlu, min=-1.0, max=1.0)\nres = torch.cat([x_mlu, y_mlu], dim=0)\n\nx = torch.randn((1,3,112,112), dtype=torch.float, requires_grad=True)\nx_mlu = x.to(ct.mlu_device())\nmodel = nn.Conv2d(3, 16, kernel_size=3).to(ct.mlu_device())\nres = model(x_mlu)\nmodel = nn.Conv2d(3, 16, kernel_size=3, bias=False).to(ct.mlu_device())\nres = model(x_mlu)\n","repo_name":"Cambricon/catch","sub_path":"test/data/cnlog/cnlog_cnnl.py","file_name":"cnlog_cnnl.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"12"} +{"seq_id":"40072991999","text":"# -*- coding:utf-8 -*-\nimport csv\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport numpy as np\nfrom matplotlib import pyplot\nimport matplotlib.pylab as plt\n\n#create a dataframe 'ts && Convert ts['date'] from string to datetime. 
You can use ts.index.\n\n#method 1 write data directly\n\nts = [\n    {'date': '2016-05-01 10:23:05.069722', 'tick_numbers': 3213},\n    {'date': '2016-05-01 10:23:05.119994', 'tick_numbers': 4324},\n    {'date': '2016-05-02 10:23:05.178768', 'tick_numbers': 2132},\n    {'date': '2016-05-02 10:23:05.230071', 'tick_numbers': 43242},\n    {'date': '2016-05-02 10:23:05.230071', 'tick_numbers': 4234},\n    {'date': '2016-05-02 10:23:05.280592', 'tick_numbers': 4324},\n    {'date': '2016-05-03 10:23:05.332662', 'tick_numbers': 4324},\n    {'date': '2016-05-03 10:23:05.385109', 'tick_numbers': 1245},\n    {'date': '2016-05-04 10:23:05.436523', 'tick_numbers': 1555},\n    {'date': '2016-05-04 10:23:05.486877', 'tick_numbers': 543345},\n]\ndef time_format(x):\n    dt = datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')\n    minute = (dt.minute // 15) * 15\n    return datetime(dt.year, dt.month, dt.day, dt.hour, minute, dt.second, dt.microsecond) + timedelta(minutes=15)\n\nts = pd.DataFrame(ts).fillna(0)\nts['date'] = ts['date'].apply(time_format)\nprint (ts)\n\n#method 2 write a csv datafile\ndatafile = open('ts.csv', 'w', newline='')  # file() is Python 2 only; csv needs text mode in Python 3\nwriter = csv.writer(datafile)\nwriter.writerow(['date', 'tick_numbers'])\ndata = [\n    ('2016-05-01 10:23:05.069722', 3213),\n    ('2016-05-01 10:23:05.119994', 4324),\n    ('2016-05-02 10:23:05.178768', 2132),\n    ('2016-05-02 10:23:05.230071', 43242),\n    ('2016-05-02 10:23:05.230071', 4234),\n    ('2016-05-02 10:23:05.280592', 4324),\n    ('2016-05-03 10:23:05.332662', 4324),\n    ('2016-05-03 10:23:05.385109', 1245),\n    ('2016-05-04 10:23:05.436523', 1555),\n    ('2016-05-04 10:23:05.486877', 543345)\n]\nwriter.writerows(data)\ndatafile.close()\ndateparse = lambda dates: datetime.strptime(dates, '%Y-%m-%d %H:%M:%S.%f')  # pd.datetime has been removed from pandas\nts = pd.read_csv('ts.csv',index_col='date',date_parser=dateparse)\nprint(ts.head())\nprint(ts.head().index)\n\n#Delete useless column with the command del\nfor col in ts.columns:\n    if 'Unnamed' in col:\n        del ts[col]\nprint(ts)\n\n#Print all data from 2016\nprint (ts['2016'])\n\n#Print all data from May 2016\nprint (ts['2016-05':])\n\n#Data after May 3rd, 2016\nprint (ts['2016-05-03':])\n\n#Remove all the data after May 2nd, 2016 using truncate\nprint(ts.truncate(after='2016-05-02'))\n\n#Count the number of data per timestamp\nprint(ts.index.value_counts().sort_index())\n\n#Mean value of ticks per day. You will use resample with a period of D and a method of mean.\nmean = ts.resample('D').mean()\nprint (mean)\n\n
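#Aside (not part of the original exercise): resample also accepts other\n#frequencies, e.g. hourly means via ts.resample('H').mean()\n\n#Total value ticks per day. 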
You will use sum and a period of D\ntotal = ts.resample('D').sum()\nprint(total)\n\n#Plot of the total of ticks per day\nper = ts.resample('D').sum()\nplt.plot(per)\nplt.show()\n#Create another dataframe\nidx = pd.date_range('4/1/2012', '6/1/2012')\ndf = pd.DataFrame({'ARCA': np.random.randint(low=20000,high=30000,size=62),\n 'BARX': np.random.randint(low=20000,high=30000,size=62)},\n index=idx)\nprint(df)\n\n#Truncate the dataframe to get data (before='2012-04-04',after='2012-05-24'),Change the offset of the dataframe by pd.DateOffset(months=1, days=1)\ndf=df.truncate(before='2012-04-04',after='2012-05-24')\ndf.index += pd.DateOffset(months=1, days=1)\nprint(df.head())\n\n#Shift the dataframe by 1 day\nprint(df.shift(1).head())\n\n#Lag a variable 1 day\nprint(df['ARCA'].shift(-1).head())\n\n#Aggregate into 2W-SUN (bi-weekly starting by Sunday) by summing up the value of each daily volumw\nprint(df.resample('2W-SUN').sum())\n\n#Aggregate into weeks by averaging up the value of each daily volume\nprint(df.resample('2W-SUN').mean())","repo_name":"EmmaSRH/Timeseries","sub_path":"work2.py","file_name":"work2.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"14681917998","text":"#match DNA sequence\n\nimport csv\nimport sys\nimport random\n\ndef main():\n if len(sys.argv) != 3:\n sys.exit(\"Usage: python dna.py data.csv sequence.txt\")\n\n #read database\n da = sys.argv[1]\n cnt = 0 #number of people in the data\n idx = 0 #number of dna sections\n section = []\n with open(da) as data:\n reader1 = csv.reader(data)\n for row1 in reader1:\n idx = len(row1)-1\n if cnt == 0:\n for idd in range(1, len(row1)):\n section.append(row1[idd])\n cnt += 1\n cnt -= 1\n names = [[] * (idx+1) for i in range(cnt)]\n cnt = 0\n data.close()\n with open(da) as dat:\n reader = csv.reader(dat)\n next(reader)\n for row in reader:\n names[cnt].append(row[0])\n for i in range(1, len(row)):\n names[cnt].append(row[i])\n cnt += 1\n\n #read dna sequence\n dna = []\n seq = sys.argv[2]\n with open(seq) as sequence:\n reader2 = csv.reader(sequence)\n for row2 in reader2:\n dna.append(row2[0])\n\n dnacount = []\n for i in range(0, idx):\n dnacount.append(find(dna[0], section[i]))\n #print(section[i])\n #print(dna[0])\n\n signal = False\n for i in range(0, cnt):\n if check(names[i], idx, dnacount):\n signal = True\n print(names[i][0])\n break\n if signal == False:\n print(\"No match\")\n\n\ndef find(DNA, STR):\n \"\"\"returns the maximum number of times that the STR consecutively repeats\"\"\"\n maxi = 0\n tmp = 0\n slen = len(STR)\n for i in range(len(DNA)):\n for j in range(slen):\n #print(DNA[i+j])\n if (i+j) < len(DNA) and DNA[i+j] != STR[j]:\n if tmp > maxi:\n maxi = tmp\n tmp = 0\n break\n tmp += 1\n i += slen\n if tmp > maxi:\n maxi = tmp\n return maxi\n\n\ndef check(names, idx, dnacount):\n \"\"\"returns if the sequence matches the person\"\"\"\n for i in range(1, idx + 1):\n if int(names[i]) != dnacount[i-1]:\n return False\n return True\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"jsdhwdmaL/Harvard-CS50-Projects","sub_path":"week6_python/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"13830748105","text":"\"\"\" From \"COMPUTER PROBLEMS in PHYSICS\"\r\n by RH Landau and MJ Paez\r\n Copyright R Landau, Oregon State Unv, MJ Paez, Univ Antioquia, 2020. 
\r\n Please respect copyright & acknowledge our work.\"\"\"\r\n\r\n# Wormhole.py: Symbolic evaluation of wormhole derivative\r\n\r\nfrom sympy import *\r\nL, x, M, rho, a, r, lp = symbols('L x M rho a r lp')\r\nx = (2 * L - a) / (pi * M)\r\nr = rho + M * (x * atan(x) - log(1 + x * x) / 2)\r\ndrdL = diff(r, L)\r\nprint('drdL(raw) = ', drdL)\r\ndrdL = simplify(drdL)\r\nprint(' And finally! dr/dL (simplified)=', drdL)\r\n","repo_name":"tnakaicode/OregoneState-CP","sub_path":"MatPlotLibCodes/Wormhole.py","file_name":"Wormhole.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21470603661","text":"import arrow\n\nfrom .stats_base import StatsBase\n\n\nclass DailyItemCollector(StatsBase):\n def __init__(self, date=None):\n if not date:\n self.date = arrow.now('Asia/Shanghai')\n else:\n self.date = arrow.get(date)\n\n def get_items(self, date=None):\n if not date:\n date = self.date\n\n code = 'iqg_prod.hsq_daily_items.find({\"date\": \"'\\\n + date.format('YYYY-MM-DD') \\\n + '\"})'\n\n result = self.get_mongo_result('iqg_mongo', code)\n if result:\n result = result[0]\n return result\n\n def get_diff(self, date=None):\n if not date:\n date = self.date\n before_date = date.replace(days=-1)\n\n after = self.get_items(date)\n before = self.get_items(before_date)\n\n if not after or not before:\n raise self.AppError('NO_DATA')\n\n after_ids = [i['item_id'] for i in after['item_data']]\n before_ids = [i['item_id'] for i in before['item_data']]\n\n result = {'online': [], 'offline': []}\n result['info'] = \"{} [ {} ] & {} [ {} ]\"\\\n \"\".format(after['item_count'],\n date.format('MM-DD'),\n before['item_count'],\n before_date.format('MM-DD'))\n\n online = set(after_ids) - set(before_ids)\n offline = set(before_ids) - set(after_ids)\n\n for item in after['item_data']:\n if item['item_id'] in online:\n result['online'].append(item)\n\n for item in before['item_data']:\n if item['item_id'] in offline:\n result['offline'].append(item)\n\n return result\n","repo_name":"Larryrun80/iqg_stats_new","sub_path":"stats/models/dailyitem.py","file_name":"dailyitem.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40675414897","text":"import unittest\n\nimport os\n\nimport configparser\n\nfrom atsim import pro_fit\nfrom . 
import testutil\n\n\ndef _getResourceDir():\n    return os.path.join(\n        os.path.abspath(os.path.dirname(__file__)),\n        \"resources\",\n        \"regex_evaluator\",\n    )\n\n\nclass RegexEvaluatorTestCase(unittest.TestCase):\n    def testEvaluator(self):\n        \"\"\"Test atsim.pro_fit.evaluators.RegexEvaluator\"\"\"\n        parser = configparser.ConfigParser()\n        parser.optionxform = str\n        with open(\n            os.path.join(_getResourceDir(), \"job_files\", \"job.cfg\")\n        ) as infile:\n            parser.read_file(infile)\n\n        evaluator = pro_fit.evaluators.RegexEvaluator.createFromConfig(\n            \"regex\", _getResourceDir(), parser.items(\"Evaluator:regex\")\n        )\n\n        job = pro_fit.jobfactories.Job(None, _getResourceDir(), None)\n\n        extractExpect = {\n            \"first\": 1.234,\n            \"second\": 5.678,\n            \"third\": 9.1011,\n            \"fourth\": 5.878,\n        }\n\n        evaluated = evaluator(job)\n        actual = dict([(e.name, e.extractedValue) for e in evaluated])\n        testutil.compareCollection(self, extractExpect, actual)\n\n        meritExpect = {\n            \"first\": ((1.234 - 10.0) ** 2) ** 0.5,\n            \"second\": 2.0 * ((5.678 - 10.0) ** 2) ** 0.5,\n            \"third\": 2.0 * ((9.1011 - 10.0) ** 2) ** 0.5,\n            \"fourth\": ((5.878 - 10.0) ** 2) ** 0.5,\n        }\n\n        evaluated = evaluator(job)\n        testutil.compareCollection(self, extractExpect, actual)\n        actual = dict([(e.name, e.meritValue) for e in evaluated])\n        testutil.compareCollection(self, meritExpect, actual)\n","repo_name":"mjdrushton/potential-pro-fit","sub_path":"tests/test_regexevaluator.py","file_name":"test_regexevaluator.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} {"seq_id":"70486322901","text":"import lib\nfrom autogluon.utils.tabular.ml.models.lr.lr_model import LinearModel\nfrom preprocessing_utils.featureGenerator import AutoMLFeatureGenerator\nimport numpy as np\nfrom data_config.data_config import load_data, data_config\nfrom sklearn.model_selection import StratifiedKFold\nimport torch, torch.nn as nn\nimport torch.nn.functional as F\nimport pickle\nfrom qhoptim.pyt import QHAdam\nfrom lib.utils import check_numpy, process_in_chunks\nfrom lib.nn_utils import to_one_hot\nfrom sklearn.metrics import roc_auc_score\n\n\ndef predict_logits(self, X_test, device, batch_size=512):\n    X_test = torch.as_tensor(X_test, device=device)\n    self.model.train(False)\n    with torch.no_grad():\n        logits = F.softmax(process_in_chunks(self.model, X_test, batch_size=batch_size), dim=1)\n        logits = check_numpy(logits)\n    return logits\n\n\nif __name__ == '__main__':\n\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n    hours = 2 # 2 hours for training\n    res = {}  # keyed by dataset name (was [], but it is used as a dict below)\n    for data_name, d in data_config.items():\n\n        # split dataset train/test = 0.7:0.3\n        X_train, X_test, y_train, y_test = load_data(data_name, combine_y=False, split_seed=2020, test_size=0.3)\n\n        # general feature generator;\n        feature_generator = AutoMLFeatureGenerator()\n\n        print(\"#\"*50, 'training set preprocessing')\n        X_train = feature_generator.fit_transform(X_train, drop_duplicates=False)\n        print(\"#\" * 50, 'testing set preprocessing')\n        X_test = feature_generator.transform(X_test)\n\n        feature_types_metadata = feature_generator.feature_types_metadata\n\n        problem_type = 'binary'\n        path = f'LR-{data_name}'\n        name = 'Onehot'\n        eval_metric = 'roc_auc'\n        stopping_metric = 'roc_auc'\n\n        lr = LinearModel(problem_type=problem_type, path=path, name=name, eval_metric=eval_metric,\n                         feature_types_metadata=feature_types_metadata)\n\n        hyperparams = lr.params.copy()\n\n        X_train = lr.preprocess(X_train, is_train=True, 
vect_max_features=hyperparams['vectorizer_dict_size'],\n                                   model_specific_preprocessing=True)\n        X_test = lr.preprocess(X_test, is_train=False, vect_max_features=hyperparams['vectorizer_dict_size'],\n                               model_specific_preprocessing=True)\n\n        y_train = y_train.values\n        y_test = y_test.values\n\n        # X_train = X_train.toarray()\n        # X_test = X_test.toarray()\n\n        skf = StratifiedKFold(shuffle=True, random_state=0)\n\n        test_pred = np.zeros(shape=len(y_test))\n\n        test_logits = None\n\n        for train_index, val_index in skf.split(X_train, y_train):\n            X_tr = X_train[train_index]\n            y_tr = y_train[train_index]\n            X_val = X_train[val_index]\n            y_val = y_train[val_index]\n\n            data = lib.Dataset(dataset=None, random_state=0, X_train=X_tr, y_train=y_tr, X_valid=X_val, y_valid=y_val, X_test=X_test, y_test=y_test)\n\n            num_features = data.X_train.shape[1]\n            num_classes = len(set(data.y_train))\n\n            model = nn.Sequential(\n                lib.DenseBlock(num_features, layer_dim=216, num_layers=1, tree_dim=num_classes + 1,\n                               flatten_output=False,\n                               depth=6, choice_function=lib.entmax15, bin_function=lib.entmoid15),\n                lib.Lambda(lambda x: x[..., :num_classes].mean(dim=-2)),\n            ).to(device)\n\n            model = model.float()\n\n            with torch.no_grad():\n                _ = model(torch.as_tensor(data.X_train[:1000], device=device).float())\n                # trigger data-aware init (use _ so the results dict res is not clobbered)\n\n            if torch.cuda.device_count() > 1:\n                model = nn.DataParallel(model)\n\n            optimizer_params = {'nus': (0.7, 1.0), 'betas': (0.95, 0.998)}\n\n            trainer = lib.Trainer(\n                model=model, loss_function=F.cross_entropy,\n                experiment_name=data_name + '5',\n                warm_start=False,\n                Optimizer=QHAdam,\n                optimizer_params=optimizer_params,\n                verbose=True,\n                n_last_checkpoints=5\n            )\n\n            loss_history, err_history = [], []\n            best_val_err = 1.0\n            best_step = 0\n            early_stopping_rounds = 10_000\n            report_frequency = 100\n\n            for batch in lib.iterate_minibatches(data.X_train, data.y_train, batch_size=512,\n                                                 shuffle=True, epochs=float('inf')):\n                metrics = trainer.train_on_batch(*batch, device=device)\n\n                loss_history.append(metrics['loss'])\n\n                if trainer.step % report_frequency == 0:\n                    trainer.save_checkpoint()\n                    trainer.average_checkpoints(out_tag='avg')\n                    trainer.load_checkpoint(tag='avg')\n                    err = trainer.evaluate_classification_error(\n                        data.X_valid, data.y_valid, device=device, batch_size=1024)\n\n                    if err < best_val_err:\n                        best_val_err = err\n                        best_step = trainer.step\n                        trainer.save_checkpoint(tag='best')\n\n                    err_history.append(err)\n                    trainer.load_checkpoint()  # last\n                    trainer.remove_old_temp_checkpoints()\n\n                    # clear_output(True)\n                    # plt.figure(figsize=[12, 6])\n                    # plt.subplot(1, 2, 1)\n                    # plt.plot(loss_history)\n                    # plt.grid()\n                    # plt.subplot(1, 2, 2)\n                    # plt.plot(err_history)\n                    # plt.grid()\n                    # plt.show()\n                    print(\"Loss %.5f\" % (metrics['loss']))\n                    print(\"Val Error Rate: %0.5f\" % (err))\n\n                    if trainer.step > best_step + early_stopping_rounds:\n                        
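# early stopping: abort this fold once no validation improvement has been\n                        # seen within the last early_stopping_rounds optimizer steps\n                        print('BREAK. 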
There is no improvement for {} steps'.format(early_stopping_rounds))\n                        print(\"Best step: \", best_step)\n                        print(\"Best Val Error Rate: %0.5f\" % (best_val_err))\n                        break\n            trainer.load_checkpoint(tag='best')\n            logits = predict_logits(trainer, data.X_test, device=device, batch_size=1024)  # predict_logits expects the trainer as its first argument\n            if test_logits is None:\n                test_logits = logits\n            else:\n                test_logits += logits\n        test_logits /= 5  # average over the 5 CV folds\n        auc = roc_auc_score(check_numpy(to_one_hot(y_test)), test_logits)\n        print(data_name, auc)\n        res[data_name] = auc\n\n    print(res)\n    with open(\"node.pickle\", \"wb\") as f:\n        pickle.dump(res, f)\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Haozhuai/Automl-benchmarks","sub_path":"node_benchmarks.py","file_name":"node_benchmarks.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"19838005083","text":"import cv2\nimport os\nimport numpy as np\nfrom PIL import Image\n\nclass Train:\n    def __init__(self):\n        self.path = './img/vip_customer'\n        self.detector = cv2.CascadeClassifier('./FaceRecognition/haarcascade_frontalface_default.xml')\n        self.recognizer = cv2.face.LBPHFaceRecognizer_create()\n    \n    def training(self, name):\n        imagePath = os.path.join(self.path, name)\n        faceSamples = []\n        ids = []\n        \n        for i in range(1, 46):\n            try:\n                PIL_img = Image.open(f'{imagePath} ({i}).png').convert('L')\n                img_numpy = np.array(PIL_img, 'uint8')\n\n                path = os.path.split(f'{imagePath} ({i}).png')[-1].split('.')[0].split(' ')\n                id = int(path[len(path) - 1].lstrip('(').rstrip(')'))\n                faces = self.detector.detectMultiScale(img_numpy)\n                \n                for (x, y, w, h) in faces:\n                    faceSamples.append(img_numpy[y:y+h, x:x+w])\n                    ids.append(id)\n            except:\n                pass\n        \n        self.recognizer.train(faceSamples, np.array(ids))\n        self.recognizer.write(f'./Classifiers/{name}.xml')","repo_name":"thevu29/SneakerShop","sub_path":"FaceRecognition/Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"21807332394","text":"import pandas as pd\nimport datetime\nimport os\nimport numpy as np\n\nF1 = ['F03', 'F04', 'F05', 'F06', 'F07']\nF2 = ['F01', 'F02', 'F08', 'F09', 'F10']\n\naddr = \"/srv/project_data/EMR/able_data/\"\nable_file_list = os.listdir(addr)\n\nable = pd.DataFrame()\nfor able_file in able_file_list:\n    able_xlsx = pd.read_excel(addr+able_file, engine='openpyxl')\n    able_xlsx['로젯코드'] = [able_file.split(\"_\")[1] for data in range(len(able_xlsx))]\n    able = pd.concat([able, able_xlsx])\n\nable['HTN'] = np.zeros(shape=(len(able),), dtype=np.int8)\nhtn_keys = [key for key in able.keys() if len(able[able[key] == 'HTN']) != 0]\ndupl_values = able['연구번호'][able.duplicated(['연구번호'])].unique()\ndate_values = [key for key in able.keys() if key.find('날짜') != -1]\n\ndupl_htn = able[(able['연구번호'].isin(dupl_values)) & ((able[htn_keys[0]] == 'HTN') | (able[htn_keys[1]] == 'HTN') | (able[htn_keys[2]] == 'HTN') | (able[htn_keys[3]] == 'HTN') | (able[htn_keys[4]] == 'HTN'))]\nable['검사일자'] = np.zeros(shape=(len(able),), dtype=np.string_)\nable['검사일자'][:] = np.nan\nable = able.reset_index(drop=True)\nfor i in date_values:\n    temp_array = able[pd.isnull(able[i])==False].index\n    able['검사일자'][temp_array] = able[i][temp_array]\nable['검사일자'] = pd.to_datetime(able['검사일자'], format='%Y/%m/%d')\nable['수술일자'] = pd.to_datetime(able['수술일자'], format='%Y%m%d')\nable = able.drop_duplicates(['연구번호', '수술일자'])\n# flag HTN when any history column contains 'HTN' and the exam date is on or before the surgery date\nable['HTN'][able['연구번호'].isin(dupl_htn['연구번호'].unique()) & (able['검사일자'] <= 
able['수술일자'])] = 1\nable['HTN'][(able[htn_keys[0]] == 'HTN') | (able[htn_keys[1]] == 'HTN') | (able[htn_keys[2]] == 'HTN') | (able[htn_keys[3]] == 'HTN') | (able[htn_keys[4]] == 'HTN')] = 1\nable = able.reset_index(drop=True)\n\nable.to_excel('/srv/project_data/EMR/jy/able/202300110_ABLE.xlsx', index=False)\n\n# all_able = pd.read_excel('/srv/project_data/EMR/20230105_All.xlsx', engine='openpyxl')\n# all_able = all_able[(all_able['연구번호'].isin(able['연구번호'])) & ((all_able[htn_keys[0]] == 'HTN') | (all_able[htn_keys[1]] == 'HTN') | (all_able[htn_keys[2]] == 'HTN') | (all_able[htn_keys[3]] == 'HTN') | (all_able[htn_keys[4]] == 'HTN'))]\n# all_able = all_able.unique()\n# test_able = able.copy()\n# test_able['HTN'][test_able['연구번호'].isin(all_able)] = 1\n# print( len(test_able[test_able['HTN'] == 1]), len(able[able['HTN'] == 1]) )\n# ## 결과 변함 없음 왜일까?\n#\n# able.to_excel('/srv/project_data/EMR/jy/able/202300106_ABLE.xlsx', index=False)\n#\n# sr_info = pd.read_excel('/srv/project_data/EMR/new_data/20220426_ANS_이상욱_2021-1352_(1.마취기본)_3차(7.7).xlsx', engine='openpyxl')\n#\n# sr_info.keys()\n#\n# len(able)\n# len(able[able['HTN'] == 1])\n#\n#\n# ###################### Test\n# able['F:간호정보조사지(일반성인) S:기본 E:과거력 Q:과거력 V:comment'][((able['F:간호정보조사지(일반성인) S:기본 E:과거력 Q:과거력 V:comment'].str.contains('HTN')) | able['F:간호정보조사지(일반성인) S:기본 E:과거력 Q:과거력 V:comment'].str.contains('혈압')) & (able['HTN'] == 0)]\n#\n# test = pd.read_excel('/srv/project_data/EMR/new_data/211015_ANS_이상욱_2021-1352_1차(3.14).xlsx', engine='openpyxl', sheet_name='대상환자&1')\n# test['사망여부'] = np.zeros(shape=(len(test),), dtype=np.int8)\n# test['사망여부'][(pd.isnull(test['암등록사망일자']) == False) | (pd.isnull(test['원내사망일자'] == False))] = 1\n# test = test[pd.isnull(test['최종퇴원일자']) == False]\n# test = test[test['최종퇴원일자'] <= 20550000]\n# test['최종내원일자'] = pd.to_datetime(test['최종내원일자'], format='%Y%m%d')\n# test['최종퇴원일자'] = pd.to_datetime(test['최종퇴원일자'], format='%Y%m%d')\n# test['재원기간'] = test['최종퇴원일자'] - test['최종내원일자']\n#\n# pd.crosstab(test['사망여부'])\n#\n# import matplotlib.pyplot as plt\n#\n# plt.figure(figsize=(12,8))\n# plt.plot(test['재원기간(일)'],test['사망여부'])\n# plt.show()\n#\n# plt.hist(test['사망여부'],test['재원기간(일)'])\n#\n# test = test.sort_values('재원기간(일)').reset_index(drop=True)\n#\n# test['재원기간(일)']\n#\n#\n# pd.crosstab(test['재원기간(일)'], test['사망여부'])\n# plt.show()\n#\n# test['재원기간(일)'] = test['재원기간'].astype(str).str.extract(r'(\\d+)').astype(int)\n#\n#\n# import seaborn as sns\n# plt.hist(test['사망여부'])\n# plt.show()\n#\n#\n# plt.figure(figsize=(12,8))\n# plt.title(label=\"??\")\n# sns.countplot(x='재원기간(일)', data=test)\n# plt.xlabel('hospitalization period')\n# plt.show()\n# np.mean(test['재원기간(일)'])\n# np.median(test['재원기간(일)'])\n#\n# len(test[(test['재원기간(일)'] > 9) & (test['재원기간(일)'] < 100)])\n# len(test[test['재원기간(일)'] >= 100])\n# len(test)\n# np.percentile(test['재원기간(일)'], [0, 12.5, 25, 37.5, 50, 62.5, 75, 87.5, 100])","repo_name":"sacross93/biosignalresearch","sub_path":"Master's Thesis/Pre-induction_BP/able_merge.py","file_name":"able_merge.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30391558168","text":"\n'''\nset is a really useful data structure. sets behave mostly like lists with the distinction\nthat they can not contain duplicate values. It is really useful in a lot of cases. For\ninstance you might want to check whether there are duplicates in a list or not. You\nhave two options. The first one involves using a for loop. 
Something like this:\n\n'''\n# First method: remove duplicates from a list with a loop\nsome_list = ['a', 'b', 'c', 'b', 'd', 'm', 'n', 'n']\nduplicates =[]\nfor value in some_list:\n    if some_list.count(value)>1:\n        if value not in duplicates:\n            duplicates.append(value)\n\n# print(f'Duplicates: ,{duplicates}')\n#output-- Duplicates: ,['b', 'n']\n\n\n# Second method: using a set\nsome_list = ['a', 'b', 'c', 'b', 'd', 'm', 'n', 'n']\nduplicates1 = set([x for x in some_list if some_list.count(x) > 1])\nprint(duplicates1)\n# Output: {'b', 'n'}\n\n# duplicates1 = list(set(some_list))\n\n# Sets have built-in methods: 1. intersection 2. difference\n# 1. Intersect\n\nvalid = set(['yellow', 'red', 'blue', 'green', 'black'])\ninput_set = set(['red', 'brown'])\nprint(f' intersect - {input_set.intersection(valid)}')\n\n# 2. Difference \nprint(f' Difference - {input_set.difference(valid)}')\n# output intersect - {'red'} , Difference - {'brown'}\n\n","repo_name":"mst101097/DSA_ProblemSolving_With_Python","sub_path":"PythonTestCase/Set_Data_Structure.py","file_name":"Set_Data_Structure.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"31894334743","text":"import pygame\nimport sys\nimport random\n\n#CONSTANTS\nANCHO = 800\nALTO = 600\ncolor_negro = (0,0,0)\n\ns = 50\n\n# ENEMY \nenemigo = pygame.image.load(\"img/hehe.png\")\nenemigo = pygame.transform.scale(enemigo, (s,s))\n\n\n# PLAYER\njugador_pos = [ ANCHO/ 2, ALTO - s * 2]\n\n# ENEMIES\neSize = 50\nenemigo_pos = [random.randint(0, ANCHO- eSize),0]\n\n\n# WINDOW\nventana = pygame.display.set_mode((ANCHO,ALTO))\n\n\ngame_over = False\nclock = pygame.time.Clock()\n\n# FUNCTIONS \ndef dectectar_colision(jugador_pos,enemigo_pos):\n    jx = jugador_pos[0]\n    jy = jugador_pos[1]\n    ex = enemigo_pos[0]\n    ey = enemigo_pos[1]\n\n    if (ex >= jx and ex <(jx + s)) or (jx >= ex and jx < (ex + eSize)):\n        if (ey >= jy and ey <(jy + s)) or (jy >= ey and jy < (ey + eSize)):  # was jx; compare the y coordinates\n            return True\n    return False\n\n\nwhile not game_over:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        \n        if event.type == pygame.KEYDOWN:\n            x = jugador_pos[0]\n            if event.key == pygame.K_LEFT:\n                x -= s\n            if event.key == pygame.K_RIGHT:\n                x += s\n\n            jugador_pos[0] = x\n\n    ventana.fill(color_negro)\n\n    if enemigo_pos[1] >= 0 and enemigo_pos[1] < ALTO:\n        enemigo_pos[1] += 20\n    else:\n        enemigo_pos[0] = random.randint(0, ANCHO - eSize)\n        enemigo_pos[1] = 0\n\n    # COLLISIONS\n    if dectectar_colision(jugador_pos,enemigo_pos):\n        game_over = True\n\n\n    # ENEMY \n    ventana.blit(enemigo,(enemigo_pos[0],enemigo_pos[1],eSize,eSize))\n\n    # PLAYER \n    imagen = pygame.image.load(\"img/comecome.png\")\n    ventana.blit(imagen,(jugador_pos[0],jugador_pos[1],s,s))\n\n    clock.tick(20)\n    pygame.display.update()","repo_name":"Alucarxd/juegoPython","sub_path":"bloque.py","file_name":"bloque.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"32639830581","text":"import numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport iris\nimport iris.plot as iplt\nimport cartopy.crs as ccrs\nimport os\nimport warnings\n\nfrom .ffff import nanMask_, kde__, flt_, flt_l, isIter_, rpt_, ind_inRange_\nfrom .cccc import y0y1_of_cube, extract_period_cube\n\n\n__all__ = ['aligned_cb_',\n           'aligned_tx_',\n           'annotate_heatmap',\n           'axColor_',\n           'axVisibleOff_',\n           'ax_move_',\n           'axs_abc_',\n           
'axs_move_',\n           'axs_rct_',\n           'axs_shrink_',\n           'bp_cubeL_eval_',\n           'bp_dataLL0_',\n           'bp_dataLL1_',\n           'bp_dataLL_',\n           'cdf_iANDe_',\n           'distri_swe_',\n           'get_1st_patchCollection_',\n           'geoTkLbl_',\n           'hatch_cube',\n           'heatmap',\n           'hspace_ax_',\n           'init_fig_',\n           'imp_',\n           'imp_eur_',\n           'imp_ll_',\n           'imp_swe_',\n           'pdf_iANDe_',\n           'pstGeoAx_',\n           'ts_eCube_',\n           'wspace_ax_']\n\n\ndef init_fig_(\n        fx=12,\n        fy=6,\n        h=0.075,\n        w=0.075,\n        t=0.98,\n        b=0.075,\n        l=0.075,\n        r=0.98,\n        ):\n    fig = plt.figure(figsize=(fx, fy))\n    fig.subplots_adjust(\n        hspace=h,\n        wspace=w,\n        top=t,\n        bottom=b,\n        left=l,\n        right=r,\n        )\n    return fig\n\n\ndef axVisibleOff_(ax, which='all'):\n    which_ = 'tbrl' if which == 'all' else which\n    tbrl = dict(t='top', b='bottom', r='right', l='left')\n    for i in which_:\n        ax.spines[tbrl[i]].set_visible(False)\n\n\ndef axColor_(ax, color):\n    for child in ax.get_children():\n        if isinstance(child, mpl.spines.Spine):\n            child.set_color(color)\n\n\ndef _get_clo(cube):\n    cs = cube.coord_system()\n    if isinstance(cs, (iris.coord_systems.LambertConformal,\n                       iris.coord_systems.Stereographic)):\n        clo = cs.central_lon\n    elif isinstance(cs, iris.coord_systems.RotatedGeogCS):\n        clo = rpt_(180 + cs.grid_north_pole_longitude, 180, -180)\n    elif isinstance(cs, (iris.coord_systems.Orthographic,\n                         iris.coord_systems.VerticalPerspective)):\n        clo = cs.longitude_of_projection_origin\n    elif isinstance(cs, iris.coord_systems.TransverseMercator):\n        clo = cs.longitude_of_central_meridian\n    else:\n        clo = np.floor(np.mean(cube.coord('longitude').points) / 5) * 5\n    return clo\n\n\ndef imp2_swe_(\n        cube0,\n        cube1,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        clo_='human',\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    ext = _mapext(rg=rg, cube=cube0)\n    if isinstance(clo_, (int, float)):\n        clo = clo_\n    elif clo_ == 'cs':\n        clo = _get_clo(cube0)\n    else:\n        clo = _clo_ext(ext, h_=clo_)\n    proj = ccrs.NorthPolarStereo(central_longitude=clo)\n    return imp2_(cube0, cube1, *subplotspec,\n                 fig=fig,\n                 func=func,\n                 proj=proj,\n                 ext=ext,\n                 sc=sc,\n                 axK_=axK_,\n                 pK_=pK_,\n                 )\n\n\ndef imp_swe_(\n        cube,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        clo_='human',\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    ext = _mapext(rg=rg, cube=cube)\n    if isinstance(clo_, (int, float)):\n        clo = clo_\n    elif clo_ == 'cs':\n        clo = _get_clo(cube)\n    else:\n        clo = _clo_ext(ext, h_=clo_)\n    proj = ccrs.NorthPolarStereo(central_longitude=clo)\n    return imp_(cube, *subplotspec,\n                fig=fig,\n                func=func,\n                proj=proj,\n                ext=ext,\n                sc=sc,\n                axK_=axK_,\n                pK_=pK_,\n                )\n\n\ndef imp2_eur_(\n        cube0,\n        cube1,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    return imp2_(cube0, cube1, *subplotspec,\n                 fig=fig,\n                 func=func,\n                 proj=ccrs.EuroPP(),\n                 ext=_mapext(rg=rg, cube=cube0),\n                 sc=sc,\n                 axK_=axK_,\n                 pK_=pK_,\n                 )\n\n\ndef imp_eur_(\n        cube,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    return imp_(cube, *subplotspec,\n                fig=fig,\n                func=func,\n                proj=ccrs.EuroPP(),\n                ext=_mapext(rg=rg, cube=cube),\n                sc=sc,\n                axK_=axK_,\n                pK_=pK_,\n                )\n\n\ndef imp2_ll_(\n        cube0,\n        cube1,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    return imp2_(cube0, cube1, *subplotspec,\n                 fig=fig,\n                 func=func,\n                 proj=ccrs.PlateCarree(),\n                 ext=_mapext(rg=rg, cube=cube0),\n                 sc=sc,\n                 axK_=axK_,\n                 pK_=pK_,\n                 )\n\n\ndef imp_ll_(\n        cube,\n        *subplotspec,\n        fig=None,\n        func=\"pcolormesh\",\n        rg=None,\n        sc=1,\n        axK_={},\n        pK_={},\n        ):\n    return imp_(cube, *subplotspec,\n                fig=fig,\n                
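# delegate to the generic imp_ driver with a plain lat/lon projection\n                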
func=func,\n proj=ccrs.PlateCarree(),\n ext=_mapext(rg=rg, cube=cube),\n sc=sc,\n axK_=axK_,\n pK_=pK_,\n )\n\n\ndef imp2_(\n cube0,\n cube1,\n *subplotspec,\n fig=None,\n func=\"quiver\",\n proj=None,\n ext=None,\n sc=1,\n axK_={},\n pK_={},\n ):\n fig = plt.gcf() if fig is None else fig\n ax = fig.add_subplot(*subplotspec, projection=proj)\n if ext:\n ax.set_extent(ext, crs=ccrs.PlateCarree())\n axK_.setdefault(\"frame_on\", False)\n ax.set(**axK_)\n o = _ll_cube2(cube0, cube1, axes=ax, func=func, sc=sc, **pK_)\n return (ax, o)\n\n\ndef imp_(\n cube,\n *subplotspec,\n fig=None,\n func=\"pcolormesh\",\n proj=None,\n ext=None,\n sc=1,\n axK_={},\n pK_={},\n ):\n fig = plt.gcf() if fig is None else fig\n ax = fig.add_subplot(*subplotspec, projection=proj)\n if ext:\n ax.set_extent(ext, crs=ccrs.PlateCarree())\n axK_.setdefault(\"frame_on\", False)\n ax.set(**axK_)\n o = _ll_cube(cube, axes=ax, func=func, sc=sc, **pK_)\n return (ax, o)\n\n\ndef _clo_ext(ext, h_=None):\n if h_ == 'human':\n clo = np.floor(np.mean(ext[:2]) / 5) * 5\n else:\n clo = np.floor(np.mean(ext[:2]))\n return clo\n\n\ndef _mapext(rg={}, cube=None):\n o = {}\n if cube:\n lo0 = cube.coord('longitude').points\n la0 = cube.coord('latitude').points\n o.update(dict(longitude=[lo0.min(), lo0.max()],\n latitude=[la0.min(), la0.max()]))\n if isinstance(rg, dict):\n o.update(**rg)\n if 'longitude' in o and 'latitude' in o:\n return flt_l([o['longitude'], o['latitude']])\n\n\ndef hatch_cube(cube, **kwArgs):\n kwArgs.setdefault('zorder', 5)\n kwArgs.setdefault('colors', 'none')\n return _ll_cube(cube, func='contourf', **kwArgs)\n\n\ndef _ll_cube2(\n cube0,\n cube1,\n axes=None,\n func='quiver',\n sc=1,\n **kwArgs,\n ):\n axes = plt.gca() if axes is None else axes\n support = ['quiver', 'barbs', 'streamplot']\n assert func in support, f\"func {func!r} not supported!\"\n _func = getattr(axes, func)\n lo0, la0 = cube0.coord('longitude'), cube0.coord('latitude')\n if lo0.ndim == 2:\n o = _func(lo0.points, la0.points, cube0.data*sc, cube1.data*sc,\n transform=ccrs.PlateCarree(),\n **kwArgs)\n else:\n if cube0.coord_dims(lo0)[0] > cube0.coord_dims(la0)[0]:\n x, y = np.meshgrid(lo0.points, la0.points)\n else:\n y, x = np.meshgrid(la0.points, lo0.points)\n o = _func(x, y, cube0.data*sc, cube1.data*sc,\n transform=ccrs.PlateCarree(),\n **kwArgs)\n return o\n\n\ndef _ll_cube(\n cube,\n axes=None,\n func='pcolormesh',\n sc=1,\n **kwArgs,\n ):\n axes = plt.gca() if axes is None else axes\n support = ['pcolor', 'pcolormesh', 'contour', 'contourf']\n assert func in support, f\"func {func!r} not supported!\"\n if func in support[-2:]:\n _func = getattr(iplt, func)\n o = _func(cube.copy(cube.data*sc), axes=axes, **kwArgs)\n else:\n lo0, la0 = cube.coord('longitude'), cube.coord('latitude')\n if lo0.ndim == 1:\n _func = getattr(iplt, func)\n o = _func(cube.copy(cube.data*sc), axes=axes, **kwArgs)\n else:\n if hasattr(lo0, 'has_bounds') and lo0.has_bounds():\n x, y = lo0.contiguous_bounds(), la0.contiguous_bounds()\n else:\n x, y = _2d_bounds(lo0.points, la0.points)\n _func = getattr(axes, func)\n o = _func(x, y, cube.data*sc,\n transform=ccrs.PlateCarree(),\n **kwArgs)\n return o\n\n\ndef _2d_bounds(x, y):\n def _extx(x2d):\n dx2d = np.diff(x2d, axis=-1)\n return np.hstack((x2d, x2d[:, -1:] + dx2d[:, -1:]))\n def _exty(y2d):\n dy2d = np.diff(y2d, axis=0)\n return np.vstack((y2d, y2d[-1:, :] + dy2d[-1:, :]))\n dx0 = _extx(np.diff(x, axis=-1))\n dx1 = _exty(np.diff(x, axis=0))\n x00 = x - .5 * dx0 - .5 * dx1\n x01 = _extx(x00)\n xx = _exty(x01)\n dy0 
= _extx(np.diff(y, axis=-1))\n    dy1 = _exty(np.diff(y, axis=0))\n    y00 = y - .5 * dy0 - .5 * dy1\n    y01 = _extx(y00)\n    yy = _exty(y01)\n    return (xx, yy)\n\n\ndef ax_move_(\n        ax,\n        dx=0.,\n        dy=0.,\n        ):\n    axp = ax.get_position()\n    axp.x0 += dx\n    axp.x1 += dx\n    axp.y0 += dy\n    axp.y1 += dy\n    ax.set_position(axp)\n\n\ndef axs_move_(\n        axs,\n        dx,\n        d_='x',\n        ):\n    for i, ax in enumerate(axs):\n        if 'x' in d_:\n            ax_move_(ax, dx=dx * i)\n        elif 'y' in d_:\n            ax_move_(ax, dy=dx * i)\n\n\ndef axs_shrink_(\n        axs,\n        rx=1.,\n        ry=1.,\n        anc='tl',\n        ):\n    if anc[0] not in 'tbm':\n        raise ValueError(\"anc[0] must be one of 't' ,'m', 'b'!\")\n    if anc[1] not in 'lcr':\n        raise ValueError(\"anc[1] must be one of 'l' ,'c', 'r'!\")\n    x0, x1, y0, y1 = _minmaxXYlm(axs)\n    for i in axs:\n        x00, x11, y00, y11 = _minmaxXYlm(i)\n        if anc[1] == 'l':\n            dx = (x0 - x11) * (1 - rx) if x0 != x00 else 0.\n        elif anc[1] == 'c':\n            dx = (x0 + x1 - x00 - x11) * .5 * (1 - rx)\n        else:\n            dx = (x1 - x00) * (1 - rx) if x1 != x11 else 0.\n        if anc[0] == 't':\n            dy = (y1 - y00) * (1 - ry) if y1 != y11 else 0.\n        elif anc[0] == 'm':\n            dy = (y0 + y1 - y00 - y11) * .5 * (1 - ry)\n        else:\n            dy = (y0 - y11) * (1 - ry) if y0 != y00 else 0.\n        ax_move_(i, dx, dy)\n\n\ndef _minmaxXYlm(ax):\n    if isIter_(ax):\n        xmin = min([i.get_position().x0 for i in flt_(ax)])\n        ymin = min([i.get_position().y0 for i in flt_(ax)])\n        xmax = max([i.get_position().x1 for i in flt_(ax)])\n        ymax = max([i.get_position().y1 for i in flt_(ax)])\n    else:\n        xmin, ymin = ax.get_position().p0\n        xmax, ymax = ax.get_position().p1\n    return (xmin, xmax, ymin, ymax)\n\n\ndef axs_rct_(\n        fig,\n        ax,\n        dx=.005,\n        **kwArgs,\n        ):\n    xmin, xmax, ymin, ymax = _minmaxXYlm(ax)\n    kD = dict(fill=False, color='k', zorder=1000,\n              transform=fig.transFigure, figure=fig)\n    kD.update(kwArgs)\n    fx, fy = fig.get_size_inches()\n    dy = dx * fx / fy\n    fig.patches.extend(\n        [plt.Rectangle(\n            (xmin - dx, ymin - dy),\n            xmax - xmin + 2*dx,\n            ymax - ymin + 2*dy,\n            **kD\n            )])\n\n\ndef wspace_ax_(ax0, ax1):\n    return ax1.get_position().x0 - ax0.get_position().x1\n\n\ndef hspace_ax_(ax0, ax1):\n    return ax0.get_position().y0 - ax1.get_position().y1\n\n\ndef aligned_cb_(\n        fig,\n        ax,\n        ppp,\n        iw,\n        orientation='vertical',\n        shrink=1.,\n        side=1,\n        ncx='c',\n        ti=None,\n        **cb_dict,\n        ):\n    cD = dict(orientation=orientation, **cb_dict)\n    xmin, xmax, ymin, ymax = _minmaxXYlm(ax)\n    shrink_ = 0 if ncx == 'n' else (1 if ncx=='x' else .5)\n    if orientation == 'vertical':\n        if side:\n            caxb = [xmax + iw[0],\n                    ymin + (ymax - ymin) * (1. - shrink) * shrink_,\n                    iw[1],\n                    (ymax - ymin) * shrink]\n        else:\n            caxb = [xmin - iw[0] -iw[1],\n                    ymin + (ymax - ymin) * (1. - shrink) * shrink_,\n                    iw[1],\n                    (ymax - ymin) * shrink]\n    elif orientation == 'horizontal':\n        if side:\n            caxb = [xmin + (xmax - xmin) * (1. - shrink) * shrink_,\n                    ymin - iw[0] - iw[1],\n                    (xmax - xmin) * shrink,\n                    iw[1]]\n        else:\n            caxb = [xmin + (xmax - xmin) * (1. 
- shrink) * shrink_,\n ymax + iw[1],\n (xmax - xmin) * shrink,\n iw[1]]\n cax = fig.add_axes(caxb)\n cb = plt.colorbar(ppp, cax, **cD)\n if not side:\n if orientation == 'vertical':\n cax.yaxis.tick_left()\n cax.yaxis.set_label_position('left')\n if orientation == 'horizontal':\n cax.xaxis.tick_top()\n cax.xaxis.set_label_position('top')\n if ti:\n cb.set_label(ti)\n return cb\n\n\ndef aligned_qk_(\n ax,\n q,\n U,\n s,\n pad=.02,\n rPos='NE',\n coordinates='figure',\n **kwArgs,\n ):\n # _get_xy():\n xmin, xmax, ymin, ymax = _minmaxXYlm(ax)\n if isIter_(pad) and len(pad) == 2:\n padx, pady = pad\n elif not isIter_(pad):\n padx = pady =pad\n else:\n raise(\"'pad' should be scalar (padx=pady) or arraylike (padx, pady)!\")\n if 'N' in rPos:\n y = ymax + pady\n elif 'n' in rPos:\n y = ymax - pady\n elif 'S' in rPos:\n y = ymin - pady\n elif 's' in rPos:\n y = ymin + pady\n else:\n y = (ymin + ymax) * .5\n if 'E' in rPos:\n x = xmax + padx\n elif 'e' in rPos:\n x = xmax - padx\n elif 'W' in rPos:\n x = xmin - padx\n elif 'w' in rPos:\n x = xmin + padx\n else:\n x = (xmin + xmax) * .5\n #print(f\"padx:{padx}; pady:{pady}\")\n #print(f\"xmin:{xmin}; xmax:{xmax}; ymin:{ymin}; ymax:{ymax};\")\n #print(f\"x:{x}, y:{y}\")\n qk = plt.quiverkey(\n q, x, y, U, s,\n coordinates=coordinates,\n **kwArgs\n )\n return qk\n\ndef axs_abc_(\n fig,\n ax,\n s='(a)',\n dx=.005,\n dy=.005,\n fontdict=dict(fontweight='bold'),\n **kwArgs,\n ):\n xmin, _, _, ymax = _minmaxXYlm(ax)\n kD = dict(ha='right') if dx > 0 else dict(ha='left')\n kD.update(kwArgs)\n fig.text(xmin - dx, ymax + dy, s, fontdict=fontdict, **kD)\n\n\n\ndef aligned_tx_(\n fig,\n ax,\n s,\n rpo='tl',\n itv=0.005,\n fontdict=None,\n **kwArgs,\n ):\n xmin, xmax, ymin, ymax = _minmaxXYlm(ax)\n if rpo[0].upper() in 'TB':\n xlm = [xmin, xmax]\n elif rpo[0].upper() in 'LR':\n xlm = [ymin, ymax]\n else:\n raise Exception('uninterpretable rpo!')\n\n if rpo[0].upper() == 'T':\n y = ymax + itv\n if itv >= 0:\n kwArgs.update({'verticalalignment': 'bottom'})\n else:\n kwArgs.update({'verticalalignment': 'top'})\n elif rpo[0].upper() == 'B':\n y = ymin - itv\n if itv >= 0:\n kwArgs.update({'verticalalignment': 'top'})\n else:\n kwArgs.update({'verticalalignment': 'bottom'})\n elif rpo[0].upper() == 'R':\n y = xmax + itv\n if itv >= 0:\n kwArgs.update({'verticalalignment': 'top'})\n else:\n kwArgs.update({'verticalalignment': 'bottom'})\n elif rpo[0].upper() == 'L':\n y = xmin - itv\n if itv >= 0:\n kwArgs.update({'verticalalignment': 'bottom'})\n else:\n kwArgs.update({'verticalalignment': 'top'})\n\n if rpo[1].upper() == 'L':\n x = xlm[0] + abs(itv)\n kwArgs.update({'horizontalalignment': 'left'})\n elif rpo[1].upper() == 'C':\n x = np.mean(xlm)\n kwArgs.update({'horizontalalignment': 'center'})\n elif rpo[1].upper() == 'R':\n x = xlm[1] - abs(itv)\n kwArgs.update({'horizontalalignment': 'right'})\n else:\n raise Exception('uninterpretable rpo!')\n\n if rpo[0].upper() in 'LR':\n x, y = y, x\n kwArgs.update({'rotation': 'vertical', 'rotation_mode': 'anchor'})\n\n tx = fig.text(x, y, s, fontdict=fontdict, **kwArgs)\n return tx\n\n\ndef _flt_cube(cube):\n data = nanMask_(cube.data).flatten()\n return data[~np.isnan(data)]\n\n\ndef pdf_iANDe_(\n ax,\n eCube,\n color,\n log_it=False,\n kopt={},\n ):\n if 'clip' in kopt:\n clip = np.array(kopt['clip'], dtype=np.float64)\n kopt.update({'clip': clip})\n ils = []\n if 'realization' in (i.name() for i in eCube.dim_coords):\n for c in eCube.slices_over('realization'):\n obs = _flt_cube(c)\n _, _, kdeo = 
kde__(obs.astype(np.float64), log_it=log_it, **kopt)\n #plot\n il, = ax.plot(kdeo.support, kdeo.density, lw=0.75, color=color,\n alpha=.25)\n ils.append(il)\n obs = _flt_cube(eCube)\n _, _, kdeo = kde__(obs.astype(np.float64), log_it=log_it, **kopt)\n el, = ax.plot(kdeo.support, kdeo.density, lw=1.5, color=color, alpha=.85)\n return (ils, el)\n\n\ndef cdf_iANDe_(\n ax,\n eCube,\n color,\n log_it=False,\n kopt={},\n ):\n if 'clip' in kopt:\n clip = np.array(kopt['clip'], dtype=np.float64)\n kopt.update({'clip': clip})\n ils = []\n if 'realization' in (i.name() for i in eCube.dim_coords):\n for c in eCube.slices_over('realization'):\n obs = _flt_cube(c)\n x, _, kdeo = kde__(obs.astype(np.float64), log_it=log_it, **kopt)\n #plot\n il, = ax.plot(x, kdeo.cdf, lw=0.75, color=color, alpha=.25)\n ils.append(il)\n obs = _flt_cube(eCube)\n x, _, kdeo = kde__(obs.astype(np.float64), log_it=log_it, **kopt)\n el, = ax.plot(x, kdeo.cdf, lw=1.5, color=color, alpha=.85)\n return (ils, el)\n\n\ndef ts_eCube_(ax, eCube, color):\n y0y1 = y0y1_of_cube(eCube)\n cl = []\n ils = []\n if isinstance(eCube, iris.cube.Cube):\n if 'realization' in (i.name() for i in eCube.coords()):\n cubes = eCube.slices_over('realization')\n #ax_r = eCube.coord_dims('realization')[0]\n #crd_r = eCube.coord('realization').points\n #cubes = [extract_byAxes_(eCube, ax_r, np.where(crd_r == i)[0][0])\n # for i in crd_r]\n cut = False\n else:\n cubes = []\n else:\n cubes, cut = eCube, True\n for c in cubes:\n tmp = extract_period_cube(c, *y0y1) if cut else c\n cl.append(tmp.data)\n #plot\n il, = iplt.plot(tmp, axes=ax, lw=.5, color=color, alpha=.25, zorder=0)\n ils.append(il)\n ets = tmp.copy(np.mean(np.array(cl), axis=0)) if cubes else eCube\n el, = iplt.plot(ets, axes=ax, lw=1.75, color=color, alpha=.8, zorder=9)\n return (ils, el)\n\n\ndef bp_dataLL_(ax, dataLL, labels=None):\n gn = len(dataLL)\n ng = len(dataLL[0])\n ax.set_xlim(.5, ng + .5)\n ww = .001\n wd = (.6 - (gn - 1) * ww) / gn\n p0s = np.arange(ng) + .7 + wd / 2\n\n cs = plt.get_cmap('Set2').colors\n bp_dict = {'notch': True,\n 'sym': '+',\n 'positions': p0s,\n 'widths': wd,\n 'patch_artist': True,\n 'medianprops': {'color': 'lightgray',\n 'linewidth': 1.5}}\n\n hgn = []\n for i, ii in enumerate(dataLL):\n ts_ = [np.ma.compressed(iii) for iii in ii]\n h_ = ax.boxplot(ts_, **bp_dict)\n for patch in h_['boxes']:\n patch.set_facecolor(cs[rpt_(i, len(cs))] + (.667,))\n hgn.append(h_['boxes'][0])\n p0s += ww + wd\n ax.set_xticks(np.arange(ng) + 1)\n if labels:\n ax.set_xticklabels(labels, rotation=60, ha='right',\n rotation_mode='anchor')\n else:\n ax.set_xticklabels([None] * ng)\n return hgn\n\n\ndef bp_dataLL0_(ax, dataLL, labels=None):\n gn = len(dataLL)\n ng = len(dataLL[0])\n dd0 = [np.ma.compressed(i) for i in dataLL]\n dd1 = [[np.ma.compressed(dd[i]) for dd in dataLL] for i in range(ng)]\n ax.set_xlim(.5, gn + .5)\n ww = .001\n wd = (.6 - (ng - 1) * ww) / ng\n p0s = np.arange(gn) + .7 + wd / 2\n wd0 = .667\n p0 = np.arange(gn) + 1.\n\n cs = plt.get_cmap('Set2').colors\n if gn <= 3:\n cs0 = ['b', 'g', 'r']\n else:\n cs0 = plt.get_cmap('tab10').colors\n bp_dict = {'notch': True,\n 'sym': '+',\n 'zorder': 15,\n 'positions': p0s,\n 'widths': wd,\n 'patch_artist': True,\n 'medianprops': {'color': 'lightgrey',\n 'linewidth': 1.5}}\n bp0_dict= {'positions': p0,\n 'widths': wd0,\n 'sym': '',\n 'zorder': 2,\n 'capprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'boxprops': {'color': '#555555dd'},\n 'whiskerprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'flierprops': 
{'color': '#555555dd'}}\n\n hgn = []\n for i, ii in enumerate(dd1):\n h_ = ax.boxplot(ii, **bp_dict)\n for patch in h_['boxes']:\n patch.set_facecolor(cs[rpt_(i, len(cs))] + (.667,))\n hgn.append(h_['boxes'][0])\n p0s += ww + wd\n\n bp_dict.update(bp0_dict)\n h_ = ax.boxplot(dd0, **bp_dict)\n\n for i, patch in enumerate(h_['boxes']):\n patch.set_facecolor('#555555dd')\n patch.set_zorder(bp_dict['zorder'] + 2 * i)\n hgn.append(h_['boxes'][0])\n eps = {}\n\n for i, md in enumerate(h_['medians']):\n if i == 0:\n y0 = md.get_ydata()[1]\n xd = md.get_xdata()\n xd[1] = ax.get_xlim()[1]\n md.set_xdata(xd)\n md.set_color(cs0[i])\n md.set_zorder(bp_dict['zorder'] + 1 + 2 * i)\n if i > 0:\n s = '${:+.2g}$'.format(md.get_ydata()[1] - y0)\n ax.text(xd[1], md.get_ydata()[1], s, va='center', color=cs0[i])\n\n ax.set_xticks(p0)\n if labels is not None:\n ax.set_xticklabels(labels, ha='center')\n for i, xtl in enumerate(ax.get_xticklabels()):\n xtl.set_color(cs0[i])\n\n return hgn\n\n\ndef bp_dataLL1_(ax, dataLL, labels=None):\n gn = len(dataLL)\n ng = len(dataLL[0])\n dd0 = [np.ma.compressed(i) for i in dataLL]\n dd1 = [[dd[i] for dd in dataLL] for i in range(ng)]\n ax.set_xlim(.5, gn + .5)\n wd0 = .667\n p0 = np.arange(gn) + 1.\n\n if gn <= 3:\n cs0 = ['b', 'g', 'r']\n else:\n cs0 = plt.get_cmap('tab10').colors\n bp_dict = {'notch': True,\n 'positions': p0,\n 'widths': wd0,\n 'sym': '',\n 'zorder': 5,\n 'patch_artist': True,\n 'capprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'boxprops': {'color': '#555555dd'},\n 'whiskerprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'flierprops': {'color': '#555555dd'},\n 'medianprops': {'color': 'lightgray',\n 'linewidth': 1.5}}\n\n hgn = []\n\n h_ = ax.boxplot(dd0, **bp_dict)\n for i, patch in enumerate(h_['boxes']):\n patch.set_facecolor('#555555dd')\n patch.set_zorder(bp_dict['zorder'] + 2 * i)\n hgn.append(h_['boxes'][0])\n\n for i, md in enumerate(h_['medians']):\n if i == 0:\n y0 = md.get_ydata()[1]\n xd = md.get_xdata()\n xd[1] = ax.get_xlim()[1]\n md.set_xdata(xd)\n md.set_color(cs0[i])\n md.set_zorder(bp_dict['zorder'] + 1 + 2 * i)\n if i > 0:\n s = '{:+.2g}'.format(md.get_ydata()[1] - y0)\n ax.text(xd[1], md.get_ydata()[1], s, va='center', color=cs0[i])\n\n ax.set_xticks(p0)\n if labels is not None:\n ax.set_xticklabels(labels, ha='center')\n for i, xtl in enumerate(ax.get_xticklabels()):\n xtl.set_color(cs0[i])\n\n return hgn\n\n\ndef bp_cubeL_eval_(ax, cubeL):\n XL = ['Simulations']\n dd0 = [flt_l([np.ma.compressed(i.data) for i in cubeL[:-2]])]\n if cubeL[-2]:\n XL.append('EOBS')\n dd0.append(np.ma.compressed(cubeL[-2].data))\n if cubeL[-1]:\n XL.append('ERA-Interim')\n dd0.append(np.ma.compressed(cubeL[-1].data))\n gn = len(XL)\n ng = len(cubeL) - 2\n dd1 = [i.data for i in cubeL[:-2]]\n ax.set_xlim(.5, gn + .5)\n ww = .001\n wd = (.6 - (ng - 1) * ww) / ng\n p0s = np.asarray([.7]) + wd / 2\n wd0 = .667\n p0 = np.arange(gn) + 1.\n\n cs = plt.get_cmap('Set2').colors\n bp_dict = {'notch': True,\n 'sym': '+',\n 'zorder': 15,\n 'positions': p0s,\n 'widths': wd,\n 'patch_artist': True,\n 'medianprops': {'color': 'lightgrey',\n 'linewidth': 1.5}}\n bp0_dict= {'positions': p0,\n 'widths': wd0,\n 'sym': '',\n 'zorder': 2,\n 'capprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'boxprops': {'color': '#555555dd'},\n 'whiskerprops': {'color': '#555555dd',\n 'linewidth': 3},\n 'flierprops': {'color': '#555555dd'}}\n\n hgn = []\n for i, ii in enumerate(dd1):\n h_ = ax.boxplot(ii, **bp_dict)\n for patch in h_['boxes']:\n patch.set_facecolor(cs[rpt_(i, 
len(cs))] + (.667,))\n hgn.append(h_['boxes'][0])\n p0s += ww + wd\n\n bp_dict.update(bp0_dict)\n h_ = ax.boxplot(dd0, **bp_dict)\n\n for i, patch in enumerate(h_['boxes']):\n patch.set_facecolor('#555555dd')\n patch.set_hatch('x')\n patch.set_zorder(bp_dict['zorder'] + 2 * i)\n hgn.append(h_['boxes'][0])\n\n ax.set_xticks(p0)\n ax.set_xticklabels(XL, ha='center')\n\n return hgn\n\n\ndef distri_swe_(\n fig,\n nrow,\n ncol,\n n,\n df,\n pcho={},\n ti=None,\n **kwArgs,\n ):\n ax = fig.add_subplot(nrow, ncol, n)\n df.plot(ax=ax, **kwArgs, **pcho)\n ax.set_axis_off()\n if ti is not None:\n ax.set_title(ti)\n return ax\n\n\ndef get_1st_patchCollection_(ax):\n pc_ = None\n for i in ax.get_children():\n if isinstance(i, mpl.collections.PatchCollection):\n pc_ = i\n break\n return pc_\n\n\n#def heatmap(data, row_labels, col_labels, ax=None,\n# cbar_kw={}, cbarlabel=\"\", **kwArgs):\ndef heatmap(\n data,\n row_labels,\n col_labels,\n ax=None,\n tkD=None,\n **kwArgs,\n ):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwArgs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwArgs)\n\n # Create colorbar\n #cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n #cbar.ax.set_ylabel(cbarlabel)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... and label them with the respective list entries.\n ax.set_xticklabels(col_labels)\n ax.set_yticklabels(row_labels)\n\n # Let the horizontal axes labeling appear on top.\n if tkD:\n ax.tick_params(**tkD)\n rot=-45\n else:\n rot=45\n #ax.tick_params(top=True, bottom=False,\n # labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=rot, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n #return im, cbar\n return im\n\n\ndef annotate_heatmap(\n im,\n data=None,\n valfmt=\"{:.2f}\",\n data_=None,\n textcolors=(\"black\", \"white\"),\n threshold=None,\n middle_0=False,\n **textkw,\n ):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A pair of colors. The first is used for values below a threshold,\n the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. 
If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwArgs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max())/2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n #if isinstance(valfmt, str):\n # valfmt = mpl.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n def _ccc(v):\n if middle_0:\n kw.update(color=textcolors[int(im.norm(abs(v)) > threshold)])\n else:\n kw.update(color=textcolors[int(im.norm(v) > threshold)])\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n _ccc(data[i, j])\n #kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n if data_ is None:\n #text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)\n text = im.axes.text(j, i, valfmt.format(data[i, j]), **kw)\n else:\n text = im.axes.text(j, i,\n valfmt.format(data[i, j], data_[i, j]),\n **kw)\n texts.append(text)\n\n return texts\n\n\ndef geoTkLbl_(ax):\n _lat = lambda x: rpt_(x, 180, -180)\n ax.xaxis.set_major_formatter(\n lambda x, pos: '{}\\xb0E'.format(_lat(x)) if _lat(x) > 0 else\n ('{}\\xb0W'.format(abs(_lat(x))) if _lat(x) < 0 else '0')\n )\n ax.yaxis.set_major_formatter(\n lambda x, pos: '{}\\xb0N'.format(x) if x > 0 else\n ('{}\\xb0S'.format(abs(x)) if x < 0 else 'Eq.')\n )\n\n\ndef pstGeoAx_(ax, delta=(30, 20), coastline=True, **kwArgs):\n proj = ccrs.PlateCarree()\n if coastline:\n ax.coastlines(linewidth=0.2, color=\"darkgray\")\n glD = dict(\n crs=ccrs.PlateCarree(),\n draw_labels={\"bottom\": \"x\", \"left\": \"y\"},\n xpadding=-.1,\n ypadding=-.1,\n xlabel_style=dict(color=\"dimgray\", fontsize=8),\n ylabel_style=dict(color=\"dimgray\", fontsize=8),\n dms=True,\n lw=.5,\n color=\"darkgray\",\n alpha=.5,\n )\n if delta:\n x0, x1, y0, y1 = ax.get_extent(crs=proj)\n if any(i > 180 for i in (x0, x1)):\n _xtks = np.arange(0, 360, 5)\n else:\n _xtks = np.arange(-180, 180, 5)\n _xind = ind_inRange_(_xtks, x0, x1)\n _ytks = np.arange(-90, 90, 5)\n _yind = ind_inRange_(_ytks, y0, y1)\n glD.update(dict(\n xlocs = [i for i in _xtks[_xind] if i%delta[0] == 0],\n ylocs = [i for i in _ytks[_yind] if i%delta[1] == 0],\n ))\n glD.update(kwArgs)\n gl = ax.gridlines(**glD)\n","repo_name":"ahheo/mypu","sub_path":"uuuu/pppp.py","file_name":"pppp.py","file_ext":"py","file_size_in_byte":33547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15849469284","text":"#服务端流程描述\nimport socket\n\n#1.创建服务端的socket对象\nserverSocket = socket.socket()\n\n#2.为socket绑定端口和ip地址\n\"\"\"\nbind(元组),将端口号和ip地址创建元组,然后传参\n(host,port)\n\"\"\"\n#查看ip地址:在终端输入ipconfig命令\nip_port = (\"10.36.131.32\",6666)\nserverSocket.bind(ip_port)\n\n#3.服务端监听请求,随时准备接受客户端发来的连接\n\"\"\"\nlisten(backlog)\nbacklog:在拒绝连接之前,可以挂起的最大连接数量\n注意:不能无限大\n\"\"\"\nserverSocket.listen(5)\n\nprint(\"server waiting~~~~~\")\n\n#4.服务端接收到客户端的请求,被动打开进行连接\n#accept();在连接的时候,会处于阻塞状态\n#返回值:conn,address,conn表示连接到的套接字对象,address表示连接到的客户端的地址\nconn,addr = 
serverSocket.accept()\n\n#5.服务端接收消息\n\"\"\"\nrecv(size)\n可以一次性接收到多大的数据\n\"\"\"\nclient_data = conn.recv(1024)\nprint(str(client_data,\"utf-8\"))\n\n#6.服务端关闭\nserverSocket.close()\n\n\n","repo_name":"zaoyuaner/Learning-materials","sub_path":"python1812/python_1/18_网络编程/代码/代码/tcp01/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"32157379164","text":"import subprocess\nimport glob\nimport random\nimport os as oslib\nimport linux\nimport windows\n\nMOUNT = \"/mnt/iamroot-{os}-{rng}/\"\n\ndef run(cmd):\n return subprocess.check_output(cmd, shell=True, stderr=open(\"/dev/null\", \"w\")).decode(\"utf-8\").rstrip(\"\\n\")\n\ndef joinparts(lst, maxparts, joiner=\" \"):\n return lst[:maxparts-1] + [joiner.join(lst[maxparts-1:])]\n\ndef get_partitions():\n # Please forgive me for this shitcode\n # Just never look here again\n fdisk = run(\"sudo fdisk -l\")\n line_splits = 0\n partition_table = False\n partitions = []\n current_disk = \"\"\n for line in fdisk.split(\"\\n\"):\n if line.startswith(\"Disk /dev\"):\n current_disk = line[4:line.find(\":\")]\n if not line or line.isspace():\n line_splits += 1\n if line_splits > 1:\n partition_table = False\n line_splits = 1\n continue\n if partition_table:\n if line:\n partitions.append(current_disk + \" \" + line)\n if line_splits == 1 and line.startswith(\"Device\"):\n line_splits = 0\n partition_table = True\n partitions = [\n joinparts(list(filter(lambda i: i != '', i.split(\" \"))), 7) for i in partitions\n ]\n return partitions\n\ndef get_mounts(device):\n try:\n return run(\"findmnt -nr -o target -S \"+device).split(\"\\n\")\n except subprocess.CalledProcessError:\n return []\n\ndef get_toolkit_mounts():\n return glob.glob(MOUNT.format(os=\"*\", rng=\"*\"))\n\ndef mount_device(os, device):\n mounts = get_mounts(device)\n if mounts:\n return mounts[0]\n target = MOUNT.format(os=os, rng=random.randint(1, 256))\n oslib.makedirs(target, exist_ok=True)\n run(\"mount \"+device+\" \"+target)\n return target\n\ndef main():\n if oslib.getuid() != 0:\n print(\"Not root\")\n print(\"You need to run this from a live usb on the computer you want to roothack\")\n print(\"You can either do this through BIOS (Google: \\\" boot from USB\\\")\")\n print(\"Or by pressing Shift+Reboot in windows start menu\")\n exit(-1)\n print(\"Loading existing iamroot mounts...\")\n toolkit_mounts = get_toolkit_mounts()\n print(f\"Found {len(toolkit_mounts)} iamroot mounts\")\n if toolkit_mounts:\n for mount in toolkit_mounts:\n print(\"Unmounting\", mount)\n if oslib.path.ismount(mount):\n run(\"sudo umount \"+mount)\n oslib.rmdir(mount)\n print(\"Done\")\n print(\"Loading partitions info...\")\n partitions = get_partitions()\n print(\"Searching for operating systems...\")\n oses = []\n for partdata in partitions:\n disk, device, start, end, sectors, size, type = partdata\n if type == \"Linux filesystem\":\n print(\"Linux compatible filesystem on\", device)\n if \"/\" in get_mounts(device):\n print(device, \"is mounted as root\")\n oses.append((\"Linux_self\", device))\n continue\n if linux.is_linux(device):\n print(\"Linux on\", device)\n oses.append((\"Linux\", device))\n elif type == \"Microsoft basic data\":\n print(\"Windows compatible filesystem on\", device)\n if windows.is_windows(device):\n print(\"Windows on\", device)\n oses.append((\"Windows\", device))\n print(\"=== OSs ===\")\n for ind, osdata in enumerate(oses):\n os, device = 
osdata\n if os == \"Linux_self\":\n os = \"Linux (current)\"\n print(f\"[{ind}]\", os, \"on\", device)\n osid = int(input(\"Select os to roothack: \"))\n os, device = oses[osid]\n mountpoint = mount_device(os, device)\n \n print(\"Mounted as\", mountpoint)\n if os == \"Windows\":\n tools = windows.TOOLS\n elif os.startswith(\"Linux\"):\n tools = linux.TOOLS\n\n for ind, title in enumerate(tools.keys()):\n print(f\"[{ind}] {title}\")\n ind = int(input(\"Select: \"))\n tool = list(tools.values())[ind]\n tool(mountpoint)\n \n\nif __name__ == \"__main__\":\n main()","repo_name":"highghlow/IAmRootTK","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"24456406905","text":"import time\r\nimport datetime\r\nimport random\r\nimport pandas as pd\r\nimport datetime\r\nfrom airflow import DAG\r\nfrom airflow.operators.python_operator import PythonOperator\r\n\r\nfrom activity_data import Get_Data\r\nfrom database import Mongo_Database\r\n\r\ndefault_args = {\r\n'owner': 'wells',\r\n'start_date': datetime.datetime(2021, 12, 31, 0, 0),\r\n'depends_on_past': False,\r\n'email': ['wellspitney@gmail.com'],\r\n'email_on_failure': True,\r\n'email_on_retry': True,\r\n'schedule_interval':'0 0 * * *',\r\n'retries': 1,\r\n'retry_delay': datetime.timedelta(minutes=10),\r\n}\r\n\r\ndef crawler():\r\n gt = Get_Data()\r\n today = str(datetime.date.today())\r\n all_activities = []\r\n start = 1\r\n while True:\r\n activities = gt.get_activities(start)\r\n if not activities:\r\n break\r\n all_activities+=activities.copy()\r\n print(f'get page {start} activities')\r\n start+=1\r\n for index, activity in enumerate(all_activities):\r\n reviews = gt.get_review(activity)\r\n all_activities[index]['reviews'] = reviews\r\n activity_id = activity['activity_id']\r\n print(f'get {activity_id} reviews')\r\n if index % 15 == 0:\r\n time.sleep(random.randint(5, 10))\r\n gt.driver.quit()\r\n mongo = Mongo_Database()\r\n mongo.connect_db(\"mongodb://mongo:27017/\", 'klook_commodity', \"Camping&Glamping\")\r\n mongo.insert_data({'date':today,\r\n 'value': all_activities})\r\n mongo.client.close()\r\n \r\ndef make_daily_csv():\r\n today = str(datetime.date.today())\r\n mongo = Mongo_Database()\r\n mongo.connect_db(\"mongodb://mongo:27017/\", 'klook_commodity', \"Camping&Glamping\")\r\n query = mongo.get_data({'date': today, 'value.reviews.rating': {'$lte': 80}})\r\n if query is None:\r\n print('error')\r\n data = pd.DataFrame.from_dict(list(query)[0]['value'])\r\n mongo.client.close()\r\n data['reviews'] = data['reviews'].apply(lambda x: list(filter(lambda y: y['rating'] <= 80, x)))\r\n data.to_csv('/usr/local/airflow/dags/Klook_Camping&Glamping_ratingUnder4.csv', index=False, encoding='utf-8-sig')\r\n \r\nwith DAG('klook', default_args=default_args) as dag:\r\n t1 = PythonOperator(task_id='crawler', python_callable=crawler, dag=dag)\r\n t2 = PythonOperator(task_id='make_csv', python_callable=make_daily_csv, dag=dag)\r\n t1 >> t2","repo_name":"wellslu/klook_crawler","sub_path":"code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23096356181","text":"import pygame\nfrom sys import exit, float_repr_style\nfrom random import randint, choice\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n player_walk_1 = 
pygame.image.load('graphics/Player/player_walk_1.png').convert_alpha()\n player_walk_2 = pygame.image.load('graphics/Player/player_walk_2.png').convert_alpha()\n self.player_walk = [player_walk_1, player_walk_2] #走路圖片\n self.player_index = 0 #控制哪一個圖片\n self.player_jump = pygame.image.load('graphics/Player/jump.png').convert_alpha()\n\n self.image = self.player_walk[self.player_index]\n self.rect = self.image.get_rect(midbottom = (80, 300))\n self.gravity = 0\n\n self.jump_sound = pygame.mixer.Sound('audio/jump.mp3') # input sound\n self.jump_sound.set_volume(0.5) # 聲音大小\n\n def player_input(self):\n keys = pygame.key.get_pressed()\n if keys[pygame.K_SPACE] and self.rect.bottom >= 300:\n self.gravity = -20\n self.jump_sound.play() # 播放\n\n def apply_gravity(self):\n self.gravity += 1\n self.rect.y += self.gravity\n if self.rect.bottom >= 300:\n self.rect.bottom = 300\n\n def animation_state(self):\n if self.rect.bottom < 300:\n self.image = self.player_jump\n else:\n self.player_index += 0.1\n if self.player_index >= len(self.player_walk):\n self.player_index = 0\n self.image = self.player_walk[int(self.player_index)]\n\n def update(self):\n self.player_input()\n self.apply_gravity()\n self.animation_state()\n\nclass Obstacle(pygame.sprite.Sprite):\n def __init__(self, type):\n super().__init__()\n\n if type == 'fly':\n fly_1 = pygame.image.load('graphics/fly/fly1.png').convert_alpha()\n fly_2 = pygame.image.load('graphics/fly/fly2.png').convert_alpha()\n self.frames = [fly_1, fly_2]\n y_pos = 210\n else:\n snail_1 = pygame.image.load('graphics/snail/snail1.png').convert_alpha()\n snail_2 = pygame.image.load('graphics/snail/snail2.png').convert_alpha()\n self.frames = [snail_1, snail_2]\n y_pos = 300\n \n self.anumation_index = 0\n\n self.image = self.frames[self.anumation_index]\n self.rect = self.image.get_rect(midbottom = (randint(900, 1100), y_pos))\n\n def animation_state(self):\n self.anumation_index += 0.1\n if self.anumation_index >= len(self.frames):\n self.anumation_index = 0\n self.image = self.frames[int(self.anumation_index)]\n\n def update(self):\n self.animation_state()\n self.rect.x -= 6\n self.destory()\n\n def destory(self):\n if self.rect.x <= -100:\n self.kill()\n\ndef display_score():\n current_time = int(pygame.time.get_ticks() / 1000) - start_time#時間(毫秒)\n score_surf = test_font.render(\"Score : %.0f\" % current_time, False, (64, 64, 64))\n score_rect = score_surf.get_rect(center = (400, 50))\n screen.blit(score_surf, score_rect)\n return current_time\n\ndef obstacle_movement(obstacle_list): #此函是用來讓多的snail出現\n\n if obstacle_list:\n for obstacle_rect in obstacle_list:\n obstacle_rect.x -= 5\n\n if obstacle_rect.bottom == 300:\n screen.blit(snail_surf, obstacle_rect)\n else:\n screen.blit(fly_surf, obstacle_rect) \n\n # screen.blit(snail_surf, obstacle_rect)\n\n obstacle_list = [obstacle for obstacle in obstacle_list if obstacle.x > -100]\n\n return obstacle_list\n else:\n return []\n\ndef collisions(player, obstacles): #判斷是否碰到\n if obstacles:\n for obstacle_rect in obstacles:\n if player.colliderect(obstacle_rect):\n return False\n return True\n\ndef collision_sprite():\n if pygame.sprite.spritecollide(player.sprite, obstacle_group, False): # 前兩這撞擊,若第三個值為True會刪除\n obstacle_group.empty()\n return False\n else:\n return True\n\ndef player_animation():\n global player_surf, player_index\n\n if player_rect.bottom < 300:\n # jump\n player_surf = player_jump\n else:\n # walk\n player_index += 0.1\n if player_index >= len(player_walk):\n player_index = 0\n player_surf = 
player_walk[int(player_index)]\n\n # play walking animation if the player is on floor\n # display the jump surface when player is not on floor\n\npygame.init() #灌入pygame \n\nscreen = pygame.display.set_mode((800, 400)) #設定視窗大小\npygame.display.set_caption('Runner') #視窗名稱\nclock = pygame.time.Clock() #FPS設定\n\ntest_font = pygame.font.Font('font/Pixeltype.ttf', 50) #pygame.font.Font(font type, font size)\n\ngame_active = False\n\nstart_time = 0\n\nscore = 0\n\nbg_Music = pygame.mixer.Sound('audio/music.wav')\nbg_Music.play(loops = -1) # play(播放幾次) if == -1持續撥放\n\n# Group\nplayer = pygame.sprite.GroupSingle()\nplayer.add(Player())\n\nobstacle_group = pygame.sprite.Group()\n\n\n# score_surf = test_font.render('My game', False, (64, 64, 64)) #test_font.render(text, AA, color)\n# score_rect = score_surf.get_rect(center = (400, 50))\n\nsky_surface = pygame.image.load('graphics/Sky.png').convert() #匯入圖片\nground_surface = pygame.image.load('graphics/ground.png').convert()\n\n\"\"\"\n# obstacles\n# snail\nsnail_frame_1 = pygame.image.load('graphics/snail/snail1.png').convert_alpha()\nsnail_frame_2 = pygame.image.load('graphics/snail/snail2.png').convert_alpha()\nsnail_frames = [snail_frame_1, snail_frame_2]\nsnail_frame_index = 0\nsnail_surf = snail_frames[snail_frame_index]\nsnail_rect = snail_surf.get_rect(bottomright = (600, 300))\n\n# fly\nfly_frame_1 = pygame.image.load('graphics/fly/fly1.png').convert_alpha()\nfly_frame_2 = pygame.image.load('graphics/fly/fly2.png').convert_alpha()\nfly_frames = [fly_frame_1, fly_frame_2]\nfly_frame_index = 0\nfly_surf = fly_frames[fly_frame_index]\n\nobstacle_rect_list = []\n\nplayer_walk_1 = pygame.image.load('graphics/Player/player_walk_1.png').convert_alpha()\nplayer_walk_2 = pygame.image.load('graphics/Player/player_walk_2.png').convert_alpha()\nplayer_walk = [player_walk_1, player_walk_2] #走路圖片\nplayer_index = 0 #控制哪一個圖片\nplayer_jump = pygame.image.load('graphics/Player/jump.png').convert_alpha()\n\nplayer_surf = player_walk[player_index]\nplayer_rect = player_surf.get_rect(midbottom = (80, 300)) #位置\nplayer_gravity = 0\n\"\"\"\n# 初始畫面\nplayer_stand = pygame.image.load('graphics/Player/player_stand.png').convert_alpha()\nplayer_stand = pygame.transform.rotozoom(player_stand, 0, 2) #pygame.transform.rotozoom(圖像, 旋轉角度, 放大倍率) \nplayer_stand_rect = player_stand.get_rect(center = (400, 200))\n# player_stand_scaled = pygame.transform.scale(player_stand, (200, 400)) # https://blog.csdn.net/Enderman_xiaohei/article/details/88282456\n\n# 遊戲名稱\ngame_name = test_font.render('Pixel Runner', False, (111, 196, 169))\ngame_name_rect = game_name.get_rect(center = (400, 80))\n\ngame_message = test_font.render('Press space to run', False, (111, 196, 169))\ngame_message_rect = game_message.get_rect(center = (400, 320))\n\n# timer\nobstacle_timer = pygame.USEREVENT + 1\npygame.time.set_timer(obstacle_timer, 1400)\n\nsnail_animation_timer = pygame.USEREVENT + 2\npygame.time.set_timer(snail_animation_timer, 500)\n\nfly_animation_timer = pygame.USEREVENT + 3\npygame.time.set_timer(fly_animation_timer, 200)\n\nwhile True:\n for event in pygame.event.get(): #在視窗中所有的動作ex按下按鍵\n if event.type == pygame.QUIT: #關閉視窗鍵是否被按下\n pygame.quit()\n exit() #關閉視窗\n \"\"\"if game_active:\n # if event.type == pygame.MOUSEMOTION:\n # if player_rect.collidepoint(event.pos) and player_rect.bottom >= 300: #滑鼠按下並且在player上\n # player_gravity = -20\n\n # if event.type == pygame.KEYDOWN: #鍵盤被按下\n # if event.key == pygame.K_SPACE and player_rect.bottom >= 300: #空白鍵被按下 player_rect.bottom >= 300偵測是否在地面\n # 
player_gravity = -20\n # \n \"\"\"\n if game_active:\n if event.type == obstacle_timer:\n obstacle_group.add(Obstacle(choice(['fly', 'snail', 'snail', 'snail'])))\n # if randint(0, 2): #隨機看是要放snail還是fly\n # obstacle_rect_list.append(snail_surf.get_rect(bottomright = (randint(900, 1100), 300)))\n # else:\n # obstacle_rect_list.append(fly_surf.get_rect(bottomright = (randint(900, 1100), 210)))\n else:\n if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:\n game_active = True\n\n # snail_rect.left = 800 # 將怪物設回原點,不會重疊\n # start_time = int(pygame.time.get_ticks() / 1000) # set up score\n \"\"\"\n if event.type == snail_animation_timer:\n if snail_frame_index == 0:\n snail_frame_index = 1\n else:\n snail_frame_index = 0\n snail_surf = snail_frames[snail_frame_index]\n\n if event.type == fly_animation_timer:\n if fly_frame_index == 0:\n fly_frame_index = 1\n else:\n fly_frame_index = 0\n fly_surf = fly_frames[fly_frame_index]\n \"\"\"\n #if event.type == pygame.MOUSEMOTION: #滑鼠移動\n # print(event.pos) #滑鼠位置\n\n ##if event.type == pygame.MOUSEBUTTONDOWN: #按下滑鼠\n # print('mouse down')\n\n #if event.type == pygame.MOUSEBUTTONUP: #抬起滑鼠鍵\n # print('mouse up')\n\n if game_active:\n #draw all our elements\n screen.blit(sky_surface, (0, 0)) #show 物件\n screen.blit(ground_surface, (0, 300))\n\n # pygame.draw.rect(screen, '#c0e8ec', score_rect, 10)\n # pygame.draw.rect(screen, '#c0e8ec', score_rect) #pygame.draw.rect畫長方形(screen, color, pos)\n #pygame.draw.line(screen, 'Gold', (0, 0), pygame.mouse.get_pos(), 10) #pygame.draw.line畫直線(screen, color, 開始pos, 結束pos, 粗度)\n #pygame.draw.ellipse(screen, 'Brown', pygame.Rect(50, 200, 100, 100))\n\n # screen.blit(score_surf, score_rect)\n\n score = display_score()\n\n # snail_rect.x -= 4 \n # if snail_rect.right <= 0:\n # snail_rect.left = 800 \n\n\n # screen.blit(snail_surf, snail_rect)\n\n # player\n # player_gravity += 1 #跳\n # player_rect.y += player_gravity\n\n # if player_rect.bottom >= 300: #讓player不跳出圖形之外\n # player_rect.bottom = 300\n # player_animation()\n # screen.blit(player_surf, player_rect)\n\n player.draw(screen)\n player.update()\n\n obstacle_group.draw(screen)\n obstacle_group.update()\n\n # obstacle movement\n # obstacle_rect_list = obstacle_movement(obstacle_rect_list)\n\n # collision\n\n game_active = collision_sprite()\n # game_active = collisions(player_rect, obstacle_rect_list)\n # if snail_rect.colliderect(player_rect): #判斷player碰到障礙物結束遊戲\n # game_active = False\n\n # if player_rect.colliderect(snail_rect): #碰撞 0 1\n # print('collision')\n \n #mouse_pos = pygame.mouse.get_pos() #滑鼠位置 (x, y)\n #if player_rect.collidepoint(mouse_pos): #碰點 0 1\n # print(pygame.mouse.get_pressed()) #三個bool(左鍵按下, 中鍵按下, 右鍵按下)\n\n else:\n screen.fill((94, 129, 162))\n screen.blit(player_stand, player_stand_rect)\n\n # obstacle_rect_list.clear() #剛死亡會重疊,所以清除\n # player_rect.midbottom = (80, 300)\n player_gravity = 0\n\n score_message = test_font.render('Your score: %.0f' % score, False, (111, 196, 169))\n score_message_rect = score_message.get_rect(center = (400, 330))\n\n screen.blit(game_name, game_name_rect)\n # screen.blit(game_message, game_message_rect)\n \n if score == 0:\n screen.blit(game_message, game_message_rect)\n else:\n screen.blit(score_message, score_message_rect)\n\n \n pygame.display.update() #update everying\n clock.tick(60) #FPS設定","repo_name":"Howardleejhenhao/NLHSgame","sub_path":"The ultimate introduction to 
Pygame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12516,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"2892888083","text":"import os\nimport math\nimport random\nimport json\n# n1(0), lv1(1), hp1(2), atk1(3), def1(4), spd1(5), dp1(6)\ndef start_buff(datas,buff,now,skill,txt):#開場 1\n number = skill[1]\n if(number == 100001):#召魂術 100001\n Ran = random.randint(1,4)\n if(Ran == 4):\n buff[now][now][3] = datas[now][3]\n skill[2] = 3\n txt += datas[now][0]+\"發動了技能「召魂術」,提升了自身攻擊力100%,效果將持續三回合\\n\"\n \n elif(number == 100002):#在這裡! 100002\n Ran = random.randint(1,2)\n if(Ran == 2 and datas[(now+1)%2][3]>datas[now][4]):\n buff[now][now][4] = datas[now][4]\n skill[2] = -1\n txt += datas[now][0]+\"發動了技能「在這裡!」,提升了自身防禦力100%\\n\"\n\n elif(number == 100003):#調虎離山 100003\n Ran = random.randint(1,20)\n if(Ran==20):\n buff[now][now][2] = datas[now][2]*5\n skill[2] = -1\n txt += datas[now][0]+\"發動了技能「調虎離山」,提升了自身血量500%\\n\"\n \n elif(number == 100004):#如虎添翼 100004\n buff[now][now][3] = int(datas[now][3]*0.1)\n buff[now][now][5] = int(datas[now][5]*0.15)\n skill[2] = -1\n txt += datas[now][0]+\"發動了技能「如虎添翼」,提升了攻擊力10%及速度15%\\n\"\n\n elif(number == 100005):#狐假虎威 100005\n buff[now][now][2] = datas[(now*-1)+1][2]-datas[now][2]\n buff[now][now][3] = datas[(now*-1)+1][3]-datas[now][3]\n buff[now][now][4] = datas[(now*-1)+1][4]-datas[now][4]\n buff[now][now][5] = datas[(now*-1)+1][5]-datas[now][5]\n skill[2] = 3\n txt += datas[now][0]+\"發動了技能「狐假虎威」,除了等級外,所有數值複製成對手數值了,維持三回合\\n\"\n\n elif(number == 100006):#虎頭蛇尾 100006\n buff[now][now][2] = datas[now][2]\n buff[now][now][3] = datas[now][3]\n buff[now][now][4] = datas[now][4]\n buff[now][now][5] = datas[now][5]\n skill[2] = random.randint(1,3)\n kkk = \"\"\n if(skill[2]==1):\n kkk = \"一\"\n elif(skill[2]==2):\n kkk = \"二\"\n else:\n kkk = \"三\"\n txt += datas[now][0]+\"發動了技能「虎頭蛇尾」,除了等級外,提升了自身所有數值100%,維持\"+kkk+\"回合\\n\"\n \n return buff,skill,txt\n\ndef self_buff(datas,buff,now,skill,txt):#主動 2\n number = skill[1]\n if(number == 200001):#金字塔的秘密 200001\n if(datas[now][2]<50 and datas[now][5]>=100):\n Ran = random.randint(1,2)\n if(Ran == 2):\n buff[now][now][2] += 100\n buff[now][now][5] -= 50\n skill[2] = -1\n txt += datas[now][0]+\"發動了技能「金字塔的秘密」,犧��了50點速度轉換成了100點血量\\n\"\n \n return buff,skill,txt\n\ndef other_buff(datas,buff,now_atk,now,skill,txt,Dam,Ran,hit):#被動 3\n number = skill[1]\n if(number == 300001):#黑夜之王 300001\n if(hit == 0 and now_atk==now):\n r = random.randint(1,2)\n if(r==2):\n buff[now][(now+1)%2][5] -= int(datas[(now+1)%2][5]*0.5)\n skill[2] = 3\n txt += datas[now][0]+\"發動了技能「黑夜之王」,降低了對手速度50%,效果將持續三回合\\n\"\n\n return buff,skill,txt\n\ndef damage(u1,u2,txt):\n #MLv:int,MATK:int,YDEF:int,MSPD:int,DP:int\n Ran = random.randint(50,150)\n Ran = Ran/100\n MLv,MATK,YDEF,MSPD,DP = u1[1],u1[3],u2[4],u1[5],u1[6]\n Dam = ((((2*MLv+10)/250)*(MATK/YDEF)*(Ran))*(MLv+MSPD+20)/4*DP)\n speed_past = random.randint(1,100)\n\n #txt += \"現在數值:\\n\"\n #tostr = [\"n\",\"lv\",\"hp\",\"atk\",\"def\",\"sdp\",\"dp\"]\n #for i in range (7):\n # txt += str(tostr[i])+\":\"+str(u1[i])+\",\"\n #txt += \"\\n\"\n #for i in range (7):\n # txt += str(tostr[i])+\":\"+str(u2[i])+\",\"\n #txt += \"\\n\"\n\n if(speed_past<=int(math.log(u2[5])/math.log(u1[5])*10)):\n txt += u2[0]+\"閃避了\"+u1[0]+\"的攻擊\\n\"\n return 0,1,0,txt\n elif(Ran>=1.25):\n txt += \"暴擊! 
\"+u1[0]+\"對\"+u2[0]+\"造成了\"+str(int(Dam))+\"點傷害\\n\"\n return int(Dam),Ran,1,txt\n else:\n txt += u1[0]+\"對\"+u2[0]+\"造成了\"+str(int(Dam))+\"點傷害\\n\"\n return int(Dam),Ran,1,txt\n #(((2*MLv+10)/250)*(MATK/YDEF)*(Ran))*(MLv+MSPD+20)/4*DP\n\ndef count_now_fight(datas,buff,now_fight):\n for j in range(7):\n now_fight[0][j] = datas[0][j] + buff[0][0][j] + buff[1][0][j]\n now_fight[1][j] = datas[1][j] + buff[0][1][j] + buff[1][1][j]\n return now_fight\n\ndef fight(n1:str,lv1:int,hp1:int,atk1:int,def1:int,spd1:int,n2:str,lv2:int,hp2:int,atk2:int,def2:int,spd2:int,dp1:float,dp2:float,arm1:int,arm2:int,skt1:int,skn1:int,skt2:int,skn2:int):\n if(arm1 != 0):\n hp1,atk1,def1,spd1 = arms(hp1,atk1,def1,spd1,arm1)\n if(arm2 != 0):\n hp2,atk2,def2,spd2 = arms(hp2,atk2,def2,spd2,arm2)\n datas = [[n1,lv1,hp1,atk1,def1,spd1,dp1],[n2,lv2,hp2,atk2,def2,spd2,dp2]]\n buff = [[[\"\",0,0,0,0,0,0],[\"\",0,0,0,0,0,0]],[[\"\",0,0,0,0,0,0],[\"\",0,0,0,0,0,0]]]\n now_fight = [[\"\",0,0,0,0,0,0],[\"\",0,0,0,0,0,0]]\n h1,h2 = hp1,hp2\n skill=[[skt1,skn1,0],[skt2,skn2,0]]\n #skill type,skill number,skill statue\n txt = \"\"\n list = {}\n now = 0\n if(skill[0][0]==1):\n now_fight = count_now_fight(datas,buff,now_fight)\n buff,skill[0],txt = start_buff(now_fight,buff,0,skill[0],txt)\n datas[0][2] += buff[0][0][2]\n datas[1][2] += buff[0][1][2]\n buff[0][0][2],buff[0][1][2] = 0,0\n if(skill[1][0]==1):\n now_fight = count_now_fight(datas,buff,now_fight)\n buff,skill[1],txt = start_buff(now_fight,buff,1,skill[1],txt)\n datas[0][2] += buff[1][0][2]\n datas[1][2] += buff[1][1][2]\n buff[1][0][2],buff[1][1][2] = 0,0\n while(datas[0][2]>0 and datas[1][2]>0):\n if(skill[0][2]==0):\n buff[0] = [[\"\",0,0,0,0,0,0],[\"\",0,0,0,0,0,0]]\n if(skill[1][2]==0):\n buff[1] = [[\"\",0,0,0,0,0,0],[\"\",0,0,0,0,0,0]]\n if(skill[0][2]>0):\n skill[0][2] -= 1\n if(skill[1][2]>0):\n skill[1][2] -= 1\n S1 = random.randint(1,3)*(datas[0][5]+buff[0][0][5]+buff[1][0][5])\n S2 = random.randint(1,3)*(datas[0][5]+buff[0][1][5]+buff[1][1][5])\n if(S1>=S2):#s1先攻\n now = 0\n else:#s2先攻\n now = 1\n for i in range(2):\n now = (now+i)%2\n if(skill[now][0]==2):\n now_fight = count_now_fight(datas,buff,now_fight)\n buff,skill[now],txt = self_buff(now_fight,buff,now,skill[now],txt)\n datas[0][2] += buff[now][0][2]\n datas[1][2] += buff[now][1][2]\n buff[now][0][2],buff[now][1][2] = 0,0\n now_fight = count_now_fight(datas,buff,now_fight)\n Dam,Ran,hit,txt = damage(now_fight[now],now_fight[(now+1)%2],txt)\n datas[(now+1)%2][2] -= Dam\n if(datas[(now+1)%2][2]<0):\n break\n if(skill[now][0]==3):\n now_fight = count_now_fight(datas,buff,now_fight)\n buff,skill[now],txt = other_buff(now_fight,buff, now,now,skill[now],txt,Dam,Ran,hit)\n datas[0][2] += buff[now][0][2]\n datas[1][2] += buff[now][1][2]\n if(skill[(now+1)%2][0]==3):\n now_fight = count_now_fight(datas,buff,now_fight)\n buff,skill[(now+1)%2],txt = other_buff(now_fight,buff, now,(now+1)%2,skill[(now+1)%2],txt,Dam,Ran,hit)\n datas[0][2] += buff[(now+1)%2][0][2]\n datas[1][2] += buff[(now+1)%2][1][2]\n buff[(now+1)%2][0][2],buff[(now+1)%2][1][2] = 0,0\n if(datas[0][2]<=0):\n txt += str(datas[1][0])+\"獲勝!\\n\"\n return txt,0,0,h1-datas[0][2],h2-datas[1][2]\n else:\n txt += str(datas[0][0])+\"獲勝!\\n\"\n return txt,0,1,h1-datas[0][2],h2-datas[1][2]\n\ndef arms(HP:int,ATK:int,DEF:int,SPD:int,number:int):\n with open(\"arms.json\",\"r\",encoding=\"utf-8\")as f:\n data = json.load(f)\n L = data[str(number)]\n if(\".\" in str(L[1])):\n HP *= L[1]\n else:\n HP += L[1]\n if(\".\" in str(L[2])):\n ATK *= L[2]\n else:\n 
ATK += L[2]\n    if(\".\" in str(L[3])):\n        DEF *= L[3]\n    else:\n        DEF += L[3]\n    if(\".\" in str(L[4])):\n        SPD *= L[4]\n    else:\n        SPD += L[4]\n    return int(HP),int(ATK),int(DEF),int(SPD)","repo_name":"Ststone1687/mumi_bot","sub_path":"skill.py","file_name":"skill.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28441902837","text":"import pika\n\nconnection = pika.BlockingConnection(\n    pika.ConnectionParameters(host='localhost'))\ndirect_channel = connection.channel()\n\n\ndirect_channel.exchange_declare(exchange='messages', exchange_type='direct')\n\nresult = direct_channel.queue_declare(queue='', exclusive=True)\nqueue_name = result.method.queue\n\ndirect_channel.queue_bind(\n    exchange='messages', queue=queue_name, routing_key=\"red\")\ndirect_channel.queue_bind(\n    exchange='messages', queue=queue_name, routing_key=\"white\")\n\ndef direct_callback(ch, method, properties, body):\n    # print(method)\n    if method.routing_key == \"red\":\n        print(\"RED\")\n        print(body)\n    if method.routing_key == \"white\":\n        print(\"WHITE\")\n    if method.routing_key == \"blue\":\n        print(\"BLUE\")\n    # print(f\" [x] {method.routing_key}:{body}\")\n\n\ndirect_channel.basic_consume(queue=queue_name, on_message_callback=direct_callback, auto_ack=True)\n\nprint(' [*] Waiting for logs. To exit press CTRL+C')\ndirect_channel.start_consuming()\n\n","repo_name":"Nergy101/PyRMQ","sub_path":"direct_server.py","file_name":"direct_server.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"7207198686","text":"import sqlite3\n\nwith sqlite3.connect(\"cars.db\") as connection:\n\n    c = connection.cursor()\n\n\n    #c.execute(\"\"\"CREATE TABLE orders\n    #             (make TEXT, model TEXT, order_date TEXT)\n    #          \"\"\")\n\n    orderdata = [\n        (\"Ford\", \"Focus\", \"2003-01-08\"),\n        (\"Ford\", \"Fiesta\", \"1999-08-11\"),\n        (\"Ford\", \"Mustang\", \"2010-11-19\"),\n        (\"Honda\", \"CR-V\", \"2018-01-13\"),\n        (\"Honda\", \"Accord\", \"2017-03-03\"),\n        (\"Ford\", \"Focus\", \"2018-05-30\"),\n        (\"Ford\", \"Fiesta\", \"2017-02-02\"),\n        (\"Ford\", \"Mustang\", \"2010-12-31\"),\n        (\"Honda\", \"CR-V\", \"2018-08-16\"),\n        (\"Honda\", \"Accord\", \"2016-07-12\"),\n        (\"Ford\", \"Focus\", \"2015-07-17\"),\n        (\"Ford\", \"Fiesta\", \"2011-11-01\"),\n        (\"Ford\", \"Mustang\", \"2008-09-11\"),\n        (\"Honda\", \"CR-V\", \"2005-04-02\"),\n        (\"Honda\", \"Accord\", \"2012-10-09\")\n    ]\n\n    #c.executemany(\"INSERT INTO orders VALUES(?, ?, ?)\", orderdata)\n\n    c.execute(\"\"\"SELECT orders.make, orders.model, inventory.quantity, orders.order_date \n    FROM orders, inventory WHERE orders.model =\n    inventory.model ORDER BY orders.make, orders.model ASC\"\"\")\n\n    rows = c.fetchall()\n\n    prevmake = \"\"\n\n    for r in rows:\n        if (prevmake != r[1]):\n            print(r[0], r[1])\n        print(r[2])\n        #print(r[3])\n        print(r[3])\n        prevmake = r[1]\n","repo_name":"abergmanson/sql1","sub_path":"orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"69903734102","text":"import copy\r\nclass Solution(object):\r\n    def update(self,ck,i,j,n):\r\n        for a in range(n):\r\n            ck[i][a] = False\r\n        for b in ck:\r\n            b[j] = False\r\n        c,d = i,j\r\n        while c<n and d>=0: ck[c][d]=False; c+=1; d-=1\r\n        c,d = i,j\r\n        while c>=0 and d>=0: ck[c][d]=False; c-=1; d-=1\r\n        c,d = i,j\r\n        while c>=0 and d<n: ck[c][d]=False; c-=1; d+=1\r\n        c,d = i,j\r\n        while c<n and d<n: ck[c][d]=False; c+=1; d+=1\r\n    \r\n    def find(self,r,tmp,ck,n,q,i,j):\r\n        while j<n:\r\n            if ck[i][j] and q>0:\r\n                
tmp[i][j]='Q'\r\n if q==1:\r\n new = []\r\n for k in tmp:\r\n new.append(''.join(k))\r\n r.append(new)\r\n q-=1\r\n snap = copy.deepcopy(ck)\r\n self.update(ck,i,j,n)\r\n if q>0:\r\n s,g = i+1,j\r\n self.find(r,tmp,ck,n,q,s,g)\r\n q+=1; ck = snap; tmp[i][j]='.'\r\n j+=1\r\n \r\n def solveNQueens(self, n):\r\n r = []\r\n tmp = [['.' for a in range(n)] for b in range(n)]\r\n ck = [[True for a in range(n)] for b in range(n)]\r\n q = n\r\n i,j = 0,0\r\n self.find(r,tmp,ck,n,q,i,j)\r\n return r\r\n \"\"\"\r\n :type n: int\r\n :rtype: List[List[str]]\r\n \"\"\"","repo_name":"garyblocks/leetcode","sub_path":"round1/51_N-Queens.py","file_name":"51_N-Queens.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"17638504975","text":"import sys\nimport io\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.models import load_model\nfrom konlpy.tag import Mecab\nimport csv\nimport re\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding=\"utf-8\")\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding=\"utf-8\")\n# print(sys.argv[1])\n\n# 받아온 리뷰 전처리\ndef predict(before_sentence, new_sentence):\n before_sentence.drop_duplicates(subset=['Review'], inplace=True)\n before_sentence = before_sentence.dropna(axis=0)\n\n before_sentence['Review'] = before_sentence['Review'].str.replace(\"[^ㄱ-ㅎㅏ-ㅣ가-힣]\", \"\")\n before_sentence['Review'].replace('', np.nan, inplace=True)\n before_sentence = before_sentence.dropna(axis=0)\n\n mecab = Mecab(dicpath=r\"C:\\mecab\\mecab-ko-dic\")\n stopwords = ['도', '는', '다', '의', '가', '이', '은', '한', '에', '하', '고', '을', '를', '인', '듯', '과', '와', '네', '들', '듯', '지', '임', '게', '사료']\n \n before_sentence['tokenized'] = before_sentence['Review'].apply(mecab.morphs)\n before_sentence['tokenized'] = before_sentence['tokenized'].apply(lambda x: [item for item in x if item not in stopwords])\n\n X_train = before_sentence['tokenized'].values\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(X_train)\n\n threshold = 2\n total_cnt = len(tokenizer.word_index) # 단어의 수\n rare_cnt = 0 # 등장 빈도수가 threshold보다 작은 단어의 개수를 카운트\n\n # 단어와 빈도수의 쌍(pair)을 key와 value로 받는다.\n for key, value in tokenizer.word_counts.items():\n # 단어의 등장 빈도수가 threshold보다 작으면\n if(value < threshold):\n rare_cnt = rare_cnt + 1\n\n vocab_size = total_cnt - rare_cnt + 2\n tokenizer = Tokenizer(vocab_size, oov_token = 'OOV') \n tokenizer.fit_on_texts(X_train)\n\n # 여기서부터\n new_sentence = re.sub(r'[^ㄱ-ㅎㅏ-ㅣ가-힣 ]','', new_sentence)\n new_sentence = mecab.morphs(new_sentence)\n new_sentence = [word for word in new_sentence if not word in stopwords]\n\n encoded = tokenizer.texts_to_sequences([new_sentence])\n pad_new = pad_sequences(encoded, maxlen = 416)\n\n loaded_model = load_model('best_model4.h5')\n score = float(loaded_model.predict(pad_new))\n\n f = open('new_result.csv', 'w', encoding='utf8', newline='')\n wr = csv.writer(f)\n if(score > 0.5):\n print(\"{:.2f}% 확률로 긍정 리뷰입니다.\".format(score * 100))\n wr.writerow([\"{:.2f}% 확률로 긍정 리뷰입니다.\".format(score * 100)])\n else:\n print(\"{:.2f}% 확률로 부정 리뷰입니다.\".format((1 - score) * 100))\n wr.writerow([\"{:.2f}% 확률로 부정 리뷰입니다.\".format((1 - score) * 100)])\n\n\n# 변환한 review csv 파일 읽기\ndf1 = pd.read_csv(sys.argv[1])\ndf2 = pd.read_csv(sys.argv[2])\n# print(df['review'].values[0])\npredict(df1, df2['review'].values[0])\n\n\n# for i in 
sys.argv:\n# print(\"반복문\",i)\n\n\n# def print_test(arg):\n# print(arg)\n\n# if __name__ == '__main__':\n# print_test(sys.argv[1])","repo_name":"gyeongyeonlee/dogfood","sub_path":"new_review_predict.py","file_name":"new_review_predict.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43885284125","text":"def merge(nums1, m, nums2, n):\n # Initialize pointers for nums1, nums2, and the merged array\n p1 = m - 1\n p2 = n - 1\n p = m + n - 1\n \n # Merge elements from the end\n while p1 >= 0 and p2 >= 0:\n if nums1[p1] <= nums2[p2]:\n nums1[p] = nums2[p2]\n p2 -= 1\n else:\n nums1[p] = nums1[p1]\n p1 -= 1\n p -= 1\n \n # Copy any remaining elements from nums2 to nums1\n nums1[:p2 + 1] = nums2[:p2 + 1]\nnums1 = [1, 2, 3, 0, 0, 0]\nm = 3\nnums2 = [2, 5, 6]\nn = 3\nmerge(nums1, m, nums2, n)\nprint(nums1) # Output: [1, 2, 2, 3, 5, 6]\n","repo_name":"RRaghulRajkumar/Python_dsa","sub_path":"question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"19538863634","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def averageOfSubtree(self, root: Optional[TreeNode]) -> int:\n \n counter = 0\n\n\n def calculateAvg(root):\n \n nonlocal counter\n\n if root == None:\n return (0,0)\n\n left = calculateAvg(root.left)\n right = calculateAvg(root.right)\n\n amount = left[0] + right[0] + 1\n sum = left[1] + right[1] + root.val\n\n avg = sum // amount\n \n if avg == root.val:\n counter += 1\n \n \n return (amount, sum)\n\n calculateAvg(root)\n \n return counter\n","repo_name":"surafel58/A2SV-progress-sheet","sub_path":"Camp Progress sheet/ount Nodes Equal to Average of Subtree.py","file_name":"ount Nodes Equal to Average of Subtree.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"10771540142","text":"\"\"\"\nhttps://nlp100.github.io/ja/ch01.html#04-%E5%85%83%E7%B4%A0%E8%A8%98%E5%8F%B7\n\"\"\"\n\nif __name__ == '__main__':\n q = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations\" \\\n \" Might Also Sign Peace Security Clause. 
Arthur King Can.\"\n index = [1, 5, 6, 7, 8, 9, 15, 16, 19]\n result = {}\n words = q.split()\n for idx, word in enumerate(words, 1):\n if idx in index:\n result[word[0]] = idx\n else:\n result[word[:2]] = idx\n print(result)\n","repo_name":"wakame1367/nlp_q100_2020","sub_path":"ch01/04_answer.py","file_name":"04_answer.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"34637653312","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pandas_market_calendars as mcal\n\nfrom scipy import signal\nfrom matplotlib import style, gridspec\nfrom alpaca_trade_api.rest import REST, TimeFrame\nfrom config import API_KEY_ID, SECRET_KEY_ID, BASE_URL\n\nfrom helper_classes import (\n StockHelper,\n AvoAttributesCalculator,\n CircleCalculator,\n PriceCalculator,\n Plotter,\n)\n\n# Magic number replaced with a constant\nWINDOW_RATIO = 22.90\n\nstyle.use(\"dark_background\")\n\n# Adjust display options\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\npd.set_option(\"display.width\", None)\n\nAPI_KEY_ID = os.getenv(\"APCA_API_KEY_ID\")\nSECRET_KEY_ID = os.getenv(\"APCA_API_SECRET_KEY\")\nBASE_URL = os.getenv(\"APCA_API_BASE_URL\")\n\nrest_api = REST(API_KEY_ID, SECRET_KEY_ID, BASE_URL)\n\n# Create instances of the classes\nstock_helper = StockHelper(rest_api)\navo_attributes_calculator = AvoAttributesCalculator()\ncircle_calculator = CircleCalculator()\nprice_calculator = PriceCalculator()\nplotter = Plotter()\n\n# Set up argparse for command line arguments\nparser = argparse.ArgumentParser(\n description=\"Process stock market data from Alpaca Markets\"\n)\nparser.add_argument(\"--command\", type=str, help=\"The command to run\")\nparser.add_argument(\n \"-s\", \"--symbol\", type=str, required=True, help=\"The stock symbol\"\n)\nparser.add_argument(\n \"-t\", \"--tail\",\n type=int,\n default=252,\n nargs=\"?\",\n help=\"The tail value (default: 252)\",\n)\nparser.add_argument(\n \"-w\", \"--window\", type=int, default=None, nargs=\"?\", help=\"The window value\"\n)\nparser.add_argument(\n \"-f\", \"--factor\",\n type=float,\n default=0.200,\n nargs=\"?\",\n help=\"The factor value (default: 0.20)\",\n)\nparser.add_argument(\n \"-p\", \"--plot\",\n type=int,\n default=1,\n nargs=\"?\",\n help=\"The plot value (default: 1)\",\n)\n\nargs = parser.parse_args()\n\nsymbol = args.symbol.upper()\ntail = args.tail\nwindow = args.window if args.window else round(tail // WINDOW_RATIO)\nfactor = args.factor\nplot_switch = args.plot\n\nif symbol is None:\n raise ValueError(\"Must enter symbol eg: AAPL\")\n\n# Find company name for the given stock symbol\nco_name = stock_helper.find_co_name(symbol)\n\n# Generate DataFrame with NYSE trading days for the specified tail length\ndf, start_date, end_date, ndays = stock_helper.nyse_trading_days_dataframe(tail)\n\n# Fetch historical data for the given symbol from Alpaca API\nhistorical_data = stock_helper.fetch_alpaca_data(\n symbol, start_date, end_date, tail\n)\n\n# Combine the df and bars DataFrames, keeping only rows with values in the close column\ndf = df.merge(historical_data, left_index=True, right_index=True, how=\"inner\")\n\n# Remove any blank rows from the combined DataFrame\ndf.dropna(axis=0, inplace=True)\n\n# Fetch the current price\ncurrent_price = rest_api.get_latest_trade(symbol).price\nif df['close'].iloc[-1] != 
current_price:\n df.loc[end_date] = current_price\ndf = df.tail(tail)\n\ndf = price_calculator.calculate_returns(df)\ndf = price_calculator.convert_to_cumulative_percent_change(df)\n\n# Set the first daily return to zero\ndaily_returns = df.daily_returns.values\ndaily_returns[0] = 0.0\n\n# Get the percent change for the current day\npercent_change_today = df[\"daily_returns\"].iloc[-1]\n\n# Create a temporary DataFrame to store AVO attributes\ndf_temp = pd.DataFrame(\n index=[symbol],\n columns=[\n \"nsamps\",\n \"filter\",\n \"mean\",\n \"std_dev\",\n \"last_vel\",\n \"detect_value\",\n \"factor\",\n \"trend_diff\",\n \"action\",\n \"action_price\",\n \"price\",\n \"isamp_ago\",\n \"percent\",\n ],\n)\n\n# Compute AVO attributes, including filter_close, final_min, final_max, velocity, and df_temp\n(\n df[\"filter_close\"],\n final_min,\n final_max,\n df[\"velocity\"],\n df_temp,\n) = circle_calculator.compute_circles(symbol, df, window, factor, current_price, df_temp)\n\n# Print the last 10 rows of the DataFrame and the df_temp DataFrame\nprint(df.tail(10))\nprint(\" \")\nprint(df_temp)\n\n# Add a column of zeros to the DataFrame\ndf[\"zero\"] = 0.0\n\n# Plot the graph based on the computed data\nplotter.plot_graph(\n df,\n final_min,\n final_max,\n symbol,\n co_name,\n percent_change_today,\n current_price,\n plot_switch,\n)\n\n\nif __name__ == \"__main__\":\n parser.parse_args()\n","repo_name":"regholl/StockTrendAnalyzer","sub_path":"StockTrendAnalyzer.class.py","file_name":"StockTrendAnalyzer.class.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18474438619","text":"#!/usr/bin/env python3\n\nchecksum = 0\n\nwith open(\".//input/input.txt\") as file:\n for line in file:\n nums = [int(n) for n in line.strip().split()]\n mx = max(nums)\n mn = min(nums)\n checksum += (mx - mn)\n\nprint(checksum)\n","repo_name":"doon/aoc","sub_path":"day2/py/day2a.py","file_name":"day2a.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6513399438","text":"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.key = val\n self.left = left\n self.right = right\nclass Solution: \n def height(self,root):\n \n # base condition when binary tree is empty\n if root is None:\n return 0\n return max(self.height(root.left), self.height(root.right)) + 1\n def isBalanced(self,root):\n \n # Base condition\n if root is None:\n return True\n \n # for left and right subtree height\n lh = self.height(root.left)\n rh = self.height(root.right)\n \n # allowed values for (lh - rh) are 1, -1, 0\n if (abs(lh - rh) <= 1) and self.isBalanced(\n root.left) is True and self.isBalanced( root.right) is True:\n return True\n \n # if we reach here means tree is not\n # height-balanced tree\n return False\n \n\n#Driver Code\nobj = Solution()\nobj2 = Solution()\nroot = TreeNode(3)\nroot.left = TreeNode(9)\nroot.right = TreeNode(20)\nroot.right.left = TreeNode(15)\nroot.right.right = TreeNode(7)\nif obj.isBalanced(root):\n print(\"Tree is balanced\")\nelse:\n print(\"Tree is not balanced\")\n# second tree\nroot2 = TreeNode(1)\nroot2.left = TreeNode(2)\nroot2.right = TreeNode(2)\nroot2.left.left = TreeNode(3)\nroot2.right.right = TreeNode(3)\nroot2.left.left.left =TreeNode(4) \nroot2.left.left.right =TreeNode(4) \nif obj2.isBalanced(root2):\n print(\"Tree is balanced\")\nelse:\n print(\"Tree is not 
balanced\")\n","repo_name":"crupib/python","sub_path":"challenges/balbintree.py","file_name":"balbintree.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41853400687","text":"# criação da classe para definir cada nó da lista ligada\nclass Node:\n def __init__(self, key, value):\n # cada nó armazena uma chave (key) e um valor (value)\n self.key = key\n self.value = value\n # o próximo nó da lista ligada\n self.next = None\n\n# criação da classe da tabela hash com encadeamento externo\nclass HashTable:\n def __init__(self, size=1000):\n # inicialização da tabela hash com um tamanho específico (padrão: 1000)\n self.size = size\n self.table = [None] * self.size\n \n # criação da função de espalhamento\n def hash_function(self, key):\n # calculará o valor hash da chave usando uma função simples (soma do valor ASCII de cada caractere)\n # e divide por 6 (número escolhido para tentar minimizar colisões)\n # em seguida, usará o operador de módulo para garantir que o resultado esteja dentro do tamanho da tabela\n hash_value = sum(ord(char) for char in key) // 6 % self.size\n return hash_value\n \n # função para inserir um novo elemento na tabela hash\n def insert(self, key, value=1):\n # calculará o valor hash da chave\n hash_value = self.hash_function(key)\n # se a posição na tabela hash estiver vazia, insere o novo nó nessa posição\n if self.table[hash_value] is None:\n self.table[hash_value] = Node(key, value)\n # se a posição já estiver ocupada, percorrerá a lista ligada até encontrar o nó com a chave ou chegar ao final da lista\n else:\n current_node = self.table[hash_value]\n while True:\n # se encontrar o nó com a chave, incrementa o valor\n if current_node.key == key:\n current_node.value += value\n break\n # se chegar ao final da lista, insere o novo nó\n elif current_node.next is None:\n current_node.next = Node(key, value)\n break\n # senão, continua percorrendo a lista\n else:\n current_node = current_node.next\n \n # função que busca a contagem de um elemento na tabela hash\n def search(self, key):\n # calcula o valor hash da chave\n hash_value = self.hash_function(key)\n # se a posição na tabela hash estiver vazia, retorna 0\n if self.table[hash_value] is None:\n return 0\n # se a posição estiver ocupada, percorre a lista ligada até encontrar o nó com a chave ou chegar ao final da lista\n else:\n current_node = self.table[hash_value]\n while current_node is not None:\n # se encontrar o nó com a chave, retorna o valor\n if current_node.key == key:\n return current_node.value\n # senão, continua percorrendo a lista\n else:\n current_node = current_node.next\n # se chegar ao final da lista sem encontrar o nó, retorna 0\n return 0\n \n \n def items(self):\n items_list = []\n # percorre todos os índices da tabela hash\n for item in self.table:\n current_node = item\n # percorre a lista ligada de cada índice, adicionando as chaves e valores à lista de tuplas\n while current_node is not None:\n items_list.append((current_node.key, current_node.value))\n current_node = current_node.next\n return items_list\n\n# leitura do arquivo \"sequences.fasta\" com o genoma do coronavírus\nwith open('sequences.fasta', 'r') as f:\n genome = f.read().replace('\\n', '')\n\n# cria a tabela hash\nhash_table = HashTable()\n\n# ler os blocos de 6 bases nitrogenadas e inserir na tabela hash\nfor i in range(0, len(genome) - 5, 6):\n block = genome[i:i+6] # obter o bloco de 6 bases nitrogenadas\n 
    hash_table.insert(block) # insert the block into the hash table\n\n# count how many blocks repeat\nrepeated_blocks = {}\nfor i in range(0, len(genome) - 5, 6):\n    block = genome[i:i+6] # grab the 6-base block\n    count = hash_table.search(block) # get the block's occurrence count from the hash table\n    if count > 1: # if the block appears more than once\n        repeated_blocks[block] = count # add the block and its count to the repeated_blocks dict\n\n# Print the repeated blocks and their counts in ascending order\n# This \"for\" exists ONLY to show the result in the IDE; to see it, just remove the # from the command\n#for block, count in sorted(repeated_blocks.items(), key=lambda x: x[1]):\n#    print(f'Block {block} repeats {count} times in the genome.') \n\n# write the \"count_blocos_genomas_covid.txt\" file in ascending order\nwith open(\"count_blocos_genomas_covid.txt\", \"w\") as file:\n    file.write('Block\\tOccurrences\\n') # write the first line of the file\n    for key, value in sorted(hash_table.items(), key=lambda x: x[1]): # iterate over the hash table items in ascending order of count\n        file.write(f\"{key}\\t{value}\\n\") # write the block and its count to the file\n","repo_name":"Taciana3090/Algoritmos-e-Estrutura-de-Dados","sub_path":"Semana 12/tabelaHash_genoma_covid.py","file_name":"tabelaHash_genoma_covid.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5312674719","text":"import RPi.GPIO as GPIO\nfrom hx711 import HX711\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef calculate_calibration_factor(berat_tanpa_beban, berat_dengan_beban):\n    # compute the scale factor: (loaded weight - no-load reading) / loaded weight\n    calibration_factor = (berat_dengan_beban - berat_tanpa_beban) / (berat_dengan_beban - 0)\n    return calibration_factor\n\ndef main():\n    # configure the DT and SCK pins\n    DT = 6\n    SCK = 5\n\n    # initialize the HX711\n    hx = HX711(DT, SCK)\n\n    hx.reset()\n\n    print(\"Place a load on the sensor and enter its weight.\")\n    berat_dengan_beban = float(input(\"Weight with load (grams): \"))\n    \n    print(\"Wait a moment, then remove the load from the sensor.\")\n    input(\"Press Enter after removing the load...\")\n\n    # read the no-load weight\n    berat_tanpa_beban = hx.get_weight_mean(5)\n\n    # compute the scale factor\n    calibration_factor = calculate_calibration_factor(berat_tanpa_beban, berat_dengan_beban)\n    \n    print(f\"Calibration Factor: {calibration_factor:.2f}\")\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Alta02/SIC4-Stempels","sub_path":"Script/Testing/Berat/kalibrasi.py","file_name":"kalibrasi.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"4079864038","text":"'''\nRuntime: 20 ms, faster than 100.00% of Python online submissions for Search a 2d Matrix.\n'''\n\nclass Solution(object):\n    def searchMatrix(self, matrix, target):\n        \"\"\"\n        :type matrix: List[List[int]]\n        :type target: int\n        :rtype: bool\n        \"\"\"\n        if not matrix:\n            return False\n\n        h = len(matrix)\n        if h == 1:\n            return target in matrix[0]\n\n        w = len(matrix[0])\n        for row in matrix:\n            if row[0] <= target <= row[w-1]:\n                return self.searchMatrix([row], target)\n\n        return 
False\n","repo_name":"SnapWars/CodingWar","sub_path":"david/sessions/2018/medium/74_search_a_2d_matrix.py","file_name":"74_search_a_2d_matrix.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18369517946","text":"\"\"\"\nFile: titanic_level1.py\nName: Rita Tang\n----------------------------------\nThis file builds a machine learning algorithm from scratch \nby Python codes. We'll be using 'with open' to read in dataset,\nstore data into a Python dict, and finally train the model and \ntest it on kaggle. This model is the most flexible one among all\nlevels. You should do hyperparameter tuning and find the best model.\n\"\"\"\n\nimport math\nimport util as util\n\nTRAIN_FILE = 'titanic_data/train.csv'\nTEST_FILE = 'titanic_data/test.csv'\n\n\ndef data_preprocess(filename: str, data: dict, mode='Train', training_data=None):\n \"\"\"\n :param filename: str, the filename to be processed\n :param data: dict[str: list], key is the column name, value is its data\n :param mode: str, indicating the mode we are using\n :param training_data: dict[str: list], key is the column name, value is its data\n (You will only use this when mode == 'Test')\n :return data: dict[str: list], key is the column name, value is its data\n \"\"\"\n # PassengerId, Survived, Pclass, Name, Sex, Age, SibSp, Parch, Ticket, Fare, Cabin, Embarked\n\n with open(filename, 'r') as f:\n first = True\n for line in f:\n if first:\n lst1 = line.strip().split(',')\n if mode == 'Train':\n for i in range(len(lst1)):\n if i == 0 or i == 3 or i == 8 or i == 10:\n pass\n else:\n data[lst1[i]] = []\n else:\n for i in range(len(lst1)):\n if i == 0 or i == 2 or i == 7 or i == 9:\n pass\n else:\n data[lst1[i]] = []\n first = False\n else:\n lst2 = line.strip().split(',')\n if mode == 'Train':\n if lst2[6] == '' or lst2[12] == '': # Age, embarked\n continue\n data[lst1[1]].append(int(lst2[1]))\n start = 2\n else:\n start = 1\n\n for j in range(len(lst2)):\n if j == start: # Pclass\n data['Pclass'].append(int(lst2[j]))\n elif j == start+3: # Sex\n if lst2[j] == 'male':\n data['Sex'].append(1)\n else:\n data['Sex'].append(0)\n elif j == start+4: # Age\n if lst2[j] != '':\n data['Age'].append(float(lst2[j]))\n else:\n mean = round((sum(training_data['Age']) / len(training_data['Age'])), 3)\n data['Age'].append(mean)\n elif j == start+5: # SibSp\n data['SibSp'].append(int(lst2[j]))\n elif j == start+6: # Parch\n data['Parch'].append(int(lst2[j]))\n elif j == start+8: # Fare\n if lst2[j] != '':\n data['Fare'].append(float(lst2[j]))\n else:\n mean = round((sum(training_data['Fare']) / len(training_data['Fare'])), 3)\n data['Fare'].append(mean)\n elif j == start+10: # Embarked\n if lst2[j] == 'S':\n data['Embarked'].append(0)\n elif lst2[j] == 'C':\n data['Embarked'].append(1)\n elif lst2[j] == 'Q':\n data['Embarked'].append(2)\n else:\n data['Embarked'].append(0)\n return data\n\n\ndef one_hot_encoding(data: dict, feature: str):\n \"\"\"\n :param data: dict[str, list], key is the column name, value is its data\n :param feature: str, the column name of interest\n :return data: dict[str, list], remove the feature column and add its one-hot encoding features\n \"\"\"\n if feature == 'Sex':\n data['Sex_0'] = []\n data['Sex_1'] = []\n for sex in data[feature]:\n data['Sex_0'].append(1) if sex == 0 else data['Sex_0'].append(0)\n data['Sex_1'].append(1) if sex == 1 else data['Sex_1'].append(0)\n elif feature == 'Pclass':\n data['Pclass_0'] = []\n 
data['Pclass_1'] = []\n data['Pclass_2'] = []\n for pcl in data[feature]:\n data['Pclass_0'].append(1) if pcl == 1 else data['Pclass_0'].append(0)\n data['Pclass_1'].append(1) if pcl == 2 else data['Pclass_1'].append(0)\n data['Pclass_2'].append(1) if pcl == 3 else data['Pclass_2'].append(0)\n elif feature == 'Embarked':\n data['Embarked_0'] = []\n data['Embarked_1'] = []\n data['Embarked_2'] = []\n for emb in data[feature]:\n data['Embarked_0'].append(1) if emb == 0 else data['Embarked_0'].append(0)\n data['Embarked_1'].append(1) if emb == 1 else data['Embarked_1'].append(0)\n data['Embarked_2'].append(1) if emb == 2 else data['Embarked_2'].append(0)\n data.pop(feature)\n return data\n\n\ndef normalize(data: dict):\n \"\"\"\n :param data: dict[str, list], key is the column name, value is its data\n :return data: dict[str, list], key is the column name, value is its normalized data\n \"\"\"\n\n for key, value in data.items():\n max_v = max(data[key])\n min_v = min(data[key])\n for i in range(len(value)):\n new_val = (value[i] - min_v) / (max_v - min_v)\n value[i] = new_val\n\n return data\n\n\ndef learnPredictor(inputs: dict, labels: list, degree: int, num_epochs: int, alpha: float):\n \"\"\"\n :param inputs: dict[str, list], key is the column name, value is its data\n :param labels: list[int], indicating the true label for each data\n :param degree: int, degree of polynomial features\n :param num_epochs: int, the number of epochs for training\n :param alpha: float, known as step size or learning rate\n :return weights: dict[str, float], feature name and its weight\n \"\"\"\n # Step 1 : Initialize weights\n weights = {} # feature => weight\n keys = list(inputs.keys())\n if degree == 1:\n for i in range(len(keys)):\n weights[keys[i]] = 0\n elif degree == 2:\n for i in range(len(keys)):\n weights[keys[i]] = 0\n for i in range(len(keys)):\n for j in range(i, len(keys)):\n weights[keys[i] + keys[j]] = 0\n # Step 2 : Start training\n for epoch in range(num_epochs):\n # Step 3 : Feature Extract\n for i in range(len(labels)):\n feature_v = {}\n if degree == 1:\n for j in range(len(keys)):\n feature_v[keys[j]] = inputs[keys[j]][i]\n else:\n for j in range(len(keys)):\n feature_v[keys[j]] = inputs[keys[j]][i]\n for k in range(j, len(keys)):\n feature_v[keys[j] + keys[k]] = inputs[keys[j]][i] * inputs[keys[k]][i]\n # Step 4 : Update weights\n y = labels[i]\n k = util.dotProduct(feature_v, weights)\n h = 1/(1 + math.exp(-k))\n util.increment(weights, -alpha * (h - y), feature_v)\n\n return weights\n","repo_name":"Rita-Ning/AI-Project","sub_path":"TitanicSurvival_Prediction/titanic_level1.py","file_name":"titanic_level1.py","file_ext":"py","file_size_in_byte":7439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"37046825384","text":"from sklearn import linear_model \n\nX = [[200,2, 8], [300,3, 10]]\n\nY = [[4,7], [5,8]]\n\nregr = linear_model.LinearRegression()\nregr.fit(X, Y) \n\n\n#predict the CO2 emission of a car where the weight is 2300kg, and the volume is 1300cm3:\npredictedCO2 = regr.predict([[2300, 1300, 3000]])\n\nprint(predictedCO2)","repo_name":"kiteros/MPIK_PMTsim","sub_path":"tests/pytorchtest.py","file_name":"pytorchtest.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"22573632778","text":"# import modules & set up logging\nimport gensim, logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', 
level=logging.INFO)\n\n\nsentences = gensim.models.word2vec.LineSentence(\"sentences.txt\")\n\nf = open('sentences.txt')\nlines = f.readlines();\nall_words = \"\"\nfor line in lines:\n\twords = line.split(' ')\n\tfor word in words:\n\t\tif not word.strip() == \"\":\n\t\t\tall_words += word + \" \"\n\nall_words = all_words.split(' ')\n\nmodel = gensim.models.Word2Vec(sentences, size=50, min_count = 1)\nf = open('vocab.txt')\nvocab = f.readlines()\nf.close()\n\nf = open('temp.txt','w')\nword2vec_vocab = \"\"\nfor word in vocab:\n\tif word.strip() in all_words and not word.strip().isspace():\n\t\tword2vec_vocab += word.strip() + \" \"\n\t\ts = str(model[word.strip()])\n\t\tf.write(s + \"\\n\")\nf.close()\n\nword2vec_vocab = word2vec_vocab.split(\" \")\nword2vec_vocab = word2vec_vocab[:-1]\n\n# gather trained vocab\nf = open('word2vecVocab.txt', 'w')\nfor word in word2vec_vocab:\n\tf.write(word + \"\\n\")\nf.close()\n\nf = open('temp.txt')\nlines = f.readlines();\nf.close()\n\n# format output\nf = open('word2vec.txt', 'w')\nvec = \"\"\nfor line in lines:\n\tline = line.rstrip('\\n')\n\tif line.endswith(\"]\"):\n\t\tvec += line\n\t\tvec = vec[2:-1]\n\t\tf.write(vec + \"\\n\")\n\t\tvec = \"\"\n\telse:\n\t\tvec += line\nf.close()","repo_name":"Jasonvdm/neuralnet","sub_path":"java/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"33015986904","text":"def ngr(arr1,arr2):\n stack = [arr2[0]]\n dic = dict()\n n1 = len(arr1)\n n2 = len(arr2)\n for i in range(1,n2):\n while(stack and stack[-1] < arr2[i]):\n val = stack.pop()\n dic[val] = arr2[i]\n stack.append(arr2[i])\n while(stack):\n val = stack.pop()\n dic[val] = -1\n\n\n ans = [0]*n1\n for key,val in enumerate(arr1):\n ans[key] = dic[val]\n return ans\n\na1 = [2,4]\na2 = [1,2,3,4]\nprint(ngr(a1,a2))\n\n ","repo_name":"KisloTAooAnkit/Python-Programs","sub_path":"SQ/ngr1.py","file_name":"ngr1.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"30822687333","text":"import pygame\nimport sys\nimport random\nimport time\nfrom pygame.locals import *\nfrom spaceship import *\nfrom missile1 import *\nfrom missile2 import *\nfrom alien import *\n\n\nclock = pygame.time.Clock()\nbegin = time.time()\n\ndis_width = 800\ndis_height = 800\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\nred = (255, 0, 0)\ngreen = (0, 155, 0)\nFPS = 60\n\nblock_size = 15\nscore = 0\n\n\nclass Game_SPACE_INVADERS():\n\n def __init__(self):\n pygame.init()\n dis_width = 800\n dis_height = 800\n self.gameDisplay = pygame.display.set_mode([dis_width, dis_height])\n pygame.mouse.set_visible(0)\n pygame.display.set_caption('Space Invaders')\n\n self.spaceship = Spaceship(self.gameDisplay.get_rect())\n\n self.aliens = []\n\n xpos = random.randint(1, 7) * 100\n ypos = random.randint(1, 2) * 100\n start = time.time()\n self.aliens.append(Alien(xpos, ypos, start))\n\n def gameLoop(self):\n global begin\n gameExit = False\n\n while not gameExit:\n clock.tick(FPS)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameExit = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n gameExit = True\n self.spaceship.event_handler(event)\n\n self.spaceship.update_event()\n\n self.spaceship.Missile1_collision(self.aliens)\n\n for i in range(len(self.aliens)-1, -1, -1):\n if not self.aliens[i].active:\n del 
self.aliens[i]\n\n            self.spaceship.Missile2_collision(self.aliens)\n\n            for i in range(len(self.aliens)-1, -1, -1):\n                end = time.time()\n                if not self.aliens[i].active:\n                    del self.aliens[i]\n                elif end - self.aliens[i].start > self.aliens[i].life:\n                    del self.aliens[i]\n\n            self.gameDisplay.fill((white))\n            self.spaceship.display(self.gameDisplay)\n\n            end = time.time()\n            if end - begin > 10:\n                xpos = random.randint(1, 7) * 100\n                ypos = random.randint(1, 2) * 100\n                start = time.time()\n                self.aliens.append(Alien(xpos, ypos, start))\n                begin = time.time()\n\n            for alien in self.aliens:\n                alien.draw(self.gameDisplay)\n\n            pygame.display.update()\n\n            # display score\n            text = 'Score: ' + str(self.spaceship.score)\n            font = pygame.font.SysFont('arial', 30)\n            text = font.render(text, True, black)\n            self.gameDisplay.blit(text, (20, 20))\n\n            pygame.display.update()\n\n        pygame.quit()\n\nGame_SPACE_INVADERS().gameLoop()\n","repo_name":"Akshayy99/Space-Invaders-Game","sub_path":"main_game.py","file_name":"main_game.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25686118798","text":"# ROT13 (BOJ 11655): rotate each alphabetic character by 13 places, wrapping around\n# e.g. 'One' becomes 'Bar', and applying ROT13 twice restores the original string\nst = input()\nlst = []\nfor i in range(len(st)):\n    if st[i].isupper(): # uppercase letter\n        if ord(st[i]) + 13 > ord('Z'):\n            elem = chr((ord('A') + ord(st[i]) - ord('Z') + 12))\n        else:\n            elem = chr(ord(st[i]) + 13)\n    elif st[i].islower(): # lowercase letter\n        if ord(st[i]) + 13 > ord('z'):\n            elem = chr((ord('a') + ord(st[i]) - ord('z') + 12))\n        else:\n            elem = chr(ord(st[i]) + 13)\n    else: # non-alphabetic character (digits, spaces)\n        elem = st[i]\n\n    lst.append(elem)\n\nprint(''.join(lst))\n","repo_name":"KINHYEONJI/mad-algorithm","sub_path":"AUGUST/shlee/BOJ11655.py","file_name":"BOJ11655.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"74517141780","text":"'''\r\nRead data files from source folder and write to master file\r\n'''\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport json\r\nfrom datetime import datetime\r\n\r\n\r\n# Function for data ingestion\r\ndef merge_multiple_dataframe(input_folder_path,\r\n                             output_folder_path,\r\n                             output_file_name):\r\n    # check for datasets and record\r\n    filenames = os.listdir(os.getcwd() + '/' + input_folder_path + '/')\r\n\r\n    # initialize empty df\r\n    global compiled_df\r\n    compiled_df = pd.DataFrame()\r\n\r\n    # import and compile datasets\r\n    if filenames:\r\n        for file in filenames:\r\n            data = pd.read_csv(os.path.join(os.getcwd(), input_folder_path, file))\r\n            compiled_df = pd.concat([compiled_df, data], ignore_index=True)\r\n\r\n    # drop duplicates and write to output path\r\n    compiled_df.drop_duplicates(inplace=True)\r\n    compiled_df.to_csv(os.path.join(os.getcwd(), output_folder_path, output_file_name))\r\n\r\n\r\n# Function for recording ingestion\r\ndef record_ingestion(input_folder_path, output_folder_path):\r\n\r\n    # get file names\r\n    location = os.path.join(os.getcwd(), input_folder_path)\r\n    filenames = os.listdir(os.getcwd() + '/' + input_folder_path + '/')\r\n    time = str(datetime.now())\r\n    record = {'location':location,\r\n              'files': filenames,\r\n              'time at ingestion':time}\r\n\r\n    # write to txt file\r\n    with open(os.path.join(output_folder_path, 'ingestedfiles.txt'), 'w') as f:\r\n        f.write(json.dumps(record))\r\n\r\n\r\nif __name__ == '__main__':\r\n    # Load config file\r\n    with open('config.json', 'r') as f:\r\n        config = json.load(f)\r\n\r\n    # Get input and output paths\r\n    input_folder_path = 
config['input_folder_path']\r\n output_folder_path = config['output_folder_path']\r\n output_file_name = 'finaldata.csv'\r\n\r\n # Ingest data\r\n merge_multiple_dataframe(input_folder_path,\r\n output_folder_path,\r\n output_file_name)\r\n\r\n # Record ingestion\r\n record_ingestion(input_folder_path,\r\n output_folder_path)\r\n","repo_name":"imanzaf/udacity-dynamic-risk-assessment","sub_path":"src/ingestion.py","file_name":"ingestion.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"11663189146","text":"# Write a Python function that takes a number as a parameter and check the number is prime or not.\n\n\ndef isPrimeNumber(number):\n if number > 1:\n for item in range(2, number):\n if (number % item) == 0:\n print(\"Number is not prime\")\n break\n else:\n print(\"Number is prime\")\n\n else:\n print(\"Number is not prime\")\n\n\nisPrimeNumber(13)\n","repo_name":"AashishTuladhar/IWAcademyAssignments","sub_path":"Python Assignment I/Functions/Q09.py","file_name":"Q09.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74840295380","text":"def Pancreases(functions, health, disease, recover, adaptations,timeChart, misc):\r\n\r\n # functions\r\n growth_rate = None\r\n pain_perception = None\r\n insulin_production = None\r\n glucose_regulation = None\r\n digestive_enzyme_production = None\r\n\r\n # health\r\n pancreases_health = None\r\n insulin_health = None\r\n enzyme_health = None\r\n\r\n # disease\r\n cancer = None\r\n disorder = None\r\n genetic_disease = None\r\n diabetes = None\r\n susceptibility_disease = None\r\n allergies = None\r\n wound_density = None\r\n\r\n # recover\r\n resistance_genetic_disease = None\r\n wound_heal = None\r\n\r\n # adaptations\r\n environmental_adaptations = None\r\n\r\n # timeChart\r\n developmental_timing = None\r\n\r\n # misc\r\n size = None\r\n shape = None\r\n strength = None\r\n age_factor = None\r\n\r\n functions = [growth_rate, pain_perception, insulin_production, glucose_regulation, digestive_enzyme_production]\r\n health = [pancreases_health, insulin_health, enzyme_health]\r\n disease = [cancer, disorder, genetic_disease, diabetes, susceptibility_disease, allergies, wound_density]\r\n recover = [resistance_genetic_disease, wound_heal]\r\n adaptations = [environmental_adaptations]\r\n timeChart = [developmental_timing]\r\n misc = [size, shape, age_factor, strength]","repo_name":"uswamaryam12/TrueDNA","sub_path":"TrueDNA v2/Internal Organs/Least Important/pancrease.py","file_name":"pancrease.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27206945353","text":"import os\n\ndef parse_line(line):\n fields = []\n field = \"\"\n in_quotes = False\n\n for char in line.strip(): # strip the line to remove the newline character at the end\n if char == '\"':\n in_quotes = not in_quotes # toggle in_quotes status\n elif char == ',' and not in_quotes:\n fields.append(field) # dont strip here, it removes intended spaces\n field = \"\"\n else:\n field += char\n\n fields.append(field) # add the last field, which could be empty if the line ends with a comma\n\n return fields\n\ndef load_data(filepath):\n try:\n with open(filepath, \"r\") as file:\n lines = file.readlines()\n data = [parse_line(line) for line in lines]\n return data\n except 
FileNotFoundError:\n        print(f\"The file {filepath} was not found.\")\n        return []\n\ndef add_column(data):\n    header = data[0]\n    rows = data[1:]\n\n    new_header = \"ESTIMATE_ROUNDED\"\n    header.append(new_header)\n\n    estimate_index = header.index(\"ESTIMATE\")\n\n    for row in rows:\n        try:\n            # try to convert to float and round to the nearest whole number\n            estimate_value = float(row[estimate_index])\n            new_value = round(estimate_value)\n        except ValueError:\n            # if it's not a number just use the original value\n            new_value = row[estimate_index]\n\n        row.append(str(new_value))\n\n\n    return [header] + rows\n\ndef save_data(directory, filename, data):\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\n    file_header = \"Death Rates for Suicide by Sex, Race, Hispanic Origin, and Age\"\n    file_footer = \"Report by: Cayden Young, Date: 10/31/2023\"\n\n    with open(os.path.join(directory, filename), \"w\") as file:\n        file.write(file_header + \"\\n\") # file header\n        for row in data:\n            formatted_row = []\n            for field in row:\n                # check if the field contains a comma or if it's an empty field that should remain empty\n                if ',' in field:\n                    formatted_row.append(f'\"{field}\"') # wrap field in quotes if it contains a comma\n                else:\n                    formatted_row.append(field) # keep field as is (which could be an empty string)\n            file.write(\",\".join(formatted_row) + \"\\n\")\n        file.write(file_footer) # file footer\n\n","repo_name":"lavyyy/pui-cs-monorepo","sub_path":"assignments/CSCI 23000/week10/lab9_utils.py","file_name":"lab9_utils.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"31075837767","text":"# Author : Joon Son\n# Date : 17/Nov/2017\n# Description :\n# read multiple choice questions from csv file\n# display question and get answer from the user\n# calculate score as a percentage and display it\n\n# pseudo code\n# 1. open a file which has a series of information regarding questions\n# 2. iterate the code below for each question\n# 3. count up the value of numberOfQ (number of questions)\n# 4. print question\n# 5. print multiple choices with mark\n# 6. get answer from the user with validity test\n# 7. compare user input with the answer from the content\n# 8. if the input is correct, count up the value of score\n# 9. after end of quiz\n# 10. if the number of questions is greater than zero\n# 11. calculate percentage score\n# 12. and print out the result\n\nimport csv\nmarks = ['a', 'b', 'c', 'd'] # list of marks for multiple choices\nscore = 0\nnumberOfQ = 0 # number of questions\nQUIZ5 = \"questions/questions_5.txt\"\nQUIZ3 = \"questions/questions_3.txt\"\nACCESS_READ = \"r\"\n\ndef get_answer(prompt):\n    # validity test\n    # allow only [a, b, c, d]\n    while True:\n        result = input(prompt)\n        if result not in marks:\n            print(\"Invalid Input !!! 
***Please enter choice (a, b, c, d)***\")\n            continue\n        else:\n            break\n    return result\n\nwith open(QUIZ3, ACCESS_READ) as quizCSV:\n    questions = csv.reader(quizCSV)\n    for question in questions:\n        numberOfQ += 1\n        for i in range(len(question)-1): # question + multiple choices\n            if i == 0: # position for question\n                print(question[i])\n            else: # multiple choices\n                print(marks[i-1]+\") \"+question[i])\n        u_answer = get_answer(\"Enter choice (a-d): \") # get user answer\n        if marks.index(u_answer) == int(question[len(question)-1]):\n            score += 1\n        print(\"\")\n    if numberOfQ > 0:\n        score_per = score / numberOfQ * 100\n        print(\"Your score is: {correct}/{total} ({percent:.1f}%)\".format(correct=score, total=numberOfQ, percent=score_per))\n\n\n","repo_name":"joooooonson/LOGICnPROG_2017_Fall","sub_path":"Assignment5/quiz.py","file_name":"quiz.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5201507956","text":"from aiogram import types\n\nimport config\n\n\ndef mainkb(user_id):\n    kb = types.ReplyKeyboardMarkup(resize_keyboard=True)\n    kb.row(\"🕐 Book a patient visit\")\n    kb.row(\"ℹ Information\", \"📗 Statistics\")\n    kb.row(\"🖥 Departments\", \"🧑 Patients\")\n    kb.row(\"🔍 Search\", \"👨‍⚕️ Doctors\")\n    if user_id in config.ADMINS:\n        kb.row(\"👥 Visitors\", \"📕 Panel\")\n        kb.row(\"➕ Add a patient\")\n    return kb\n\ndef department():\n    inline_kb_full = types.InlineKeyboardMarkup()\n    inline_kb_full.row(types.InlineKeyboardButton(\"Endocrinology\", callback_data=\"endocrinology\"), \n                       types.InlineKeyboardButton(\"Therapy\", callback_data=\"therapy\"))\n    inline_kb_full.row(types.InlineKeyboardButton(\"Cardiology\", callback_data=\"cardiology\"), \n                       types.InlineKeyboardButton(\"Neurology\", callback_data=\"neurology\")) \n    inline_kb_full.row(types.InlineKeyboardButton(\"Surgery\", callback_data=\"surgical\"))\n    return inline_kb_full\n\n\n# Information\n# Statistics\n# Departments\n# Search\n# Book a patient visit\n# Doctors\n# Panel\n# Patients\n\n####\n\n# Visitors\n# Add a patient","repo_name":"Makhkets/Hospital","sub_path":"telegramBot/keyboards/kb.py","file_name":"kb.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"25188076059","text":"import csv\n\nORDER_FILE_PATH = r'C:\Users\mylaptop\Desktop\brew_app\data_\order_file.csv'\n\ndef read_csv():\n    # map each ordered item to its price, e.g. {'Coffee': ['2.50']}\n    with open(ORDER_FILE_PATH, 'r') as file:\n        reader = csv.reader(file)\n        temp_dict = {}\n        for row in reader:\n            temp_dict[row[0]] = [row[1]]\n    return temp_dict\n\n# menu price reference\nPRICES = {'Coffee': '2.50', 'Tea': '2.00', 'Green Tea': '2.00', 'Water': '1.00'}\n\norders = read_csv()\nprint(orders.values())\n","repo_name":"Caitland/Brewapp","sub_path":"total.py","file_name":"total.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4610918453","text":"import matplotlib.pyplot as plt # testing file edit\nfrom matplotlib.backends.backend_tkagg import (FigureCanvasTkAgg, NavigationToolbar2Tk)\nimport xlrd2 # the original xlrd doesn't support .xlsx files but this fork is more actively maintained\nimport os # to check directory\nimport numpy as np\nimport tkinter as tk\nfrom tkinter import ttk\nfrom file2dataset import file2dataset\nfrom dataProcesses import *\nimport matplotlib as mpl\nimport logging\nlogging.getLogger('matplotlib.font_manager').disabled = True #because it prints out a ton of warnings if the selected font 
is not found\nfrom Graphing import plotSpecCapCurves, plotMeanVoltageCurves, plotVoltCurves, plotdQdVCurves, plotCoulombicEfficiencyCurves\nfrom Export2Excel import createSheet\nimport batteryDataSet\nfrom pickle import dump,load\nglobal batteryData\nglobal dataSets\nglobal max_cycle\nglobal max_cycle_list\nglobal plotfig\nglobal fontEntryMode\nglobal is_bdms\nglobal combined_bdms_files\nglobal dataset_selection\nglobal data_selection_tabs\nglobal dataset_checkboxes\nglobal screen_size\nglobal data_labels\nglobal set_data_label\nglobal update_data_label\n\nis_bdms = False\ncombined_bdms_files = []\ndata_selection_tabs = []\ndataset_selection = []\ndataset_checkboxes = []\ndata_labels = []\nset_data_label = []\nupdate_data_label = []\n\n#Landt Format\n#SHEET_NAMES_LANDT = np.array(['Cycle-Tab','Step-Tab','Record-Tab'])\n#COL_NAMES_LANDT = np.array([['Cycle','CapC','CapD','SpeCapC','SpeCapD','Efficiency','EnergyC','EnergyD','MedVoltC','MedVoltD','CC-Cap','CC-Perc',\\\n\t#'PlatCapD','PlatSpeCapD','PlatPercD','PlatTimeD','CaptnC','CaptnD','rd','rd2','SpeEnergyC','SpeEnergyD','EndVoltD','RetentionD','DCIR_C',\\\n\t#'DCIR_D'],\\\n\t#['Step','Mode','Period','Capacity','SpeCap','Power','Capacitance','SpeEnergy','MedVolt','StartVolt','EndVolt','','','','','','','','','',\n\t#'','','','','',''],\\\n\t#['Record','Test Time',\\\n\t#'Current','Capacity','SpeCap','SOC|DOD','Voltage','Energy','SpeEnergy','AuxTemp','AuxVolt','SysTime','[All Auxiliary-Chl]*','Cycle-Index',\\\n\t#'Step-Index','Step-State','','','','','','','','','']]) #add empty strings for consistent array dimensions\n\n#Arbin Format\n#COL_NAMES_ARBIN = np.array(['Data_Point','Test_Time(s)','Date_Time','Step_Time(s)','Step_Index','Cycle_Index','Current(A)','Voltage(V)',\\\n\t#'Charge_Capacity(Ah)','Discharge_Capacity(Ah)','Charge_Energy(Wh)','Discharge_Energy(Wh)','dV/dt(V/s)']) #a few other column names but they aren't typically populated\n\n#import N files with electrochemical datasets\n#total_files = 2\n#file_names = empty([total_files,2]) #second column to specify number of datasets for each file \n\n#for fn in range(total_files):\n\t\t#find number of datasets in file and construct array\n\t#\tfile_names[fn,:] ='ANO Nb doping Data/Half Cell Tests/Temperature Optimization/093_ANO_LNO_700-r2_004_3-1.xlsx'\n\t\t\n#wb = xlrd2.open_workbook(workbook_location)\n#cyclestats=wb.sheet_by_index(0)\n#cc = cyclestats.ncols\n#dataset=np.array([[float(cyclestats.cell_value(row+1,col)) for col in range(cyclestats.ncols)]for row in range(cyclestats.nrows-2)]) #import data from xlsx spreadsheet to np array format\n#cycnum = dataset[1:cyclestats.nrows-1,0] #account for column titles\n#spec_caps = dataset[1:cyclestats.nrows-1,2]\n\ndef show_plot():\n global dataSets, batteryData, active_mass, nominal_capacity, combined_bdms_files, is_bdms, dataset_selection, screen_size, data_labels\n try:\n nominal_capacity = int(set_nominal_capacity.get())\n except:\n nominal_capacity = 180\n set_nominal_capacity.delete(0, tk.END)\n set_nominal_capacity.insert(0, \"180\")\n font = chooseFont()\n titleSize, axesSize, legendSize, tickSize = fontScale(title_font_size_scale, axes_font_size_scale, legend_font_size_scale, ticks_font_size_scale)\n\n if batteryData[0].sysFormat == 'Arbin' or batteryData[0].sysFormat == 'Land':\n if selectedEntryMode.get() == 'Calculate (enter initial C-rate):':\n # print(np.nonzero(batteryData[0].currentData))\n acive_masses = []\n if batteryData[0].sysFormat == 'Arbin':\n for file in combined_bdms_files:\n for dataset in range(len(file)):\n 
active_mass = abs(file[dataset].currentData[np.nonzero(file[dataset].currentData)[0][2]] / (float(mass_entry.get()) * nominal_capacity/1000))\n file[dataset].recalculate(active_mass)\n\n elif selectedEntryMode.get() == 'Enter Mass Manually:':\n for dataset in range(num_datasets):\n active_mass = float(mass_entry.get()) / 1000 # assume entry in mg\n batteryData[dataset].recalculate(active_mass)\n for file in combined_bdms_files:\n for dataset in range(len(file)):\n dataSets[combined_bdms_files.index(file)][dataset]['Specific Capacity'] = specificCapacity(file[dataset].cyclenumbers,\n file[dataset].currentData,file[dataset].speCapData)\n dataSets[combined_bdms_files.index(file)][dataset]['Coulombic Efficiency'] = specificCapacity(file[dataset].cyclenumbers,\n file[dataset].currentData,\n file[dataset].speCapData)\n dataSets[combined_bdms_files.index(file)][dataset]['Mean Voltage'] = meanVoltage(file[dataset].cyclenumbers,file[dataset].currentData,file[dataset].voltageData)\n #[combined_bdms_files.index(file)][dataset]['Voltage Curve'] = voltageCurve(3, combined_bdms_files[combined_bdms_files.index(file)][dataset].cyclenumbers,\n # combined_bdms_files[combined_bdms_files.index(file)][dataset].speCapData,\n # combined_bdms_files[combined_bdms_files.index(file)][dataset].voltageData)\n # dataSets[combined_bdms_files.index(file)][file][dataset]['dQ/dV curve'] = dQdVcurve(3, combined_bdms_files[combined_bdms_files.index(file)][dataset].cyclenumbers,combined_bdms_files[combined_bdms_files.index(file)][dataset].speCapData,combined_bdms_files[combined_bdms_files.index(file)][dataset].voltageData)\n\n plotfig = plt.figure(figsize=(7, 4.5), dpi=100)\n x_axis = set_domain.get().split(',')\n y_axis = set_range.get().split(',')\n\n global pane1\n pane1 = plotfig.add_subplot(1, 1, 1)\n datasetName = selectedData.get()\n colors = [(0, 0, 0), (0.5, 0, 0), (0, 0.5, 0), (0, 0, 0.5), (0.5, 0.5, 0), (0, 0.5, 0.5),\n (0.5, 0, 0.5)]\n counter = 7\n if datasetName == \"Specific Capacity\":\n #if is_bdms:\n for file in combined_bdms_files:\n for dataset in range(len(file)):\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotSpecCapCurves(is_bdms, combined_bdms_files.index(file), dataset,colors, datasetName,counter, batteryData, pane1, displayLegend, dataSets, data_labels)\n #else:\n # dataset = 0\n # counter = plotSpecCapCurves(is_bdms, combined_bdms_files.index(file), dataset, colors, datasetName, counter, batteryData, pane1, displayLegend,\n # dataSets)\n\n\n # if len(combined_bdms_files) == 0:\n # if batteryData[0].sysFormat == 'Arbin':\n # for dataset in range(num_datasets):\n # counter = plotSpecCapCurves(dataset,colors, datasetName,counter, batteryData, pane1, displayLegend, dataSets)\n # # apply default format settings for specific capacity plot\n # else:\n # dataset = 0\n # counter = plotSpecCapCurves(dataset,colors, datasetName,counter, batteryData, pane1, displayLegend, dataSets)\n #else:\n # for file in combined_bdms_files:\n # for dataset in range(len(file)):\n # counter = plotSpecCapCurves(dataset, colors, datasetName, counter, batteryData, pane1,\n # displayLegend, dataSets)\n pane1.set_xlabel('Cycle Number', fontname = font, fontsize=axesSize)\n pane1.set_ylabel('Specific Discharge Capacity (mAh g'+'\\u207b'+'\\u00b9'+')', fontname=font, fontsize=axesSize)\n pane1.set_title('Specific Capacity',fontname = font, fontsize = titleSize)\n set_axes(x_axis, y_axis)\n\n if datasetName == \"Coulombic Efficiency\":\n #if is_bdms:\n for file in combined_bdms_files:\n if 
batteryData[0].sysFormat == 'Arbin':\n for dataset in range(len(file)):\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotCoulombicEfficiencyCurves(is_bdms, combined_bdms_files.index(file),dataset,colors, datasetName,counter, batteryData, pane1, displayLegend, dataSets, data_labels)\n else:\n dataset = 0\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotCoulombicEfficiencyCurves(is_bdms, combined_bdms_files.index(file),dataset, colors, datasetName, counter, batteryData, pane1,displayLegend, dataSets, data_labels)\n\n\n pane1.set_xlabel('Cycle Number', fontname = font, fontsize=axesSize)\n pane1.set_ylabel('Coulombic Efficiency', fontname=font, fontsize=axesSize)\n pane1.set_title('Coulombic Efficiency',fontname = font, fontsize = titleSize)\n set_axes(x_axis, y_axis)\n\n\n elif datasetName == \"Mean Voltage\":\n #if is_bdms:\n for file in combined_bdms_files:\n if batteryData[0].sysFormat == 'Arbin':\n for dataset in range(len(file)):\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotMeanVoltageCurves(is_bdms,combined_bdms_files.index(file),dataset, colors, datasetName, counter, batteryData, pane1, displayLegend, dataSets, data_labels)\n else:\n dataset = 0\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotMeanVoltageCurves(is_bdms,combined_bdms_files.index(file),dataset, colors, datasetName, counter, batteryData, pane1,displayLegend, dataSets, data_labels)\n\n\n #pane1.plot(dataSets.get(datasetName)[0],dataSets.get(datasetName)[1],'o',c=[0,0,0])\n #pane1.plot(dataSets.get(datasetName)[0],dataSets.get(datasetName)[2],'*',c=[0,0,0])\n pane1.set_xlabel('Cycle Number',fontname=font,fontsize=axesSize)\n pane1.set_ylabel('Mean Voltage (V)',fontname=font,fontsize=axesSize)\n pane1.set_title('Mean Voltage', fontname=font, fontsize=titleSize)\n set_axes(x_axis, y_axis)\n\n elif datasetName == \"Voltage Curve\":\n cycle_numbers = ValidateCycleInput(set_cycle_numbers)\n\n #if is_bdms:\n for file in combined_bdms_files:\n if batteryData[0].sysFormat == 'Arbin':\n for dataset in range(len(file)):\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotVoltCurves(is_bdms,combined_bdms_files,combined_bdms_files.index(file),cycle_numbers, dataset, dataSets,colors,datasetName,counter, batteryData, pane1, displayLegend, data_labels)\n else:\n dataset = 0\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotVoltCurves(is_bdms,combined_bdms_files,combined_bdms_files.index(file),cycle_numbers, dataset, dataSets, colors, datasetName, counter, batteryData, pane1, displayLegend, data_labels)\n\n\t\t#Step 1: validate input (a comma-separated list of integers is an acceptable input)\n\t\t#Step 2: convert string input to a list (e.g. 
numpy array) of cycle numbers to be plotted\n\t\t#Step 3: obtain dataset for each cycle specified and add to plot\n #symbols = ['o', '*', '.']\n pane1.set_xlabel('Specific Capacity (mAh g'+'\\u207b'+'\\u00b9'+')',fontname=font,fontsize=axesSize)\n pane1.set_ylabel('Voltage (V)',fontname=font,fontsize=axesSize)\n pane1.set_title('Voltage', fontname=font, fontsize=titleSize)\n set_axes(x_axis, y_axis)\n\n\n\n elif datasetName == \"dQ/dV curve\":\n cycle_numbers = ValidateCycleInput(set_cycle_numbers)\n #if is_bdms:\n for file in combined_bdms_files:\n if batteryData[0].sysFormat == 'Arbin':\n for dataset in range(len(file)):\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotdQdVCurves(is_bdms,combined_bdms_files,combined_bdms_files.index(file),cycle_numbers, dataset, dataSets, colors, datasetName, counter, batteryData, pane1, displayLegend, data_labels)\n else:\n dataset = 0\n if dataset_selection[combined_bdms_files.index(file)][dataset].get():\n counter = plotdQdVCurves(is_bdms,combined_bdms_files,combined_bdms_files.index(file),cycle_numbers, dataset, dataSets, colors, datasetName, counter, batteryData, pane1, displayLegend, data_labels)\n\n pane1.set_xlabel('Voltage (V)', fontname=font, fontsize=axesSize)\n pane1.set_ylabel('dQ/dV (mAh g' +'\\u207b'+'\\u00b9'+ 'V'+'\\u207b'+'\\u00b9'+')', fontname=font, fontsize=axesSize)\n pane1.set_title('dQ/dV', fontname=font, fontsize=titleSize)\n set_axes(x_axis, y_axis)\n\n displayTicks(tickSize)\n canvas = FigureCanvasTkAgg(plotfig,master = main_window)\n canvas.draw()\n canvas.get_tk_widget().grid(column = 1, row = 1, columnspan = 2, rowspan = 2)\n selectFolderButton['state'] = tk.NORMAL\n\t\ndef importDataFile():\n global batteryData\n global dataSets\n global num_datasets\n global data_selected\n global data_selection_tabs\n global dataset_selection\n global dataset_checkboxes\n global infile_name\n global combined_bdms_files\n global is_bdms\n global data_labels\n global set_data_label\n global update_data_label\n\n if not is_bdms:\n while True:\n try: #added try/except in case user cancels file input\n infile_name = tk.filedialog.askopenfile().name\n import_control = tk.Label(init_import_tab, text=infile_name[-20:], bg=\"#cfe2f3\")\n import_control.grid(column=1, row=1, columnspan=2, padx=5, pady=5)\n batteryData = file2dataset(infile_name)\n break\n except:\n print(\"No file provided\")\n return\n else:\n while True:\n try:\n import_control = tk.Label(open_saved_bdms, text=infile_name[-20:], bg=\"#ffffe3\")\n import_control.grid(column=1, row=1, padx=5, pady=5)\n break\n except:\n print(\"No file provided\")\n return\n data_selection_tabs.append(ttk.Frame(DS_Tab_Control)) #add ui tabs for imported dataset\n dataset_selection.append(np.zeros(len(batteryData)).tolist())\n dataset_checkboxes.append(np.zeros(len(batteryData)).tolist())\n data_labels.append(infile_name[-20:])\n DS_Tab_Control.add(data_selection_tabs[-1],text = infile_name[-20:])\n set_data_label.append(tk.Entry(data_selection_tabs[-1]))\n set_data_label[-1].insert(0,infile_name[-20:])\n set_data_label[-1].grid(column = 1, columnspan = 2, row = 1, rowspan = 1, padx = 5, pady = 5)\n update_data_label.append(tk.Button(data_selection_tabs[-1],command = SetDataLabel))\n update_data_label[-1]['text'] = 'Set'\n update_data_label[-1].grid(column = 3, columnspan = 1, row = 1, rowspan = 1, padx = 5, pady = 5)\n for index in range(len(batteryData)):\n dataset_selection[-1][index] = tk.BooleanVar()\n dataset_checkboxes[-1][index] = 
tk.Checkbutton(data_selection_tabs[-1], text='Cell ' + str(index + 1), variable = dataset_selection[-1][index], onvalue=True,\n offvalue=False)\n dataset_checkboxes[-1][index].grid(column = 1, columnspan = 3, row = index + 2, rowspan = 1, padx=5, pady=5)\n\n combined_bdms_files.append(batteryData)\n dataSets = []\n for file in combined_bdms_files:\n dataSets.append([])\n for dataset in file:\n dataSets[combined_bdms_files.index(file)].append({})\n if not isinstance(batteryData, list):\n Format = batteryData.sysFormat\n\n else: # list type dataset from arbin\n Format = batteryData[0].sysFormat\n num_datasets = len(batteryData)\n mass_entry['state'] = tk.NORMAL\n if batteryData[0].sysFormat == 'Arbin':\n set_nominal_capacity['state'] = tk.NORMAL\n set_nominal_capacity.delete(0, tk.END)\n set_nominal_capacity.insert(0, \"180\")\n else:\n set_nominal_capacity.delete(0, tk.END)\n set_nominal_capacity.insert(0, \"180\")\n set_nominal_capacity['state'] = tk.DISABLED\n\n\n print('Detected %s file format' % Format)\n print('%d dataset(s) imported' % num_datasets)\n plotDataSetButton['state'] = tk.NORMAL\n bdms_filename_entry['state'] = tk.NORMAL\n bdms_filename_entry.insert(0, infile_name)\n save_bdms_file_button['state'] = tk.NORMAL\n data_selected = True\n is_bdms = False\n return batteryData\n\ndef BDMSfile():\n global is_bdms\n global infile_name, infile_counter\n global batteryData\n while True:\n try:\n infile_name = tk.filedialog.askopenfile().name\n break\n except:\n print(\"No file provided\")\n return\n is_bdms = True\n with open(infile_name, 'rb') as dataSetFile: # load the saved data back in as a batteryDataSet class object\n batteryData = load(dataSetFile)\n batteryData = importDataFile()\n return batteryData\n\ndef onSelectData(self):\n if (selectedData.get() == 'Voltage Curve' or selectedData.get() == 'dQ/dV curve') and data_selected == True:\n set_cycle_numbers['state'] = tk.NORMAL\n findMaxCycle()\n message = \"Max Cycles: \" + str(findMaxCycle())\n set_cycle_numbers.delete(0, tk.END)\n set_cycle_numbers.insert(0, message)\n #set_cycle_numbers_prompt['state'] = tk.NORMAL\n else:\n set_cycle_numbers['state'] = tk.DISABLED\n #set_cycle_numbers_prompt['state'] = tk.DISABLED\n\ndef onSelectEntryMode(self):\n if selectedEntryMode.get() == 'Enter Mass Manually:':\n mass_entry.delete(0, tk.END)\n mass_entry.insert(0, 'Enter mass (mg)')\n elif selectedEntryMode.get() == 'Calculate (enter initial C-rate):':\n # theor_capac_entry['state'] = tk.NORMAL\n mass_entry.delete(0, tk.END)\n mass_entry.insert(0, '0.1')\n # make prompts & extra textbox active\n\ndef displayLegend():\n if legend_on.get():\n pane1.legend(prop={'size': legendSize})\n\ndef displayTicks(tickSize):\n if toptick_on.get():\n pane1.xaxis.set_tick_params(top=True, direction=\"in\")\n plt.xticks(fontsize=tickSize)\n else:\n pane1.xaxis.set_tick_params(top=False, direction=\"in\")\n plt.xticks(fontsize=tickSize)\n if righttick_on.get():\n pane1.yaxis.set_tick_params(right=True, direction=\"in\")\n plt.yticks(fontsize=tickSize)\n else:\n pane1.yaxis.set_tick_params(right=False, direction=\"in\")\n plt.yticks(fontsize=tickSize)\n\ndef findMaxCycle():\n max_cycle_list = []\n if batteryData[0].sysFormat == 'Arbin':\n for dataset in range(num_datasets):\n max_cycle = max(batteryData[dataset].cyclenumbers)\n max_cycle_list.append(int(max_cycle))\n else:\n max_cycle = max(batteryData[0].cyclenumbers)\n max_cycle_list.append(int(max_cycle))\n return max_cycle_list\n\ndef set_axes(x_axis, y_axis):\n try:\n for i in range(len(x_axis)):\n 
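# axis limits arrive as strings from the 'Min, Max' entry boxes; float() fails on the placeholder text, so the except below skips setting explicit limits\n            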
x_axis[i] = float(x_axis[i])\n plt.xlim(x_axis)\n except:\n pass\n try:\n for i in range(len(y_axis)):\n y_axis[i] = float(y_axis[i])\n plt.ylim(y_axis)\n except:\n if selectedData.get() == 'dQ/dV curve':\n plt.ylim([-1000,1000])\n\ndef selectFolder():\n global folder_name\n folder_name = tk.filedialog.askdirectory()\n save_figure_button['state'] = tk.NORMAL\n folder_control = tk.Label(saveImageFrame, text=folder_name[-20:], bg=\"#e7fcde\")\n folder_control.grid(column=3, row=2, columnspan=2, padx=5, pady=5)\n\ndef saveFigure():\n filename = figure_filename_entry.get()\n plt.savefig(str(folder_name) + '/' + filename + '.png')\n return\n\ndef chooseFont():\n global font\n font = str(fontEntryMode.get())\n mpl.rc('font', family=font) #https://stackoverflow.com/questions/21933187/how-to-change-legend-fontname-in-matplotlib\n return font\n\ndef fontScale(title_font_size_scale, axes_font_size_scale, legend_font_size_scale, ticks_font_size_scale):\n global titleSize, axesSize, legendSize, tickSize\n titleSize = int(title_font_size_scale.get())\n axesSize = int(axes_font_size_scale.get())\n legendSize = int(legend_font_size_scale.get())\n tickSize = int(ticks_font_size_scale.get())\n return titleSize, axesSize, legendSize, tickSize\n\ndef ValidateCycleInput(set_cyle_numbers):\n cycle_numbers_string = set_cycle_numbers.get() # retrieve user input providing cycle numbers\n # convert string input to list of integers\n cycle_numbers_strings = cycle_numbers_string.split(\",\")\n cycle_numbers = []\n for cycle in cycle_numbers_strings:\n try:\n cycle_numbers.append(int(cycle))\n except:\n print(\"Please enter valid input. This is a list of comma separated integers.\")\n return cycle_numbers\n\n\ndef saveBDMSFile():\n global batteryData\n with open(bdms_filename_entry.get(), 'wb') as dataSetFile: # save the batteryDataSet class object(s) to a file\n dump(batteryData, dataSetFile)\n return batteryData, dataSetFile\n\ndef SetDataLabel():\n global data_labels,data_selection_tabs,set_data_label\n for index in range(len(data_selection_tabs)):\n data_labels[index] = set_data_label[index].get()\n DS_Tab_Control.tab(data_selection_tabs[index],text = data_labels[index])\n return\n\nmain_window = tk.Tk()\nmain_window.title('Battery Data Manager')\nscreen_size = [main_window.winfo_screenwidth(),main_window.winfo_screenheight()]\nmain_window.geometry(\"%ix%i\" %(screen_size[0]*3/4,screen_size[1]*3/4))\n\n#control frame\ncontrolframe = tk.Frame(main_window,bg=\"#0b60ad\")\ncontrolframe.grid(column = 0, row = 0, padx = 10, pady = 10, ipadx = 10, ipady = 10)\n\nCF_Tab_Control = ttk.Notebook(controlframe)\ninit_import_tab = ttk.Frame(CF_Tab_Control)\nopen_saved_bdms = ttk.Frame(CF_Tab_Control)\nCF_Tab_Control.add(init_import_tab,text = \"Initial Excel File Import\")\nCF_Tab_Control.add(open_saved_bdms,text = \"Open Saved BDMS File\")\nCF_Tab_Control.grid(column = 0, row = 3, columnspan = 3 , rowspan = 3, padx = 5, pady = 5)\n\nimport_control = tk.Label(controlframe,text = \"File and Dataset Control\", bg = \"#9fc5e8\")\nimport_control.grid(column = 0, row = 0, columnspan = 2, padx = 5, pady = 5)\n\nplotDataSetButton = tk.Button(controlframe,command = show_plot,state=tk.DISABLED)\nplotDataSetButton['text'] = 'Plot Data'\nplotDataSetButton.grid(column = 0,row = 1,padx = 5, pady = 5)\n\naddDataFileButton = tk.Button(init_import_tab,command = importDataFile)\naddDataFileButton['text'] = 'Select Excel File'\naddDataFileButton.grid(column = 0,row = 1,padx = 5,pady = 5)\n#addDataFileButton.place(relx = 0.1, rely = 0.2, anchor 
= 'center')\n\nselectedEntryMode = tk.StringVar(main_window)\nselectedEntryMode.set('Calculate (enter initial C-rate):')\nmass_entry_mode = tk.OptionMenu(init_import_tab, selectedEntryMode, 'Enter Mass Manually:', 'Calculate (enter initial C-rate):',\n command=onSelectEntryMode)\nmass_entry_mode.grid(column = 0,row = 2, padx = 5, pady = 5)\n#mass_entry_mode.place(relx=0.15, rely=0.4, anchor='center')\nmass_entry_mode.config(width=25)\nmass_entry = tk.Entry(init_import_tab)\nmass_entry.insert(0,'0.1') #\"Initial C-rate\"\nmass_entry.grid(column = 1, row = 2, padx = 5, pady = 5)\n\nselectedData = tk.StringVar(main_window) #the selected string from the dropdown list will be stored in this variable\nselectedData.set('Specific Capacity') #this is the default dataset\ndataSelect = tk.OptionMenu(controlframe, selectedData, 'Specific Capacity', 'Coulombic Efficiency', 'Mean Voltage', 'Voltage Curve', 'dQ/dV curve',command = onSelectData)\ndataSelect.grid(column = 0, row = 2, padx = 5, pady = 5)\n#dataSelect.place(relx = 0.1, rely = 0.3, anchor = 'center')\ndataSelect.config(width = 15)\n\nset_cycle_numbers = tk.Entry(controlframe, state = tk.DISABLED)\nset_cycle_numbers.insert(0,\"Cycle number(s):\")\nset_cycle_numbers.grid(column = 1,row = 2)\n\n#formatframe\nformatFrame = tk.Frame(main_window, bg = \"#674ea7\")\nformatFrame.grid(column = 0, row = 1, padx = 10, pady = 10, ipadx = 10, ipady = 10)\n\ngraph_control = tk.Label(formatFrame,text = \"Graph Control\", bg = \"#b4a7d6\")\ngraph_control.grid(column = 0, row = 0, columnspan = 2, padx = 5, pady = 5)\n\nlegend_on = tk.BooleanVar()\nlegend_checkbox = tk.Checkbutton(formatFrame, text='Display Legend', variable=legend_on, onvalue=True, offvalue=False, command=displayLegend, bg=\"#e9e0ff\")\nlegend_checkbox.grid(column=0, row=10, columnspan = 2, padx=5, pady=5)\n\ntoptick_on = tk.BooleanVar()\ntoptick_checkbox = tk.Checkbutton(formatFrame, text='Top Ticks', variable=toptick_on, onvalue=True, offvalue=False, command=displayTicks, bg=\"#e9e0ff\")\ntoptick_checkbox.grid(column=0, row=9, padx=5, pady=5)\n\nrighttick_on = tk.BooleanVar()\nrighttick_checkbox = tk.Checkbutton(formatFrame, text='Right Ticks', variable=righttick_on, onvalue=True, offvalue=False, command=displayTicks, bg=\"#e9e0ff\")\nrighttick_checkbox.grid(column=1, row=9, padx=5, pady=5)\n\n#toplabel_on = tk.BooleanVar()\n#toplabel_checkbox = tk.Checkbutton(formatFrame, text='Top Labels', variable=toplabel_on, onvalue=True, offvalue=False, command=displayTicks, bg=\"#e9e0ff\")\n#toplabel_checkbox.grid(column=0, row=9, padx=5, pady=5)\n\n#rightlabel_on = tk.BooleanVar()\n#rightlabel_checkbox = tk.Checkbutton(formatFrame, text='Right Labels', variable=rightlabel_on, onvalue=True, offvalue=False, command=displayTicks, bg=\"#e9e0ff\")\n#rightlabel_checkbox.grid(column=1, row=9, padx=5, pady=5)\n\n#font_control = tk.Label(formatFrame,text = \"Font\", bg = \"#e9e0ff\", width = 5)\n#font_control.grid(column = 0, row = 4, padx = 5, pady = 5)\n\nfont_control = tk.Label(formatFrame,text = \"Font Style\", bg = \"#e9e0ff\", width = 10)\nfont_control.grid(column = 0, row = 4,padx = 5, pady = 5)\n\nfontEntryMode = tk.StringVar(main_window)\nfontEntryMode.set('Arial')\nfont_selected = tk.OptionMenu(formatFrame, fontEntryMode, 'Arial', 'Times New Roman', 'Calibri','Helvetica', 'Serif', 'Algerian', 'Wingdings', command=chooseFont)\nfont_selected.grid(column = 1,row = 4, padx = 6, pady = 5)\n\ndomain_control = tk.Label(formatFrame,text = \"Domain \", bg = \"#e9e0ff\", width = 
15)\ndomain_control.grid(column = 0, row = 2, padx = 5, pady = 5)\nset_domain = tk.Entry(formatFrame)\nset_domain.insert(0,\"Min, Max\")\nset_domain.grid(column = 1,row = 2)\n\nrange_control = tk.Label(formatFrame,text = \" Range \", bg = \"#e9e0ff\", width = 10)\nrange_control.grid(column = 0, row = 3, padx = 5, pady = 5)\nset_range = tk.Entry(formatFrame)\nset_range.insert(0,\"Min, Max\")\nset_range.grid(column = 1,row = 3)\n\nnominal_capacity_control = tk.Label(formatFrame,text = 'Nominal Capacity (mAh g'+'\\u207b'+'\\u00b9'+')', bg = \"#e9e0ff\")\nnominal_capacity_control.grid(column = 0, row = 1, padx = 5, pady = 5)\nset_nominal_capacity = tk.Entry(formatFrame,state=tk.DISABLED)\nset_nominal_capacity.grid(column = 1,row = 1)\n\ntitle_font_size = tk.Label(formatFrame, text = \"Title Font Size:\", bg = \"#e9e0ff\")\ntitle_font_size.grid(column = 0, row = 5, padx = 5, pady = 5)\n\ntitle_font_size_scale = tk.Scale(formatFrame, from_ = 1, to_ = 20, orient = 'horizontal')\ntitle_font_size_scale.grid(column = 0, row = 6, padx = 5, pady = 5)\ntitle_font_size_scale.set(15)\n\naxes_font_size = tk.Label(formatFrame, text = \"Axes Font Size:\", bg = \"#e9e0ff\")\naxes_font_size.grid(column = 1, row = 5, padx = 5, pady = 5)\n\naxes_font_size_scale = tk.Scale(formatFrame, from_ = 1, to_ = 20, orient = 'horizontal')\naxes_font_size_scale.grid(column = 1, row = 6, padx = 5, pady = 5)\naxes_font_size_scale.set(14)\n\nlegend_font_size = tk.Label(formatFrame, text = \"Legend Font Size:\", bg = \"#e9e0ff\")\nlegend_font_size.grid(column = 0, row = 7, padx = 5, pady = 5)\n\nlegend_font_size_scale = tk.Scale(formatFrame, from_ = 1, to_ = 20, orient = 'horizontal')\nlegend_font_size_scale.grid(column = 0, row = 8, padx = 5, pady = 5)\nlegend_font_size_scale.set(12)\n\nticks_font_size = tk.Label(formatFrame, text = \"Ticks Font Size:\", bg = \"#e9e0ff\")\nticks_font_size.grid(column = 1, row = 7, padx = 5, pady = 5)\n\nticks_font_size_scale = tk.Scale(formatFrame, from_ = 1, to_ = 20, orient = 'horizontal')\nticks_font_size_scale.grid(column = 1, row = 8, padx = 5, pady = 5)\nticks_font_size_scale.set(14)\n\nsaveImageFrame = tk.Frame(main_window, bg = \"#00b809\")\nsaveImageFrame.grid(column = 1, row = 0, padx = 10, pady = 10, ipadx = 10, ipady = 10)\n\nSIF_Tab_Control = ttk.Notebook(saveImageFrame)\nsaveImageTab = ttk.Frame(SIF_Tab_Control)\nsaveFileTab = ttk.Frame(SIF_Tab_Control)\nSIF_Tab_Control.add(saveImageTab,text = \"Save Image\")\nSIF_Tab_Control.add(saveFileTab,text = \"Save File\")\nSIF_Tab_Control.grid(column = 2, row = 1, columnspan = 3 , rowspan = 3, padx = 5, pady = 5)\n\ngeneral_save_control = tk.Label(saveImageFrame,text = \"Save Figures and Datasets\", bg = \"#a5f584\")\ngeneral_save_control.grid(column = 2, row = 0, columnspan = 3, rowspan = 1, padx = 5, pady = 5)\n\nsave_bdms_file_button = tk.Button(saveFileTab,command = saveBDMSFile, state = tk.DISABLED)\nsave_bdms_file_button['text'] = 'Save BDMS File'\nsave_bdms_file_button.grid(column = 0, row = 3, columnspan = 2, padx = 5, pady = 5)\n\nbdms_file_save_control = tk.Label(saveFileTab,text = \"File Name\")\nbdms_file_save_control.grid(column = 0, row = 2, padx = 5, pady = 5)\n\nbdms_filename_entry = tk.Entry(saveFileTab, state=tk.DISABLED)\nbdms_filename_entry.grid(column = 1,row = 2, padx = 5, pady = 5)\n\nsave_figure_button = tk.Button(saveImageTab,command = saveFigure, state = tk.DISABLED)\nsave_figure_button['text'] = 'Save Figure'\nsave_figure_button.grid(column = 2, row = 3, padx = 5, pady = 5)\n\nfigure_filename_entry = 
tk.Entry(saveImageTab)\nfigure_filename_entry.insert(0,'Figure Filename')\nfigure_filename_entry.grid(column = 3,row = 3, padx = 5, pady = 5)\n\nselectFolderButton = tk.Button(saveImageTab,command = selectFolder, state = tk.DISABLED)\nselectFolderButton['text'] = 'Select Folder'\nselectFolderButton.grid(column = 2, row = 2, padx = 5, pady = 5)\n\nselectDataFrame = tk.Frame(main_window, bg = \"#FFFF00\")\nselectDataFrame.grid(column = 2, row = 0, padx = 10, pady = 10, ipadx = 10, ipady = 10)\n\nDS_Tab_Control = ttk.Notebook(selectDataFrame)\nDS_Tab_Control.grid(column = 0, row = 1, rowspan = 5, columnspan = 3, padx = 5, pady = 5)\n\nbdms_control = tk.Label(selectDataFrame,text = \"Data To Be Displayed\", bg=\"#ffffa3\")\nbdms_control.grid(column = 0, row = 0, columnspan = 2, padx = 5, pady = 5)\n\naddBDMSDataFileButton = tk.Button(open_saved_bdms,command = BDMSfile)\naddBDMSDataFileButton['text'] = 'Select Data File'\naddBDMSDataFileButton.grid(column = 0,row = 1,padx = 5,pady = 5)\n\nmain_window.mainloop() #keep window open until closed by user","repo_name":"obersd00/battery-data-manager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":31444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"131613761","text":"import tflearn\r\nimport nltk\r\nimport numpy as np\r\nfrom preprocessData import words, training,output\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\n\r\nstemmer = LancasterStemmer()\r\n\r\ndef bag_of_words(s):\r\n bag = [0 for _ in range(len(words))]\r\n\r\n s_words = nltk.word_tokenize(s)\r\n s_words = [stemmer.stem(word.lower()) for word in s_words]\r\n\r\n for se in s_words:\r\n for i, w in enumerate(words):\r\n if w == se:\r\n bag[i] = 1\r\n\r\n return np.array(bag)\r\n\r\ndef create_model():\r\n from tensorflow.python.framework import ops\r\n ops.reset_default_graph()\r\n net = tflearn.input_data(shape=[None, len(training[0])])\r\n net = tflearn.fully_connected(net, 8)\r\n net = tflearn.fully_connected(net, 8)\r\n net = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\r\n net = tflearn.regression(net)\r\n\r\n return tflearn.DNN(net)","repo_name":"AbhiGaunker/FirstProject-ChatBot","sub_path":"Mmodel.py","file_name":"Mmodel.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"24271423500","text":"import lib\nfrom lib.salty_scrape import openSaltySession, waitForMatchEnd, waitForMatchStart, bet, postBet, checkWinner\nfrom lib.salty_file_io import saveBettingData, loadBettingData, loadLoginCredentials\n\n\ndef main(session): \n betting_data = loadBettingData()\n\n #main loop\n while(True):\n bet_payload = bet(session, betting_data)\n postBet(session, bet_payload)\n\n timeStart = waitForMatchStart()\n timeEnd = waitForMatchEnd()\n\n betting_data = checkWinner(timeStart, timeEnd, betting_data)\n saveBettingData(betting_data)\n\nif __name__ == \"__main__\":\n waitForMatchEnd()\n session = openSaltySession()\n main(session)","repo_name":"Sour/Salty","sub_path":"salty_main.py","file_name":"salty_main.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23796687007","text":"\"\"\"Create a meme from a given image URL.\"\"\"\n\n# Import required libraries\nfrom wand.image import Image\nfrom wand.drawing import Drawing\nfrom wand.display import display\nimport 
requests\nimport yaml_data\n\n\n# Parse YAML data\nDATA = yaml_data.get_data()\n\nIMAGE_URL = DATA['IMAGE_URL']\nTEXT = DATA['TEXT']\nFONT_SIZE = DATA['FONT_SIZE']\nHEIGHT = DATA['HEIGHT']\nDEEP_FRY = DATA['DEEP_FRY']\nCOMPRESSION_LEVEL = DATA['COMPRESSION_LEVEL']\nCONTRAST_INCREASE_PERCENT = DATA['CONTRAST_INCREASE_PERCENT']\n\n# Create a new image from the bytes downloaded from a given image URL\n# (.content is the response body; Wand needs raw bytes, not the Response object)\nwith Image(blob=requests.get(IMAGE_URL).content) as img:\n    # Create a new drawing (on top of image)\n    with Drawing() as draw:\n        # Impact font is important for making meme look proper\n        draw.font = 'fonts/impact.ttf'\n        draw.font_size = FONT_SIZE\n        draw.fill_color = 'white'\n        draw.text_alignment = 'center'\n        # Put our text in the center of the image and at the specified height (in pixels)\n        draw.text(int(img.width / 2), HEIGHT, TEXT)\n        draw(img)\n    # Apply heavy JPG compression and contrast if the user chooses the DEEP_FRY option\n    if DEEP_FRY:\n        img.compression = 'jpeg'\n        img.compression_quality = COMPRESSION_LEVEL\n        img.modulate(100, 100 + CONTRAST_INCREASE_PERCENT, 100)\n        img.save(filename='output/meme.jpg')\n    else:\n        img.save(filename='output/meme.png')\n    # You have the option of displaying the image via ImageMagick/Wand\n    # display(img)\n","repo_name":"Vampire-Computer-People/art_cmdline_magick","sub_path":"scripts/meme_machine.py","file_name":"meme_machine.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"2753180161","text":"MAX = 6\nque = [0]*MAX\nhead = 0\ntail = 0\n\ndef get_queue():\n    i = head\n    q = []\n    while i%MAX != tail:\n        q.append(que[i])\n        i = (i+1)%MAX\n    return q\n\ndef enqueue(d):\n    global tail\n    nt = (tail+1)%MAX\n\n    if nt == head:\n        print('The queue is full')\n    else:\n        que[tail] = d\n        tail = nt\n        count = len(get_queue())\n        print(f'Waiting time: {count * 15} minutes')\n\ndef dequeue():\n    global head\n    if head == tail:\n        print('There is no data to dequeue')\n        return None\n    else:\n        d = que[head]\n        que[head] = 0\n        head = (head + 1)%MAX\n        print(f'{d} left the queue')\n        return d\n\nfor i in range(3):\n    enqueue(i)\n\nfor i in range(2):\n    dequeue()\n\nenqueue(i)\n","repo_name":"itc-s21009/algorithm_class","sub_path":"Chapter3/Lesson3-2/queue_1_2.py","file_name":"queue_1_2.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40802937035","text":"from sklearn import tree\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.metrics import accuracy_score\r\n\r\n# [height, weight, shoe_size]\r\nX = [[181, 80, 44], [177, 70, 43], [160, 60, 38], [154, 54, 37], [166, 65, 40],\r\n     [190, 90, 47], [175, 64, 39],\r\n     [177, 70, 40], [159, 55, 37], [171, 75, 42], [181, 85, 43]]\r\n\r\nY = ['male', 'male', 'female', 'female', 'male', 'male', 'female', 'female',\r\n     'female', 'male', 'male']\r\n\r\n# Algorithm Model\r\nclf = tree.DecisionTreeClassifier()\r\nclf2 = GaussianNB()\r\n\r\n# Training data\r\nclf = clf.fit(X,Y)\r\nclf2 = clf2.fit(X,Y)\r\n\r\n# Prediction\r\nprediction = clf.predict([[170, 70, 44]])\r\nprediction2 = clf2.predict([[170, 70, 44]])\r\n\r\n# Accuracy Score (accuracy_score compares predicted labels with true labels)\r\n#accuracy = accuracy_score(Y, clf.predict(X))\r\n#accuracy2 = accuracy_score(Y, clf2.predict(X))\r\nprint(prediction2, prediction)\r\n","repo_name":"nanto88/ml-python-test","sub_path":"PredictGender.py","file_name":"PredictGender.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"45158590418","text":"\"\"\"\nThe dataset given, records data of city temperatures over the years 2014 and 2015.\nPlot the histogram of the temperatures over this period for the cities of San Francisco and Moscow\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('CityTemps.csv')\n\nfig, ax = plt.subplots(2, 2)\nfig.tight_layout(h_pad=2)\n\nplt.subplot(2, 2, 1)\nplt.hist(df[df['Year']==2014]['Moscow'], bins=5)\nplt.title('Moscow 2014')\n\nplt.subplot(2, 2, 2)\nplt.hist(df[df['Year']==2014]['San Francisco'], bins=5)\nplt.title('San Francisco 2014')\n\nplt.subplot(2, 2, 3)\nplt.hist(df[df['Year']==2015]['Moscow'], bins=5)\nplt.title('Moscow 2015')\n\nplt.subplot(2, 2, 4)\nplt.hist(df[df['Year']==2015]['San Francisco'], bins=5)\nplt.title('San Francisco 2015')\n\nplt.subplots_adjust(top=0.85)\nplt.suptitle('City Temperatures')\nplt.show()\nplt.close()\n","repo_name":"sushkiran/pyScript","sub_path":"5_Data_Visualisation/CaseStudy1/2_CityTemps.py","file_name":"2_CityTemps.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"70327112021","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: Ming\n# @Date: 2019-07-12 16:01:58\n# @Last Modified by: Ming\n# @Last Modified time: 2019-07-15 21:39:03\nfrom collections import defaultdict\n\nimport matplotlib as mpl\n\nmpl.use('Agg')\nimport pandas as pd\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport click\n\n\n####\ndef parse_relation(file_in):\n \"\"\"\n\n :param file_in:\n :return:\n \"\"\"\n res = defaultdict(dict)\n with open(file_in, 'r') as IN:\n for line in IN:\n arr = line.strip().split('\\t')\n res[arr[0]][arr[1]] = float(arr[2])\n return res\n\n\n####\n\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n\n@click.command(context_settings=CONTEXT_SETTINGS)\n@click.option('-i', '--input',\n required=True,\n type=click.Path(),\n help=\"The table input to plot box\")\n@click.option('-r', '--relation',\n required=True,\n type=click.Path(),\n help=\"The relationshi file.\")\n@click.option('--xname',\n required=True,\n help=\"The x column name for the plot.\")\n@click.option('--yname',\n required=True,\n help=\"The y column name for the plot.\")\n@click.option('--huename',\n required=False,\n help=\"The hue column name for the plot.\")\n@click.option('--xorder',\n required=False,\n help=\"The order of x axis names(sep by ,)\")\n@click.option('--hueorder',\n required=False,\n help=\"The order of hue names(sep by ,).\")\n@click.option('--huecolors',\n required=False,\n help=\"The colors for hue infos(sep by ,).\")\n@click.option('-y', '--ylab',\n required=False,\n help=\"The ylab for the plot.\")\n@click.option('-t', '--title',\n required=False,\n help=\"The title for the plot.\")\n@click.option('--pearson',\n default=0.4,\n type=float,\n show_default=True,\n help=\"The min pearson relationship.\")\n@click.option('-p', '--prefix',\n default='result',\n help=\"The out prefix.\")\ndef cli(input, relation, xname, yname, huename, xorder, hueorder, huecolors,\n ylab, title, pearson, prefix):\n \"\"\"\n Mix Plot of box plot and rectangle.\n\n \"\"\"\n df = pd.read_csv(input, sep='\\t')\n relation_info = parse_relation(relation)\n\n x_order = xorder.strip().split(',') if xorder else None\n hue_order = hueorder.strip().split(',') if hueorder else None\n hue_color = huecolors.strip().split(',') if huecolors else None\n\n # Draw\n 
plt.style.use('ggplot')\n    figure, axis = plt.subplots(2, 1, figsize=(10, 6), dpi=300,\n                                gridspec_kw={'height_ratios': [3, 1]})\n    # Upper panel: box plot (a list of colors must go in as a palette, not color)\n    sns.boxplot(data=df, x=xname, y=yname, hue=huename, order=x_order,\n                hue_order=hue_order, palette=hue_color, ax=axis[0])\n\n    if x_order is None:\n        x_order = [i.get_text() for i in axis[0].get_xticklabels()]\n    # Lower panel: positional relationships\n    # Without adjusting width, the box plot bars always sum to a width of 0.5,\n    # with 0.1 of blank space on each side, starting from -0.5.\n    axis[1].set_xlim(axis[0].get_xlim())\n    axis[1].axhline(0.5, lw=1, c='black', zorder=0)\n    # Parameters for the dashed arrows\n    style = dict(arrowstyle=\"Simple,head_width=4,head_length=6\",\n                 linestyle='--', lw=2, color=\"r\")\n    for i in range(len(x_order)):\n        gene = x_order[i]\n        x, y, width, height = -0.3 + i, 0.4, 0.6, 0.2\n        color = 'red' if gene in relation_info else 'blue'\n        rect_plot = patches.Rectangle((x, y),\n                                      width=width, height=height,\n                                      fill=True, color=color)\n        axis[1].add_patch(rect_plot)\n        if color == 'red':\n            for mRNA, pearson_val in relation_info[gene].items():\n                if abs(pearson_val) > pearson:\n                    end = x_order.index(mRNA)\n                    rad = 0.2 if i < end else -0.2\n                    arc_plot = patches.FancyArrowPatch((i, 0.4), (end, 0.4),\n                                                       connectionstyle=f\"arc3,rad={rad}\",\n                                                       **style)\n                    axis[1].add_patch(arc_plot)\n\n    # Miscellaneous tweaks\n    axis[0].set(xlabel=\"\", ylabel=ylab)\n    axis[0].set_title(title, fontdict={'size': 22})\n    axis[1].axis('off')\n\n    plt.savefig(prefix + '.svg', bbox_inches='tight')\n    plt.savefig(prefix + '.png', dpi=300, bbox_inches='tight')\n\n\nif __name__ == \"__main__\":\n    cli()\n","repo_name":"XINBADA426/PlotDemo","sub_path":"mix/box_rectangle.py","file_name":"box_rectangle.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29744091307","text":"# Programmers_level_3_Delivery (배달)\n# Floyd-Warshall\n\ndef solution(N, road, K) :\n    import sys\n    INF = sys.maxsize\n    answer = [[INF for _ in range(N+1)] for _ in range(N+1)]\n    for i,j,k in road : \n        answer[i][j] = min(k,answer[i][j])\n        answer[j][i] = min(k,answer[j][i])\n    # Borrowing the Floyd-Warshall algorithm\n    for k in range(1, N+1) : \n        for i in range(1, N+1) : \n            for j in range(i, N+1) : \n                if i == j : \n                    answer[i][j] = 0\n                else : \n                    answer[i][j] = min(answer[i][j], answer[i][k] + answer[k][j])\n                    answer[j][i] = min(answer[j][i], answer[j][k] + answer[k][i])\n    answer = len([i for i in answer[1] if i <= K])\n    return answer","repo_name":"thomas783/coding_test","sub_path":"programmers/level_2/배달.py","file_name":"배달.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"14962353096","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.utils.project import get_project_settings\nfrom acaSpider.items import AcaspiderItem\nimport logging\nimport re\nimport datetime\nfrom acaSpider.proxyDownloader import getProxy\n\n'''\n    title = scrapy.Field()\n    authors = scrapy.Field()\n    year = scrapy.Field()\n    typex = scrapy.Field()\n    subjects = scrapy.Field()\n    url = scrapy.Field()\n    abstract = scrapy.Field()\n    citation = scrapy.Field()\n'''\n\n\nclass AAAISpider(scrapy.Spider):\n    # for 2019-2010\n    name = \"AAAI_Spider\"\n    allowed_domains = [\"aaai.org\"]\n    start_urls = get_project_settings().get('AAAI_URL')\n\n    def __init__(self):\n        super(AAAISpider, self).__init__()\n        self.startTime = get_project_settings().get('START_TIME')\n        self.proxyUpdateDelay = get_project_settings().get('PROXY_UPDATE_DELAY')\n        getProxy().main()\n\n    def parse(self, response):\n        item = AcaspiderItem()\n\n        item['title'] = 
list(map(self.remove_html, response.xpath('//p[@class=\"left\"]/a[not(contains(text(), \"PDF\"))]').extract()))\n        item['authors'] = response.xpath('//p[@class=\"left\"]/i/text()').extract()\n        if '19' in AAAISpider.start_urls[0]:\n            item['year'] = (response.xpath('//div[@id=\"box6\"]/p/text()').extract() * len(item['title']))[1::2] # questionable\n        elif '18' in AAAISpider.start_urls[0]:\n            item['year'] = ['New Orleans, Louisiana USA — February 2–7, 2018'] * len(item['title'])\n        elif '17' in AAAISpider.start_urls[0]:\n            item['year'] = ['February 4 –9, 2017, San Francisco, California USA'] * len(item['title'])\n        elif '16' in AAAISpider.start_urls[0]:\n            item['year'] = ['February 12 –17, 2016, Phoenix, Arizona USA'] * len(item['title'])\n        elif '15' in AAAISpider.start_urls[0]:\n            item['year'] = ['January 25 –30, 2015, Austin, Texas USA'] * len(item['title'])\n        elif '14' in AAAISpider.start_urls[0]:\n            item['year'] = ['July 27 –31, 2014, Québec City, Québec, Canada'] * len(item['title'])\n        elif '13' in AAAISpider.start_urls[0]:\n            item['year'] = ['July 14 –18, 2013, Bellevue, Washington, USA'] * len(item['title'])\n        elif '12' in AAAISpider.start_urls[0]:\n            item['year'] = ['July 22 –26, 2012, Toronto, Ontario, Canada'] * len(item['title'])\n        elif '11' in AAAISpider.start_urls[0]:\n            item['year'] = ['August 7 –11, 2011, San Francisco, California USA'] * len(item['title'])\n        elif '10' in AAAISpider.start_urls[0]:\n            item['year'] = ['July 11–15, 2010, Atlanta, Georgia'] * len(item['title'])\n        elif '08' in AAAISpider.start_urls[0]:\n            item['year'] = ['July 13–17, 2008, Chicago, Illinois'] * len(item['title'])\n        item['typex'] = ['Association for the Advancement of Artificial Intelligence (AAAI)'] * len(item['title'])\n        if '19' in AAAISpider.start_urls[0]:\n            item['url'] = (response.xpath('//p[@class=\"left\"]/a/@href').extract())[::2]\n        else:\n            item['url'] = (response.xpath('//p[@class=\"left\"]/a/@href').extract())\n\n        item['abstract'] = [' '] * len(item['title'])\n        item['citation'] = [str(-1)] * len(item['title'])\n        item['subjects'] = []\n        raw_subjects = response.xpath('//p[@class=\"left\"]/preceding-sibling::h4/text()').extract() #\n        tmp_subjects_cnt = []\n        subjects_cnt = []\n        for i in raw_subjects:\n            tmp_subjects_cnt.append(self.get_subjects_cnt(response, i))\n        for i in range(len(tmp_subjects_cnt) - 1):\n            subjects_cnt.append(tmp_subjects_cnt[i + 1] - tmp_subjects_cnt[i])\n        subjects_cnt.append(len(item['title']) - sum(subjects_cnt))\n        if '17' in AAAISpider.start_urls[0]:\n            subjects_cnt = [11, 26, 2, 1, 65, 25, 3, 3, 6, 33, 57, 182, 9, 11, 38, 18, 21, 16, 7, 14, 53, 16, 14, 8, 2, 17, 2, 9, 4, 1, 5, 7, 64, 16, 7, 13]\n        elif '15' in AAAISpider.start_urls[0]:\n            subjects_cnt = [59, 8, 2, 14, 20, 2, 44, 24, 8, 3, 10, 38, 43, 22, 8, 22, 16, 105, 30, 29, 5, 8, 18, 6, 13, 3, 20, 45, 16, 19, 6, 8]\n        elif '11' in AAAISpider.start_urls[0]:\n            subjects_cnt = [19, 6, 20, 48, 29, 14, 13, 14, 9, 7, 28, 18, 5, 10, 12, 19, 6, 44, 15, 8]\n        for i in range(len(raw_subjects)):\n            item['subjects'].extend(self.duplicate_subjects(raw_subjects[i], subjects_cnt[i]))\n        if '13' in AAAISpider.start_urls[0]:\n            item['title'] = item['title'][8:]\n            item['authors'] = item['authors'][8:]\n            item['year'] = item['year'][8:]\n            item['typex'] = item['typex'][8:]\n            item['url'] = item['url'][8:]\n            item['abstract'] = item['abstract'][8:]\n            item['citation'] = item['citation'][8:]\n        print(raw_subjects)\n        print(tmp_subjects_cnt)\n        print(subjects_cnt)\n        yield item\n\n    def get_subjects_cnt(self, response, subject):\n        return 
len(response.xpath('//div[@class=\"content\"]//h4[contains(text(), \"' + subject + '\")]/preceding-sibling::p').extract())\n\n    def duplicate_subjects(self, string, num):\n        return [string] * num\n\n    def remove_html(self, string):\n        pattern = re.compile(r'<[^>]+>')\n        return (re.sub(pattern, '', string).replace('\\n', '').replace(' ', '')).strip()\n","repo_name":"xyjigsaw/ACM-IEEE-arXiv-Spider","sub_path":"acaSpider/spiders/AAAI_Spider.py","file_name":"AAAI_Spider.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"12"} +{"seq_id":"32836725302","text":"'''Write a program that asks the user to enter a square matrix using the following\nformat, illustrated for a square matrix of order 3: 1 2 3; 4 5 6; 7 8 9. That is, the rows are\nseparated by semicolons and the elements in the columns are separated by blank spaces.\nThe program must compute and print the sum of the elements that are not part of the main diagonal.\n(Concept used: matrix input and traversal)'''\n\nentrada = input('\\nEnter a square matrix: ')\nlinhas = entrada.split(';')\nmatriz = []\n\nfor l in range(len(linhas)):\n    colunas = linhas[l].split()\n    matriz.append([])\n\n    for c in range(len(colunas)):\n        matriz[l].append(float(colunas[c]))\n\nsoma = 0\nprint('\\n')\n\nfor l in range(len(matriz)):\n    for c in range(len(matriz[l])):\n        print(f'{matriz[l][c]}', end=' ')\n        if l != c:\n            soma += matriz[l][c]\n    \n    print()\n\nprint(f'\\nSum without the main diagonal elements: {soma}\\n')\n","repo_name":"hilariogrossi/CODEFICO","sub_path":"Módulo-1/Aula-18/usuario-matriz-quadrada.py","file_name":"usuario-matriz-quadrada.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70467322901","text":"import pandas as pd\n\nimport seaborn as sns\nfrom seaborn import FacetGrid\n\nclass TestFigures():\n    def __init__(self, test_df: pd.DataFrame) -> None:\n        self.test_df = test_df\n    \n    def r2_per_metabolite(self):\n        _df = self.test_df.sort_values(['pathway', 'metabolite_id'])\n        g = sns.barplot(\n            data=_df,\n            x='metabolite_id',\n            y='r2',\n            hue='pathway',\n            dodge=False,\n            palette='deep',\n        )\n        # g.set(yscale='symlog')\n        g.set_xticklabels(g.get_xticklabels(), rotation=90)\n    \n    def r2_per_tier(self, plot_args):\n        _df = self.test_df.sort_values(['pathway', 'metabolite_id'])\n        g: FacetGrid = sns.catplot(\n            data=_df, \n            kind='bar',\n            x='metabolite_id',\n            y='r2',\n            hue='strategy',\n            palette='deep',\n            **plot_args\n        )\n        g.despine(left=True)\n        # g.set(yscale='symlog')\n\n        for axes in g.axes.flat:\n            _ = axes.set_xticklabels(axes.get_xticklabels(), rotation=45)\n\n        return g\n        # g.set_xticklabels(g.get_xticklabels(), rotation=90)\n    \n","repo_name":"trdvangraft/thesis","sub_path":"metaengineering/src/visualizers/test_figures.py","file_name":"test_figures.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4728048954","text":"from typing import List, Union\nfrom sqlalchemy.orm import Session\nfrom fastapi import HTTPException, status\nfrom botocore.exceptions import ClientError\nimport logging\n\nfrom .schemas import ServerResumeUpdate, Resume, ResumeFull\nfrom ...db.crud import get_resume, update_resume\nfrom ...util.defs import update_existing_resource\n\n\ndef remove_object_from_bucket(storage_client, bucket: str, key: str):\n    try:\n        
storage_client.delete_object(Bucket=bucket, Key=key)\n except ClientError as e:\n logging.error(e)\n\n\ndef reorganize(\n requested_order: List[Union[str, int]],\n order: List[Union[str, int]],\n):\n processed_requested_order = set(requested_order)\n processed_order = set(order)\n\n if bool(processed_requested_order.difference(processed_order)) or bool(\n processed_order.difference(processed_requested_order)):\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Bad request\")\n\n return requested_order\n\n\ndef is_valid_column(target):\n if target not in ['order', 'mainOrder', 'secondaryOrder']:\n raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Bad request\")\n\n\ndef adjust_resume_orders(\n db: Session,\n resume: ResumeFull,\n action: str,\n target: str,\n creation_update: bool = False,\n):\n exception = HTTPException(status_code=status.HTTP_400_BAD_REQUEST,\n detail=\"Bad request\")\n full_layout_in_use = resume.meta[\"paper\"][\n \"layout\"] == \"full\" and not resume.meta[\"template\"] in [\"calm\"]\n full_content = resume.meta[\"content\"][\"full\"]\n split_content = resume.meta[\"content\"][\"split\"]\n is_in_main_order = target in split_content[\"mainOrder\"]\n is_in_secondary_order = target in split_content[\"secondaryOrder\"]\n split_order = \"mainOrder\" if is_in_main_order else \"secondaryOrder\" if is_in_secondary_order else target\n\n content_update = None\n\n if (action == 'remove'):\n if target not in full_content[\n 'unlisted'] or target not in split_content['unlisted']:\n raise exception\n\n content_update = {\n \"full\": {\n \"unlisted\":\n [*filter(lambda sec: sec != target, full_content[\"unlisted\"])],\n },\n \"split\": {\n \"unlisted\": [\n *filter(lambda sec: sec != target,\n split_content[\"unlisted\"])\n ],\n }\n }\n\n if (isinstance(action, list)):\n is_valid_column(target)\n if full_layout_in_use and not target == 'order':\n raise exception\n\n content_update = {\n \"full\": {\n \"order\": reorganize(action, full_content[\"order\"])\n }\n } if full_layout_in_use else {\n \"split\": {\n split_order: reorganize(action, split_content[split_order])\n }\n }\n\n if (action == \"unlist\"):\n if full_layout_in_use and target in full_content['unlisted']:\n raise exception\n\n if not full_layout_in_use and target in split_content['unlisted']:\n raise exception\n\n content_update = {\n \"full\": {\n \"order\":\n [*filter(lambda sec: sec != target, full_content[\"order\"])],\n \"unlisted\":\n [*resume.meta[\"content\"][\"full\"][\"unlisted\"], target]\n }\n } if full_layout_in_use else {\n \"split\": {\n split_order: [\n *filter(lambda sec: sec != target,\n split_content[split_order])\n ],\n \"unlisted\":\n [*resume.meta[\"content\"][\"split\"][\"unlisted\"], target]\n }\n }\n\n if (action in ['order', 'mainOrder', 'secondaryOrder']):\n if creation_update:\n content_update = {\n \"split\": {\n action: [*split_content[action], target]\n },\n \"full\": {\n \"unlisted\": [*full_content[\"unlisted\"], target]\n }\n } if action in [\"mainOrder\", \"secondaryOrder\"] else {\n \"full\": {\n action: [*full_content[action], target]\n },\n \"split\": {\n \"unlisted\": [*split_content[\"unlisted\"], target]\n }\n }\n\n else:\n if full_layout_in_use and target not in full_content['unlisted']:\n raise exception\n\n if not full_layout_in_use and target not in split_content[\n 'unlisted']:\n raise exception\n\n key = 'full' if action == 'order' else 'split'\n content_update = {\n key: {\n action: [*resume.meta[\"content\"][key][action], 
target],\n                \"unlisted\": [\n                    *filter(\n                        lambda sec: sec != target, full_content[\"unlisted\"]\n                        if key == 'full' else split_content['unlisted'])\n                ]\n            }\n        }\n\n    if (action == 'migrate'):\n        if (not is_in_main_order\n                and not is_in_secondary_order) or full_layout_in_use:\n            raise exception\n\n        content_update = {\n            'split': {\n                \"mainOrder\": [\n                    *filter(lambda sec: sec != target,\n                            split_content['mainOrder'])\n                ],\n                \"secondaryOrder\": [*split_content['secondaryOrder'], target]\n            } if is_in_main_order else {\n                \"secondaryOrder\": [\n                    *filter(lambda sec: sec != target,\n                            split_content['secondaryOrder'])\n                ],\n                \"mainOrder\": [*split_content['mainOrder'], target]\n            }\n        }\n\n    if content_update is None:\n        raise exception\n\n    return update_existing_resource(\n        db,\n        resume.id,\n        ServerResumeUpdate(meta={\"content\": content_update}),\n        Resume,\n        get_resume,\n        update_resume,\n    ).meta[\"content\"]","repo_name":"kosorz/bb-resume-py","sub_path":"backend/app/resources/resumes/defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24929970559","text":"from math import ceil\nimport random\nmarkovTable = {}\nsentenceStarters = []\naverageSentenceLength = 0\n\ndef getInput(inp):\n\tglobal averageSentenceLength\n\tinp = inp.lower().split(\" \")\n\taverageSentenceLength = (averageSentenceLength + len(inp)) / 2\n\tfor i, x in enumerate(inp):\n\t\t# inp is already lower-cased above; track successors by loop index so\n\t\t# repeated words map to the right follower (inp.index always finds the\n\t\t# first occurrence)\n\t\tif not x in markovTable:\n\t\t\tmarkovTable[x] = []\n\t\t\tmarkovTable[x].append(\"\")\n\t\tif i + 1 < len(inp):\n\t\t\tmarkovTable[x].append(inp[i + 1])\n\t\t\tif \"\" in markovTable[x]:\n\t\t\t\tmarkovTable[x].remove(\"\")\n\treturn inp[len(inp) - 1]\n\t\t\t\ndef produceOutput(lastWord):\n\tlength = averageSentenceLength\n\tout = \"\"\n\tx = 0\n\twhile x < length:\n\t\tout += \" \" \n\t\tlastWord = random.choice(markovTable[lastWord])\n\t\tif lastWord == \"\":\n\t\t\tlastWord = random.choice(list(markovTable.keys()))\n\t\tout += lastWord\n\t\tx += 1\n\tprint(\"\\n\" + out + \"\\n\")\n\tgetInput(out)\n\t\nwhile True:\n\ttxt = getInput(input())\n\tproduceOutput(txt)\n","repo_name":"Krobix/interactivestorywriter","sub_path":"interactivestory.py","file_name":"interactivestory.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4341757056","text":"import unittest, platform\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom selenium.webdriver.chrome.options import Options\nimport page\n\noptions = Options()\n\nif platform.system() == \"Linux\":\n    PATH = Service('/opt/selenium/chromedriver')\nelif platform.system() == \"Windows\":\n    PATH = Service(r'C:\sel\chromedriver.exe')\n\nclass PythonOrgSearch(unittest.TestCase):\n\n    def setUp(self):\n        self.driver = webdriver.Chrome(service=PATH, options=options)\n        self.driver.get(\"http://www.python.org\")\n\n    def test_title(self):\n        mainPage = page.MainPage()\n        assert mainPage.is_title_maching()\n\n    def test_example(self):\n        print(\"test\")\n        assert True\n\n    def not_a_test(self):\n        print(\"not a test\")\n    \n    def tearDown(self):\n        self.driver.close()\n\nif __name__ == \"__main__\":\n    
unittest.main()","repo_name":"suregoth/Selenium","sub_path":"unittest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3876201268","text":"from model import pretrain_vgg\nfrom loss import get_loss\nfrom tools import xy_generator\nimport tensorflow as tf\n\n# def scheduler(epoch):\n# if epoch > 2930 :\n# return 0.0001\n# if epoch > 4102 :\n# return 0.0001\n# return 1e-10\n\nyolo=pretrain_vgg()\nyolo.summary()\nyolo.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=5e-6),\n #optimizer=tf.keras.optimizers.SGD(momentum=0.9, decay=0.0005),\n loss=get_loss,\n )\n \nmodel_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath='./myyolo_bbox5_224_hasobj50_fc.h5',\n save_weights_only=True,\n monitor='loss',\n mode='min',\n save_best_only=True)\n# train_feed = tf.data.Dataset.from_generator(xy_generator, \n# (tf.int16, tf.float32),)\n# train_feed = train_feed.batch(3)\n\n#callback = tf.keras.callbacks.LearningRateScheduler(scheduler)\n#yolo.load_weights('./myyolo_bbox5_224_hasobj50_fc.h5')\ntrain_feed = xy_generator()\nyolo.fit(train_feed,\n steps_per_epoch=128,\n epochs=3000, \n verbose=1,\n callbacks=[model_checkpoint_callback])","repo_name":"yg0585/DeepLearn","sub_path":"myyolov1/yolo_train.py","file_name":"yolo_train.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4724065060","text":"#!/usr/bin/env python\n\nimport datetime\nimport pytz\nimport math\nimport trueskill\n\nimport tornado.web\nimport tornado.template\n\nfrom ..model import db_manager\nimport gdt.ratings.rating_system3 as rs\n\ngoko = rs.TrueSkillSystem('Goko', mu=5500, sigma=2250, beta=1375,\n tau=27.5, draw_probability=0.05, k=2,\n daily_sigma_decay=0.01)\n\nisotropish = rs.TrueSkillSystem('Isotropish', mu=25, sigma=25, beta=25,\n tau=0.25, draw_probability=0.05, k=3)\n\nclass GokoProRatingQuery(tornado.web.RequestHandler):\n def new_goko_rating(self, r_a, d_a, r_b, score_a):\n (r_a2, r_b2) = goko.rate2p(r_a, r_b, score_a)\n d_a2 = max(0, int(r_a2.mu - 2 * r_a2.sigma))\n if score_a == 1 and d_a > d_a2:\n delta = 0\n else:\n delta = d_a2 - d_a\n return {\n 'mu': r_a2.mu,\n 'sigma': r_a2.sigma,\n 'displayed': d_a2,\n 'delta': delta\n }\n\n def get(self):\n print('Received goko pro rating query')\n\n query_type = self.get_argument('query_type')\n\n if (query_type == 'rating_list'):\n ratings = db_manager.fetch_all_pro_ratings()\n self.write({'ratings': ratings})\n\n elif (query_type == 'player_rating'):\n player_id = self.get_argument('player_id')\n r = db_manager.fetch_pro_rating(player_id)\n (m, s, d) = r\n self.write({\n 'mu': m,\n 'sigma': s,\n 'displayed': d\n })\n\n elif (query_type == 'probabilities'):\n player_id_A = self.get_argument('player_id_A')\n (m, s, d_a) = db_manager.fetch_pro_rating(player_id_A)\n r_a = trueskill.Rating(m, s)\n\n player_id_B = self.get_argument('player_id_B')\n (m, s, d_b) = db_manager.fetch_pro_rating(player_id_B)\n r_b = trueskill.Rating(m, s)\n\n pgwin = goko.win_prob(r_a, r_b)\n pgloss = goko.win_prob(r_b, r_a)\n pgdraw = 1 - pgwin - pgloss\n\n player_name_A = self.get_argument('player_name_A')\n r_a = db_manager.fetch_ts2_rating(player_name_A, 'isotropish_nobots')\n\n player_name_B = self.get_argument('player_name_B')\n r_b = db_manager.fetch_ts2_rating(player_name_B, 'isotropish_nobots')\n\n piwin = isotropish.win_prob(r_a, r_b)\n 
piloss = isotropish.win_prob(r_b, r_a)\n            pidraw = 1 - piwin - piloss\n\n            p = {\n                'isotropish': {\n                    'p1win': piwin,\n                    'draw': pidraw,\n                    'p1loss': piloss\n                },\n                'goko': {\n                    'p1win': pgwin,\n                    'draw': pgdraw,\n                    'p1loss': pgloss \n                }\n            };\n            self.write({'probs': p})\n\n        elif (query_type == 'record'):\n            pnameA = self.get_argument('player_name_A')\n            pnameB = self.get_argument('player_name_B')\n            self.write(db_manager.get_heads_up_record(pnameA, pnameB))\n\n        elif (query_type == 'assess'):\n            player_id_A = self.get_argument('player_id_A')\n            (m, s, d_a) = db_manager.fetch_pro_rating(player_id_A)\n            r_a = trueskill.Rating(m, s)\n\n            player_id_B = self.get_argument('player_id_B')\n            (m, s, d_b) = db_manager.fetch_pro_rating(player_id_B)\n            r_b = trueskill.Rating(m, s)\n\n            self.write({\n                'a_win': {\n                    'r_a': self.new_goko_rating(r_a, d_a, r_b, 1),\n                    'r_b': self.new_goko_rating(r_b, d_b, r_a, 0),\n                },\n                'a_draw': {\n                    'r_a': self.new_goko_rating(r_a, d_a, r_b, 0.5),\n                    'r_b': self.new_goko_rating(r_b, d_b, r_a, 0.5),\n                },\n                'a_loss': {\n                    'r_a': self.new_goko_rating(r_a, d_a, r_b, 0),\n                    'r_b': self.new_goko_rating(r_b, d_b, r_a, 1),\n                }\n            })\n        self.flush()\n        self.finish()\n","repo_name":"aiannacc/goko-dominion-tools","sub_path":"gdt/ratings/assess.py","file_name":"assess.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"12"} +{"seq_id":"35515825334","text":"from PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import QObject\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtGui import QImage\nimport cv2\nimport numpy as np\nfrom collections import deque\nimport argparse\nimport imutils\nimport threading\n\nblueLower = (100, 43, 46)\nblueUpper = (124, 255, 255)\n\nap = argparse.ArgumentParser()\nargs = vars(ap.parse_args())\n\n\nclass CameraDevice(QObject):\n\n    frame_ready = pyqtSignal(QImage)\n\n    def __init__(self, device_id=0):\n        super().__init__()\n        self.capture = cv2.VideoCapture(device_id)\n        print(type(self.capture))\n        self.timer = QTimer()\n\n        if not self.capture.isOpened():\n            raise ValueError(\"Device not found\")\n\n        self.timer.timeout.connect(self.read_frame)\n        # QTimer.setInterval expects the interval in whole milliseconds\n        self.timer.setInterval(int(1000 / (self.fps or 30)))\n        self.timer.start()\n\n    def __del__(self):\n        self.timer.stop()\n        self.capture.release()\n\n    @property\n    def fps(self):\n        \"\"\"Frames per second.\"\"\"\n        return int(self.capture.get(cv2.CAP_PROP_FPS))\n\n    @property\n    def size(self):\n        \"\"\"Returns the size of the video frames: (width, height).\"\"\"\n        width = int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n        height = int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n        return (width, height)\n\n    def read_frame(self):\n        \"\"\"Read frame into QImage and emit it.\"\"\"\n        success, frame = self.capture.read()\n        if success:\n            img = _convert_array_to_qimage(frame)\n            self.frame_ready.emit(img)\n        else:\n            raise ValueError(\"Failed to read frame\")\n    \n    def start(self):\n        # a Thread object does nothing until .start() is called\n        threading.Thread(target = self.trackingBlueObject, args = ()).start()\n    \n    def stop(self):\n        threading.Thread(target = self.stoptracking, args = ()).start()\n    \n    def stoptracking(self):\n        print('stop')\n    \n    def trackingBlueObject(self):\n        grabbed, frame = self.capture.read()\n        \n        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n        mask = cv2.inRange(hsv, blueLower, blueUpper)\n        mask = cv2.erode(mask, None, iterations=2)\n        mask = cv2.dilate(mask, None, iterations=2)\n        cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n            cv2.CHAIN_APPROX_SIMPLE)[-2]\n        center = None\n        if len(cnts) > 0:\n            c = max(cnts, key=cv2.contourArea)\n            ((x, 
y), radius) = cv2.minEnclosingCircle(c)\n            M = cv2.moments(c)\n            center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n            print(\"{}\\t{}\".format(center[0],center[1] ))\n            if radius > 10:\n                cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 255), 2)\n                cv2.circle(frame, center, 5, (0, 0, 255), -1)\n        ''' \n        pts.appendleft(center)\n        for i in range(1, len(pts)):\n            if pts[i - 1] is None or pts[i] is None:\n                continue\n        \n            thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5)\n            cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)\n        '''\n\n\ndef _convert_array_to_qimage(a):\n    height, width, channels = a.shape\n    bytes_per_line = channels * width\n    cv2.cvtColor(a, cv2.COLOR_BGR2RGB, a)\n    return QImage(a.data, width, height, bytes_per_line, QImage.Format_RGB888)\n","repo_name":"kmolLin/opencv_with_python","sub_path":"testcode/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24901846523","text":"# Guanabara did it in a much simpler way, reusing the variable \"c\", while I created the variable \"cont\" to do\n# the same thing. Besides that, he eliminated the \"cont%2\" by using range(1, 501, 3), which picked only the odd numbers\n\ncont = 1\ns = 0\n\nfor c in range(500):\n    if cont % 2 == 1:\n        if cont % 3 == 0:\n            print(cont)\n            s += cont\n            cont += 1\n        else:\n            cont += 1\n    else:\n        cont += 1\n\nprint('The final sum is {}'.format(s))","repo_name":"fernandorssa/CeV_Python_Exercises","sub_path":"Desafio 48.py","file_name":"Desafio 48.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73918892182","text":"from typing import List\n\n\ndef solution_fail(board: List[List[int]], skills: List[List[int]]) -> int:\n    n, m = len(board), len(board[0])\n    affected = [[0] * m for _ in range(n)]\n    ATTACK = 1\n    DEFENSE = 2\n    # Even prefix sums per row only is 250,000 * 1,000 -> time limit exceeded!\n    for type, r1, c1, r2, c2, degree in skills:\n        for r in range(r1, r2 + 1):\n            affected[r][c1] += degree * -1 if type == ATTACK else degree\n            if c2 + 1 < m:\n                affected[r][c2] += degree * 1 if type == ATTACK else degree * -1\n    for r in range(n):\n        for c in range(m):\n            pass\n\n\ndef solution(board: List[List[int]], skills: List[List[int]]) -> int:\n    n, m = len(board), len(board[0])\n    affected = [[0] * m for _ in range(n)]\n    ATTACK = 1\n    DEFENSE = 2\n    # Even prefix sums per row only is 250,000 * 1,000 -> time limit exceeded!\n    for type, r1, c1, r2, c2, degree in skills:\n        affected[r1][c1] += (degree if type == DEFENSE else degree * -1)\n        if c2 + 1 < m:\n            affected[r1][c2 + 1] += (degree * -1 if type == DEFENSE else degree)\n        if r2 + 1 < n:\n            affected[r2 + 1][c1] += (degree * -1 if type == DEFENSE else degree)\n        if r2 + 1 < n and c2 + 1 < m:\n            affected[r2 + 1][c2 + 1] += (degree if type == DEFENSE else degree * -1)\n\n    # test = [[0] * m for _ in range(n)]\n    # for type, r1, c1, r2, c2, degree in skills:\n    #     for r in range(r1, r2 + 1):\n    #         for c in range(c1, c2 + 1):\n    #             if type == ATTACK:\n    #                 test[r][c] -= degree\n    #             else:\n    #                 test[r][c] += degree\n    #\n    # print(\"test\")\n    # for row in test:\n    #     print(row)\n\n    for r in range(n):\n        for c in range(m):\n            if r - 1 >= 0:\n                affected[r][c] += affected[r - 1][c]\n            if c - 1 >= 0:\n                affected[r][c] += affected[r][c - 1]\n            if r - 1 >= 0 and c - 1 >= 0:\n                affected[r][c] -= affected[r - 1][c - 1]\n    count = 0\n    for r in range(n):\n        for c in range(m):\n            board[r][c] += affected[r][c]\n            if 
board[r][c] > 0:\n                count += 1\n    return count\n\n\nprint(solution([[5, 5, 5, 5, 5], [5, 5, 5, 5, 5], [5, 5, 5, 5, 5], [5, 5, 5, 5, 5]],\n               [[1, 0, 0, 3, 4, 4], [1, 2, 0, 2, 3, 2], [2, 1, 0, 3, 1, 2], [1, 0, 1, 3, 3, 1]]))\n","repo_name":"i960107/algorithm","sub_path":"programmers/파괴되지않은건물.py","file_name":"파괴되지않은건물.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"604009955","text":"from io import TextIOBase\nfrom pathlib import Path\nfrom typing import Any, Mapping, Optional\n\nfrom jinja2 import Environment, PackageLoader, select_autoescape\n\nfrom .config import ConfigBitstream, ConfigByte\nfrom .port import ConfigPort, Port\n\nenv = Environment(loader=PackageLoader(\"kfpga\"), autoescape=select_autoescape())\n\n\nclass Module:\n    def __init__(self, name: str, library: \"Library\") -> None:\n        self.name = name\n        self.config = ConfigBitstream()\n        self.config_port = None\n        self.ports = []\n\n    def add_port(self, port: Port) -> None:\n        try:\n            self.get_port(port.name)\n        except KeyError:\n            pass\n        else:\n            raise ValueError(\n                \"Port with the same name already exists {}\".format(port.name)\n            )\n\n        self.ports.append(port)\n\n    def get_port(self, name: str) -> Port:\n        for port in self.ports:\n            if port.name == name:\n                return port\n\n        raise KeyError(\"Port not found: {}\".format(name))\n\n    def delete_port(self, name: str):\n        for i, port in enumerate(self.ports):\n            if port.name == name:\n                del self.ports[i]\n                return\n\n        raise KeyError(\"Port not found: {}\".format(name))\n\n    def add_data_input(self, name: str, width: Optional[int] = 1) -> None:\n        port = Port(name, Port.Direction.input, width)\n        self.add_port(port)\n\n    def add_data_output(self, name: str, width: Optional[int] = 1) -> None:\n        port = Port(name, Port.Direction.output, width)\n        self.add_port(port)\n\n    def template_ctx(self) -> Mapping[str, Any]:\n        return {\"module\": self}\n\n    def template_name(self) -> str:\n        return \"verilog/kfpga/{}.v\".format(self.name)\n\n    def write_verilog_stream(self, stream: TextIOBase) -> None:\n        template_name = self.template_name()\n        template = env.get_template(template_name)\n\n        ctx = self.template_ctx()\n\n        result = template.render(ctx)\n        stream.write(result)\n\n    def write_verilog_file(self, output_file: Path) -> None:\n        if output_file.is_dir():\n            output_file /= \"{}.v\".format(self.name)\n\n        with output_file.open(\"w\") as f:\n            self.write_verilog_stream(f)\n\n\nclass SequentialModuleMixin(Module):\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.set_clock()\n        self.set_nreset()\n        self.set_enable()\n\n    def set_clock(self) -> None:\n        port = Port(\"clock\", Port.Direction.input, 1)\n        self.add_port(port)\n\n    def set_enable(self) -> None:\n        port = Port(\"enable\", Port.Direction.input, 1)\n        self.add_port(port)\n\n    def set_nreset(self) -> None:\n        port = Port(\"nreset\", Port.Direction.input, 1)\n        self.add_port(port)\n\n\nclass ConfigurableModuleMixin(Module):\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n\n    def add_config(self, name: str, width: Optional[int] = 1, count=1) -> None:\n        config = ConfigByte(name, width, count)\n        self.config.add_config(config)\n\n        if self.config_port is None:\n            self.config_port = ConfigPort(self.config)\n            self.add_port(self.config_port)\n\n\nclass ConfigChainModuleMixin(Module):\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.set_config_chain()\n\n    def 
set_config_chain(self) -> None:\n port = Port(\"config_in\", Port.Direction.input, 1)\n self.add_port(port)\n port = Port(\"config_out\", Port.Direction.output, 1)\n self.add_port(port)\n port = Port(\"config_clock\", Port.Direction.input, 1)\n self.add_port(port)\n port = Port(\"config_enable\", Port.Direction.input, 1)\n self.add_port(port)\n port = Port(\"config_nreset\", Port.Direction.input, 1)\n self.add_port(port)\n","repo_name":"jtremesay/kfpga","sub_path":"kfpga/hdl/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36475950984","text":"import pytest\nfrom .solution import Solution\n\n\n@pytest.mark.parametrize(\n \"inputs, expected\",\n [\n ([0], True),\n ([], False),\n ([2, 1, 1], True),\n ([2, 3, 1, 1, 4], True),\n ([3, 2, 1, 0, 4], False),\n ([2, 5, 0, 0], True),\n ([1, 1, 2, 2, 0, 1, 1], True),\n ([5, 9, 3, 2, 1, 0, 2, 3, 3, 1, 0, 0], True),\n ],\n)\ndef test_solution(inputs, expected):\n sut = Solution()\n\n actual = sut.can_jump(inputs)\n\n assert actual == expected\n","repo_name":"butyr/leetcode-blind-75-questions","sub_path":"jump_game/test_solution.py","file_name":"test_solution.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"3375734841","text":"import os\nimport sys\nimport logging\nimport asyncio\nimport threading\nimport queue\nimport pickle\nfrom typing import Any, Dict, Optional, Union, Mapping\nimport uuid\nfrom uuid import UUID\nimport faiss\nimport hashlib\n\nimport openai\nfrom langchain.document_loaders import (\n TextLoader,\n PyPDFLoader,\n Docx2txtLoader,\n UnstructuredPowerPointLoader,\n)\n\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter # generic\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import FAISS\nfrom langchain.prompts.prompt import PromptTemplate\n\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import (\n AIMessage,\n HumanMessage,\n SystemMessage,\n ChatGeneration,\n ChatResult,\n)\nfrom langchain.chains import (\n LLMChain,\n ConversationalRetrievalChain,\n)\nfrom langchain.chains.question_answering import load_qa_chain\nfrom langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT\nfrom langchain.memory import ConversationBufferWindowMemory\nfrom langchain.callbacks import get_openai_callback\nfrom langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\nfrom langchain.callbacks.base import (\n BaseCallbackHandler,\n AsyncCallbackHandler,\n)\nfrom langchain.callbacks.manager import (\n CallbackManagerForChainRun,\n AsyncCallbackManagerForChainRun,\n)\n\nlogger = logging.getLogger(__name__)\n\ntext_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=1280, chunk_overlap=200)\nembedding_model = {\n 'name': 'openai',\n 'func': None,\n}\n\nopenai_env = {\n 'api_key': None,\n 'api_base': None,\n}\n\nopenai_model = {\n 'name': 'gpt-3.5-turbo',\n 'max_tokens': 4096,\n 'max_prompt_tokens': 3096,\n 'max_response_tokens': 1000\n}\n\n_queue = queue.Queue()\n\ndef setup_openai_env(api_base=None, api_key=None):\n if not openai_env['api_base']:\n openai_env['api_base'] = api_base\n if not openai_env['api_key']:\n openai_env['api_key'] = api_key\n openai.api_base = openai_env['api_base']\n openai.api_key = openai_env['api_key']\n openai.api_version = None\n return 
(openai_env['api_base'], openai_env['api_key'])\n\n\ndef setup_openai_model(model):\n logger.debug(model)\n openai_model.update(model)\n logger.debug(model)\n\n\n# class OutputStreamingCallbackHandler(AsyncCallbackHandler):\nclass OutputStreamingCallbackHandler(BaseCallbackHandler):\n send_token: bool = False\n\n # make it a producer to send us reply\n def on_llm_new_token(self, token: str, **kwargs: Any) -> None:\n if self.send_token:\n _queue.put(token)\n # sys.stdout.write(token)\n # sys.stdout.flush()\n\n def on_chain_start(self, serialized, inputs, **kwargs) -> Any:\n \"\"\"run when chain start running\"\"\"\n # don't stream the output from intermedia steps\n logger.debug('****** launch chain %s', serialized)\n if serialized['name'] == 'StuffDocumentsChain':\n logger.debug('start output streamming')\n self.send_token = True\n\n def on_chain_end(self, outputs: Dict[str, Any], *, run_id: UUID, parent_run_id: Optional[UUID] = None, **kwargs: Any,) -> None:\n \"\"\"Run when chain ends running.\"\"\"\n # _queue.put(-1)\n # return await super().on_chain_end(outputs, run_id=run_id, parent_run_id=parent_run_id, **kwargs)\n\n def on_llm_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:\n \"\"\"Run when LLM errors.\"\"\"\n _queue.put(-1)\n\n def on_chain_error( self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:\n \"\"\"Run when chain errors.\"\"\"\n _queue.put(-1)\n\n\nOSC = OutputStreamingCallbackHandler()\n\n\nclass EmbeddingModel:\n def __init__(self):\n self.name = None\n self._function = None\n\n @property\n def function(self):\n \"\"\"embedding function of the model\"\"\"\n if not self._function:\n setup_openai_env()\n self.name = 'openai'\n self._function = OpenAIEmbeddings()\n return self._function\n\n\nclass ChatModel:\n def __init__(self):\n self.name = None\n self._model = None\n\n @property\n def model(self):\n if not self._model:\n api_base, api_key = setup_openai_env()\n self.name = 'open_ai'\n max_response_tokens = openai_model['max_prompt_tokens']\n if max_response_tokens > 1024:\n max_response_tokens = 1024\n self._model = ChatOpenAI(\n api_key=api_key,\n api_base=api_base,\n model_name=openai_model['name'],\n max_tokens=max_response_tokens,\n streaming=True,\n # callbacks=[OSC],\n )\n return self._model\n\n\nembedding_model = EmbeddingModel()\nchat_model = ChatModel()\n\ndef pickle_faiss(db):\n idx = faiss.serialize_index(db.index)\n pickled = pickle.dumps((db.docstore, db.index_to_docstore_id, idx))\n return pickled\n\ndef unpick_faiss(pickled, embedding_func = None):\n if not embedding_func:\n embedding_func = embedding_model.function\n docstore, index_to_docstore_id, idx = pickle.loads(pickled)\n index = faiss.deserialize_index(idx)\n db = FAISS(embedding_func.embed_query, index, docstore, index_to_docstore_id)\n return db\n\ndef get_embedding_document(file, mime):\n \"\"\"return a pickled faiss vectorsotre\"\"\"\n\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types\n loaders = {\n 'text/plain': TextLoader,\n 'application/pdf': PyPDFLoader,\n 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': Docx2txtLoader,\n 'application/vnd.openxmlformats-officedocument.presentationml.presentation': UnstructuredPowerPointLoader,\n }\n\n loader = loaders[mime](file)\n docs = loader.load()\n\n embeddings_function = embedding_model.function\n\n for doc in docs:\n hash_str = str(hashlib.md5(str(doc).encode()).hexdigest())\n doc.metadata['hash'] = hash_str # track where chunk 
from\n documents = text_splitter.split_documents(docs)\n db = FAISS.from_documents(documents, embeddings_function)\n\n return pickle_faiss(db)\n\n\ncondense_question_template = \"\"\"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question (in the same language of Follow Up Input):\"\"\"\nMY_CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(condense_question_template)\n\n\ndef langchain_doc_chat(messages):\n \"\"\"use langchain to process a list of messages\"\"\"\n\n db = messages['faiss_store']\n retriever = db.as_retriever(\n search_type=\"mmr\",\n #search_type=\"similarity\",\n search_kwargs={\n 'k': 3,\n },\n )\n\n memory = ConversationBufferWindowMemory(memory_key='chat_history', return_messages=True, k=32)\n for msg in messages['messages']:\n if msg.get('role', '') == 'assistant':\n memory.chat_memory.add_ai_message(msg['content'])\n else: # user or system message\n memory.chat_memory.add_user_message(msg['content'])\n\n question_generator = LLMChain(\n llm=chat_model.model,\n prompt=MY_CONDENSE_QUESTION_PROMPT\n )\n doc_chain = load_qa_chain(\n llm=chat_model.model,\n chain_type=\"map_reduce\",\n )\n chain = ConversationalRetrievalChain(\n retriever=retriever,\n memory=memory,\n question_generator=question_generator,\n combine_docs_chain=doc_chain,\n )\n\n results = []\n msgs = messages['messages']\n q = msgs[-1]['content']\n logger.debug(q)\n OSC.send_token = False\n\n async def do_chain():\n result = await chain.acall(\n {'question': q},\n callbacks=[OSC],\n )\n _queue.put(-1)\n return result\n\n def ctx_mgr():\n result = asyncio.run(do_chain())\n results.append(result)\n\n thread = threading.Thread(target=ctx_mgr)\n thread.start()\n\n while True:\n item = _queue.get()\n # logger.debug('>>>>\\n>>>> partial item %s', item)\n if item == -1:\n logger.debug('langchan done')\n yield {\n 'content': item,\n 'status': 'done',\n }\n _queue.task_done()\n break\n yield {\n 'content': item,\n 'status': None,\n }\n _queue.task_done()\n\n thread.join()\n logger.debug('langchan exit with %s', results[0]) # should output a coroutine\n\n return\n","repo_name":"WongSaang/chatgpt-ui-server","sub_path":"chat/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":8637,"program_lang":"python","lang":"en","doc_type":"code","stars":225,"dataset":"github-code","pt":"12"} +{"seq_id":"1648125468","text":"from tkinter import (\n Tk as tk_Tk,\n Frame as tk_Frame,\n Canvas as tk_Canvas,\n Button as tk_Button,\n HORIZONTAL as TK_HORIZONTAL,\n Scale as tk_Scale,\n IntVar as tk_IntVar,\n StringVar as tk_StringVar,\n Label as tk_Label)\nfrom contextlib import redirect_stdout as clib_redirect_stdout\nfrom argparse import ArgumentParser, Namespace\nfrom pydub import AudioSegment\nfrom typing import List, Tuple\n\n\nfrom .console_ui import (\n get_current_version, init_packets_processor, exit_with_exception)\nfrom vorbis.vorbis_main import FileDataException\n\nwith clib_redirect_stdout(None):\n from pygame.mixer import (\n music as pygame_music,\n Sound as pygame_Sound,\n pre_init as pygame_mixer_pre_init,\n init as pygame_mixer_init)\n\n\n# class InfoNotebook(ttk_Notebook):\n# \"\"\"Class represents two upper info tabs: amplitude and with coverart if \\\n# presented\"\"\"\n# def __init__(self, filepath, coverart_info, **kw_args):\n# super().__init__(**kw_args)\n# self.grid(row=0, column=0, sticky='NESW')\n# self._filepath = filepath\n#\n# 
self._create_tabs(coverart_info)\n#\n# self.grid(sticky='NESW')\n#\n# def _create_tabs(self, coverart_info):\n# \"\"\"Method creates tabs for notebook\"\"\"\n# self._coverart_frame = tk_Frame(master=self)\n# self._coverart_frame.columnconfigure(0, weight=1)\n# self._coverart_frame.rowconfigure(0, weight=1)\n# self._coverart_canvas = tk_Canvas(master=self._coverart_frame)\n# self._coverart_canvas.grid(row=0, column=0)\n#\n# final_image: bytes = self._decode_base64_to_bytes(coverart_info[1])\n#\n# if '=image' in coverart_info[0]:\n# coverart_filepath = ntpath_basename(self._filepath)\n# coverart_filepath = coverart_filepath.split('.')[0]\n# coverart_filepath += '.' + coverart_info[0].split('/')[1]\n# coverart_filepath = os_path.join(\n# os_path.dirname(os_path.pardir),\n# 'resources',\n# coverart_filepath)\n# if not os_path.isfile(coverart_filepath):\n# coverart_file = open(coverart_filepath, 'wb')\n# coverart_file.write(final_image)\n#\n# try:\n# self._coverart_image = pil_PhotoImage(\n# pil_Image.open(BytesIO(final_image)))\n# except OSError:\n# pass\n# else:\n# self._coverart_canvas.create_image(\n# (0, 0),\n# image=self._coverart_image,\n# anchor='nw')\n# self._coverart_canvas['width'] = (\n# self._coverart_image.width())\n# self._coverart_canvas['height'] = (\n# self._coverart_image.height())\n#\n# self.add(self._coverart_frame, text='Coverart')\n#\n# self._amplitude_tab = tk_Canvas(master=self)\n#\n# print(self._amplitude_tab['width'])\n# print(self._amplitude_tab['height'])\n#\n# self.add(self._amplitude_tab, text='Amplitude')\n#\n# @staticmethod\n# def _decode_base64_to_bytes(in_data) -> bytes:\n# \"\"\"Function decodes base64 data to file\"\"\"\n# assert isinstance(in_data, bytes)\n#\n# out_data = b''\n# bits_buffer = ''\n#\n# for byte in in_data:\n# if b'A'[0] <= byte <= b'Z'[0]:\n# bits_buffer += bin(byte - 65)[2:].zfill(6)\n# elif b'a'[0] <= byte <= b'z'[0]:\n# bits_buffer += bin(byte - 71)[2:].zfill(6)\n# elif b'0'[0] <= byte <= b'9'[0]:\n# bits_buffer += bin(byte + 4)[2:].zfill(6)\n# elif bytes([byte]) == b'+':\n# bits_buffer += bin(62)[2:].zfill(6)\n# elif bytes([byte]) == b'/':\n# bits_buffer += bin(63)[2:].zfill(6)\n# else:\n# # Got unknown symbol\n# return b''\n#\n# if len(bits_buffer) > 7:\n# out_data += bytes([int(bits_buffer[:8], 2)])\n# bits_buffer = bits_buffer[8:]\n# if bits_buffer != '':\n# out_data += bytes([int(bits_buffer, 2)])\n#\n# return out_data\n\n\nclass AudioToolbarFrame(tk_Frame):\n \"\"\"Class represents audio toolbar frame\"\"\"\n def __init__(self, filepath, **kwargs):\n super().__init__(**kwargs)\n self._filepath = filepath\n self._paused = False\n self._time_offset = 0.0\n\n self.grid(row=1, column=0, sticky='NESW')\n\n self.rowconfigure(0, minsize=25)\n self.rowconfigure(2, minsize=25)\n self.columnconfigure(0, minsize=25)\n self.columnconfigure(1, minsize=35)\n self.columnconfigure(2, minsize=50)\n self.columnconfigure(3, weight=1)\n self.columnconfigure(4, minsize=50)\n\n self._play_button = tk_Button(\n master=self,\n anchor='center',\n text='Play',\n command=self._play_button_hit)\n self._play_button.grid(\n row=1, column=1, sticky='NESW')\n\n self._create_time_scale_widgets()\n\n self._volume_scale_var = tk_IntVar()\n self._volume_scale = tk_Scale(\n master=self,\n sliderlength=20,\n from_=100,\n to=0,\n variable=self._volume_scale_var,\n command=self._volume_scale_moved)\n self._volume_scale.grid(row=0, rowspan=3, column=5)\n self._volume_scale_var.set(100)\n\n def _play_button_hit(self):\n \"\"\"Method contains actions when play button 
hit\"\"\"\n if pygame_music.get_pos() == -1:\n pygame_music.load(self._filepath)\n\n current_track = pygame_Sound(self._filepath)\n self._time_scale['to'] = current_track.get_length()\n self._play_button['text'] = 'Stop'\n\n pygame_music.play()\n pygame_music.set_pos(float(self._time_scale_var.get()))\n elif self._paused:\n self._play_button['text'] = 'Stop'\n pygame_music.unpause()\n self._paused = False\n else:\n self._play_button['text'] = 'Play'\n pygame_music.pause()\n self._paused = True\n\n @staticmethod\n def _volume_scale_moved(new_position):\n \"\"\"Method contains actions when volume scale moved\"\"\"\n pygame_music.set_volume(float(new_position) * 0.01)\n\n def _create_time_scale_widgets(self):\n \"\"\"Method create time scale itself and related to it widgets\"\"\"\n self._time_scale_var = tk_IntVar()\n\n self._time_scale = tk_Scale(\n master=self,\n orient=TK_HORIZONTAL,\n length=150,\n sliderlength=20,\n variable=self._time_scale_var,\n showvalue=0,\n command=self._time_scale_moved)\n\n current_track = pygame_Sound(self._filepath)\n\n self._time_scale['to'] = current_track.get_length()\n\n self._time_label_var = tk_StringVar()\n\n self._time_label_var.set('0:00')\n self._time_scale_var.trace(\n 'w',\n lambda *args: self._time_label_var.set(\n ''.join([str(self._time_scale_var.get() // 60),\n ':',\n str(self._time_scale_var.get() % 60).zfill(2)])))\n\n self._time_label = tk_Label(\n master=self, textvariable=self._time_label_var)\n\n self._time_scale.grid(row=1, column=3, sticky='EW')\n self._time_label.grid(row=2, column=3)\n\n @staticmethod\n def _time_scale_moved(new_position):\n \"\"\"Method contains actions when time scale moved\"\"\"\n if pygame_music.get_pos() != -1:\n pygame_music.set_pos(float(new_position))\n\n # WouldBeBetter: Fix time calculations below. They are so terrible. 
:(\n # Still works, tho.\n\n def time_scale_tick(self, root_: tk_Tk):\n \"\"\"Method synchronizes time scale with music progression\"\"\"\n if pygame_music.get_pos() != -1:\n if (abs(pygame_music.get_pos() // 1000 - int(self._time_offset)\n - self._time_scale_var.get()) > 1):\n self._time_offset = float(\n pygame_music.get_pos() / 1000\n - self._time_scale_var.get())\n\n self._time_scale_var.set(\n pygame_music.get_pos() // 1000 - int(self._time_offset))\n\n elif self._play_button['text'] == 'Stop':\n self._play_button['text'] = 'Play'\n self._time_scale_var.set(0)\n\n root_.after(100, self.time_scale_tick, root_)\n\n def get_current_time_in_millis(self) -> int:\n result: int = int(pygame_music.get_pos() - self._time_offset * 1000)\n\n if result < 0:\n return 0\n else:\n return result\n\n\ndef run_graphics_launcher():\n def _parse_arguments() -> Namespace:\n parser = ArgumentParser(\n description='Processes .ogg audiofile with vorbis coding and '\n 'plays it',\n usage='launcher_console.py [options] ')\n\n parser.add_argument(\n '-v', '--version',\n help=\"print program's current version number and exit\",\n action='version',\n version=get_current_version())\n\n parser.add_argument(\n '-d', '--debug',\n help='turn on debug mode',\n action='store_true')\n\n parser.add_argument(\n 'filepath',\n help='path to .ogg audiofile',\n type=str)\n\n return parser.parse_args()\n\n arguments: Namespace = _parse_arguments()\n\n # For better errors handling\n init_packets_processor(arguments.filepath, arguments)\n\n # Init music player\n pygame_mixer_pre_init(44100, -16, 2, 2048)\n pygame_mixer_init()\n\n root = tk_Tk()\n\n root.title(\"Ogg Vorbis\")\n root.resizable(False, False)\n\n # def _get_coverart() -> Tuple[str, bytes]:\n # if (hasattr(packets_processor.logical_stream,\n # 'user_comment_list_strings')):\n # coverart_index: int = -1\n #\n # for i, comment_str in enumerate(\n # packets_processor\n # .logical_stream.user_comment_list_strings):\n # if (comment_str.startswith('COVERARTMIME=')\n # and len(packets_processor.logical_stream\n # .user_comment_list_strings) > i + 1\n # and (packets_processor.logical_stream\n # .user_comment_list_strings[i + 1]\n # .startswith('COVERART='))):\n # coverart_index = i\n #\n # break\n #\n # if coverart_index != -1:\n # return (\n # packets_processor.logical_stream\n # .user_comment_list_strings[coverart_index],\n # packets_processor.logical_stream\n # .user_comment_list_strings[coverart_index + 1]\n # .encode()[9:])\n #\n # return '', b''\n #\n # info_notebook = InfoNotebook(\n # coverart_info=_get_coverart(),\n # filepath=arguments.filepath,\n # master=root,\n # padding=(0, 0))\n\n toolbar_frame = AudioToolbarFrame(\n master=root,\n background='blue',\n filepath=arguments.filepath)\n\n toolbar_frame.time_scale_tick(root)\n\n amplitude_canvas: tk_Canvas = tk_Canvas(\n master=root,\n width=1200,\n height=600)\n\n amplitude_canvas.grid(row=0, column=0)\n\n ogg_audio: AudioSegment = AudioSegment.from_ogg(arguments.filepath)\n\n if ogg_audio.channels > 2:\n exit_with_exception(\n \"Amount of channels more than 2\",\n FileDataException(\n f\"[ogg_audio.channels] > 2: {ogg_audio.channels}\"),\n arguments.debug)\n\n def get_amplitudes_per_channel(audio: AudioSegment) -> List[List[int]]:\n result_amplitude: List[List[int]] = [[] for _ in range(audio.channels)]\n bytes_in_frame: int = audio.channels * audio.sample_width\n\n for frame_shift in range(len(audio.raw_data) // bytes_in_frame):\n for channel in range(audio.channels):\n sample_start: int = (\n frame_shift * 
bytes_in_frame\n + channel * audio.sample_width)\n sample_end: int = sample_start + audio.sample_width\n\n sample_bytes: bytes = audio.raw_data[sample_start:sample_end]\n\n result_amplitude[channel].append(\n int.from_bytes(sample_bytes, byteorder='big'))\n\n return result_amplitude\n\n pcm_max: int = 2**(ogg_audio.sample_width * 8)\n\n def draw_plot(amplitude: List[int], axes_zero_coord: Tuple[int, int]):\n if len(amplitude) == 0:\n return\n\n x_drawing_offset: float = 1100 / len(amplitude)\n y_drawing_offset: float = 200 / pcm_max\n previous_dot_coord: Tuple[float, float] = (\n axes_zero_coord[0],\n axes_zero_coord[1] - y_drawing_offset * amplitude[0])\n\n for i in range(1, len(amplitude)):\n amplitude_canvas.create_line(\n previous_dot_coord[0],\n previous_dot_coord[1],\n previous_dot_coord[0] + x_drawing_offset,\n axes_zero_coord[1] - y_drawing_offset * amplitude[i],\n fill='blue')\n\n previous_dot_coord = (\n previous_dot_coord[0] + x_drawing_offset,\n axes_zero_coord[1] - y_drawing_offset * amplitude[i])\n\n def draw_plots():\n amplitude_canvas.delete('all')\n\n ms_in_block: int = 5\n\n current_ms_block: int = (\n toolbar_frame.get_current_time_in_millis()\n // ms_in_block * ms_in_block)\n\n amplitudes: List[List[int]] = (\n get_amplitudes_per_channel(\n ogg_audio[current_ms_block:current_ms_block + ms_in_block]))\n\n # Thick line-separator in the center\n amplitude_canvas.create_line(0, 300, 1200, 300, fill='black', width=3)\n\n # Top axis X\n amplitude_canvas.create_line(0 + 50, 300 - 50, 1200 - 50, 300 - 50)\n # Top axis Y\n amplitude_canvas.create_line(0 + 50, 300 - 50, 0 + 50, 50)\n # Top zero\n amplitude_canvas.create_text(0 + 50 - 5, 300 - 50 + 10, text='0')\n # Top Y axis legend (PCM max value)\n amplitude_canvas.create_text(\n 0 + 50 - 25,\n 50,\n text=str(pcm_max))\n # Top X axis legend ([ms_in_block] ms in audio segment)\n amplitude_canvas.create_text(\n 1200 - 50, 300 - 50 + 10, text=f'{ms_in_block}, ms')\n\n draw_plot(amplitudes[0], (50, 300 - 50))\n\n if ogg_audio.channels == 2:\n # Bottom axis X\n amplitude_canvas.create_line(0 + 50, 600 - 50, 1200 - 50, 600 - 50)\n # Bottom axis Y\n amplitude_canvas.create_line(0 + 50, 300 + 50, 0 + 50, 600 - 50)\n # Bottom zero\n amplitude_canvas.create_text(0 + 50 - 5, 600 - 50 + 10, text='0')\n # Bottom Y axis legend (PCM max value)\n amplitude_canvas.create_text(\n 0 + 50 - 25,\n 300 + 50,\n text=str(2 ** (ogg_audio.sample_width * 8)))\n # Bottom X axis legend ([ms_in_block] ms in audio segment)\n amplitude_canvas.create_text(\n 1200 - 50, 600 - 50 + 10, text=f'{ms_in_block}, ms')\n\n draw_plot(amplitudes[1], (50, 600 - 50))\n\n root.after(ms_in_block, draw_plots)\n\n draw_plots()\n\n root.mainloop()\n","repo_name":"semyon-v-dot/ogg_vorbis","sub_path":"ui/graphics_ui.py","file_name":"graphics_ui.py","file_ext":"py","file_size_in_byte":15583,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"3047694896","text":"'''\r\nRules for painting the houses\r\n\r\n1. The color of house 1 must not be the same as the color of house 2.\r\n2. The color of house N must not be the same as the color of house N-1.\r\n3. The color of house i (2 ≤ i ≤ N-1) must not be the same as the colors of houses i-1 and i+1.\r\n'''\r\n\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\ntable = [list(map(int, input().split())) for _ in range(n)]\r\ndp = [[int(1e9)] * 3 for _ in range(n)]\r\n\r\ndp[0][0], dp[0][1], dp[0][2] = table[0][0], table[0][1], table[0][2]\r\n\r\nfor i in range(1, n):\r\n for j in range(3):\r\n dp[i][(j + 1) % 3] = min(dp[i][(j + 1) % 3], dp[i - 1][j] + table[i][(j + 1) % 3])\r\n dp[i][(j + 2) % 3] = min(dp[i][(j + 2) % 3], dp[i - 1][j] + table[i][(j + 2) % 3])\r\n\r\nprint(min(dp[-1][0], dp[-1][1], dp[-1][2]))","repo_name":"jgh05168/AlgorithmStudy","sub_path":"백준/Silver/1149. RGB거리/RGB거리.py","file_name":"RGB거리.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11696491436","text":"\"\"\"\r\nComparison of false transient and policy iteration algorithms.\r\n\r\nsolve_FT takes argument check = 0 or 1:\r\n * check = 1: solve_FT begins at values found in solve_PFI.\r\n * check = 0: begins at logarithmic quantities.\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy import interpolate\r\nimport time, math, itertools\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nimport MF_classes\r\n\r\n\"\"\"\r\nSet common parameters\r\n\"\"\"\r\n\r\ngamma, rho, Pi, rlow = 2., [0.1,0.075], 0.1, 0.0\r\nN, bnd = (40,20), [[0,1], [0.1,0.3]]\r\nmbar, max_iter_eq, pol_maxiter = 4, 12000, 12\r\ntheta, sigsigbar = 0.5, 0.15\r\nDelta_y, tol = 10**-1, 10**-6\r\nrelax = [0.0]\r\ndt = [1*10**-3, 2*10**-3, 3*10**-3]\r\nX, time_PFI, time_FT, time_FT2 = {}, {}, {}, {}\r\nagg_PFI, agg_FT, agg_FT2 = {}, {}, {}\r\ndifference, iterations = {}, {}\r\ndifference2, iterations2 = {}, {}\r\n\r\nfor i in range(len(dt)):\r\n X[i] = MF_classes.MF_corr(rho=rho,gamma=gamma,Pi=Pi,rlow=rlow, sigsigbar=sigsigbar, \\\r\n theta=theta,N=N,X_bnd=bnd,tol=tol,Delta_y=Delta_y,max_iter_eq=max_iter_eq,dt=dt[i], \\\r\n mbar=mbar,pol_maxiter=pol_maxiter,relax=relax)\r\n agg_PFI[i], time_PFI[i] = X[i].solve_PFI()\r\n agg_FT[i], (difference[i], iterations[i]), time_FT[i] = X[i].solve_FT(0)\r\n agg_FT2[i], (difference2[i], iterations2[i]), time_FT2[i] = X[i].solve_FT(1)\r\n\r\nfig,ax = plt.subplots()\r\nfor i in range(len(dt)):\r\n ax.plot(iterations[i], np.log10(difference[i]), label=\"$\\Delta_t$ = {0}\".format(np.round(10**3*dt[i],2)) + \" x $10^{-3}$\", linewidth=2)\r\nplt.title('Beginning at logarithmic quantities')\r\nax.set_ylabel('log$_{10}$(E)')\r\nax.set_xlabel('Number of iterations')\r\nax.legend(loc = 'upper right')\r\ndestin = '../../figures/diff_MF.eps'\r\nplt.savefig(destin, format='eps', dpi=1000)\r\nplt.show()\r\n\r\nfig,ax = plt.subplots()\r\nfor i in range(len(dt)):\r\n ax.plot(iterations2[i], np.log10(difference2[i]), label=\"$\\Delta_t$ = {0}\".format(np.round(10**3*dt[i],2)) + \" x $10^{-3}$\", linewidth=2)\r\nplt.title('Beginning at quantities found with policy iteration')\r\nax.set_ylabel('log$_{10}$(E)')\r\nax.set_xlabel('Number of iterations')\r\nax.legend(loc = 'upper right')\r\ndestin = '../../figures/diff_MF2.eps'\r\nplt.savefig(destin, format='eps', dpi=1000)\r\nplt.show()\r\n","repo_name":"tphelanECON/EslamiPhelan_MCA","sub_path":"code/macro_finance/FT_PI.py","file_name":"FT_PI.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"43666194386","text":"import asyncio\nimport json\nimport time\nfrom os import system\nfrom random 
import randint\nfrom discord.ext import commands\nimport re\nimport httpx\nfrom colorama import Fore, init\nimport platform\n\n\n\ninit()\ndata = {}\n\nwith open('token.json') as sex:\n data = json.load(sex)\ntoken = data['token']\ninvitesniper = data['invitesnipe']\n\nos = platform.system()\n\nif os == \"Windows\":\n system(\"cls\")\nelse:\n system(\"clear\")\n print(chr(27) + \"[2J\")\n\nprint(Fore.RED + \"\"\"\\\n\n █████╗ ███╗ ███╗██╗███████╗\n██╔══██╗████╗ ████║██║╚══███╔╝\n███████║██╔████╔██║██║ ███╔╝ \n██╔══██║██║╚██╔╝██║██║ ███╔╝ \n██║ ██║██║ ╚═╝ ██║██║███████╗\n╚═╝ ╚═╝╚═╝ ╚═╝╚═╝╚══════╝\n \n\n \"\"\".replace('█', '█' + Fore.CYAN))\nprint(\"========================================\")\nEagle = commands.Bot(command_prefix=\".\", self_Eagle=True)\nready = False\n\ncodeRegex = re.compile(\"(discord.com/gifts/|discordapp.com/gifts/|discord.gift/)([a-zA-Z0-9]+)\")\n\n\ntry:\n @Eagle.event\n async def on_message(ctx):\n global ready\n if not ready:\n print(Fore.CYAN + 'AMIZ Sniper | ON\\n' + Fore.LIGHTBLUE_EX + ' [DEV] - stoned.eagle#0001' + 'servers' + str(\n len(Eagle.guilds)) + ' Servers 🔫\\n' + Fore.RESET)\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(f\"{Fore.LIGHTGREEN_EX}[START] - Eagle is ready\")\n if invitesniper == 'True':\n print(Fore.LIGHTBLUE_EX + \"[ENABLED] - Invite Sniper: ON\")\n else:\n print(\"[DISABLED] - Invite Sniper: OFF\")\n ready = True\n if codeRegex.search(ctx.content):\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n code = codeRegex.search(ctx.content).group(2)\n\n start_time = time.time()\n if len(code) < 16:\n try:\n print(\n Fore.RED + \"[INVALID] - Invalid Code: \" + code + \" From \" + ctx.author.name + \"#\" + ctx.author.discriminator + \". 
|\" + ctx.jump_url )\n except:\n print(\n Fore.RED + \"[INVALID] - Invalid Code | \" + code + \" From \" + ctx.author.name + \"#\" + ctx.author.discriminator + Fore.RESET)\n\n else:\n async with httpx.AsyncClient() as client:\n result = await client.post(\n 'https://discordapp.com/api/v6/entitlements/gift-codes/' + code + '/redeem',\n json={'channel_id': str(ctx.channel.id)},\n headers={'authorization': token, 'user-agent': 'Mozilla/5.0'})\n delay = (time.time() - start_time)\n try:\n print(\n Fore.LIGHTGREEN_EX + \"[-] Sniped code: \" + Fore.LIGHTRED_EX + code + Fore.RESET + \" From \" + ctx.author.name + \"#\" + ctx.author.discriminator + Fore.LIGHTMAGENTA_EX + \" [\" + ctx.guild.name + \" > \" + ctx.channel.name + \"]\" + Fore.RESET)\n except:\n print(\n Fore.LIGHTGREEN_EX + \"[-] Sniped code: \" + Fore.LIGHTRED_EX + code + Fore.RESET + \" From \" + ctx.author.name + \"#\" + ctx.author.discriminator + Fore.RESET)\n\n if 'This gift has been redeemed already' in str(result.content):\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(Fore.LIGHTYELLOW_EX + \"[REDEEMED] - Code has been already redeemed\" + Fore.RESET,\n end='')\n elif 'nitro' in str(result.content):\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(Fore.GREEN + f\"[SNIPED] - GG, Nitro Applied to{Eagle.user}\" + Fore.RESET, end='')\n elif 'Unknown Gift Code' in str(result.content):\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(Fore.LIGHTRED_EX + \"[-] Invalid Code\" + Fore.RESET, end=' ')\n print(\" Delay:\" + Fore.GREEN + \" %.3fs\" % delay + Fore.RESET)\n elif (('**giveaway**' in str(ctx.content).lower() or ('react with' in str(\n ctx.content).lower() and 'giveaway' in str(ctx.content).lower()))):\n try:\n await asyncio.sleep(randint(100, 200))\n await ctx.add_reaction(\"🎉\")\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(\n Fore.LIGHTYELLOW_EX + \"[-] Enter Giveaway \" + Fore.LIGHTMAGENTA_EX + \" [\" + ctx.guild.name + \" > \" + ctx.channel.name + \"]\" + Fore.RESET)\n except:\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n print(\n Fore.RED + \"[ERROR] - Something went wrong with Giveaway Sniper \" + Fore.LIGHTMAGENTA_EX + \" [\" + ctx.guild.name + \" > \" + ctx.channel.name + \"]\" + Fore.RESET)\n elif '<@' + str(Eagle.user.id) + '>' in ctx.content and (\n 'giveaway' in str(ctx.content).lower() or 'won' in ctx.content or 'winner' in str(\n ctx.content).lower()):\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n try:\n won = re.search(\"You won the \\*\\*(.*)\\*\\*\", ctx.content).group(1)\n except:\n won = \"UNKNOWN\"\n print(\n Fore.GREEN + \"[GIVEAWAY] - Giveaway Won: \" + Fore.LIGHTCYAN_EX + won + Fore.LIGHTMAGENTA_EX + \" [\" + ctx.guild.name + \" > \" + ctx.channel.name + \"]\" + Fore.RESET)\n\n elif 'discord.gg' in str(ctx.content).lower():\n print(Fore.LIGHTBLUE_EX + time.strftime(\"%H:%M:%S \", time.localtime()) + Fore.RESET, end='')\n try:\n if invitesniper == 'True':\n print(Fore.BLUE + \"[INVITE] - Invite Found\")\n else:\n return\n except:\n print(\"[INFO] - Invite not resolved\")\n if invitesniper == 'True':\n print(\n Fore.GREEN + \"[INVITE] - Invite Found: \" + Fore.LIGHTCYAN_EX + ctx.content + Fore.LIGHTMAGENTA_EX + \" [\" + ctx.guild.name + \" > \" + ctx.channel.name + \"]\" + 
Fore.RESET)\n else:\n return\n\n Eagle.run(token, bot=False)\nexcept:\n print(Fore.LIGHTRED_EX + \"[ERROR] - Invalid Token Detected\")\n print(Fore.LIGHTBLUE_EX + \"[TIP] - Put your Token in token.json\")\n\n time.sleep(500)\n print(\"[BRUH] - bro fix ur token lmfao\")\n","repo_name":"3xnull/Discord-Sniper","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"38056694424","text":"from flask import json\nimport include.conexion as cnx \nfrom include.EmpleadoVO import EmpleadoVO\nfrom include.LogIn_VO import LogInVO\n\nclass EmpleadoDAO:\n def __init__(self):\n self.__tabla = \"Empleado\"\n\n def findMiembrosMismoDepartamento(self, departamento):\n try:\n conn=cnx.mysql.connect()\n cursor=conn.cursor()\n query_select=('SELECT Nombre, Telefono, Empresa, ID_LoginEmpleado, departamento FROM Empleado WHERE departamento = %s') \n values=(departamento) \n cursor.execute(query_select, values)\n data=cursor.fetchall()\n listaVO=[]\n for fila in data:\n vo = EmpleadoVO(1, fila[0], '', fila[1], fila[2], fila[3], fila[4])\n listaVO.append(vo)\n return listaVO\n except Exception as e:\n return json.dumps({'error':str(e)})\n finally: \n cursor.close()\n conn.close()\n\n def updateUser(self, nombre, telefono, id):\n try:\n conn=cnx.mysql.connect()\n cursor=conn.cursor()\n query_select=('Update Empleado SET Nombre = %s, Telefono = %s WHERE ID_LoginEmpleado = %s') \n values=(nombre, telefono, id) \n cursor.execute(query_select, values)\n conn.commit()\n print(cursor.rowcount, \"record(s) affected\")\n except Exception as e:\n return json.dumps({'error':str(e)})\n finally: \n cursor.close()\n conn.close()\n\n def finUser(self, idEmpleado):\n try:\n conn=cnx.mysql.connect()\n cursor=conn.cursor()\n query_select=('SELECT Nombre, Telefono, Empresa, ID_LoginEmpleado, departamento FROM Empleado WHERE ID_LoginEmpleado = %s') \n values=(idEmpleado) \n cursor.execute(query_select, values)\n data=cursor.fetchall()\n listaVO=[]\n for fila in data:\n vo = EmpleadoVO(1, fila[0], '', fila[1], fila[2], fila[3], fila[4])\n listaVO.append(vo)\n return listaVO\n except Exception as e:\n return json.dumps({'error':str(e)})\n finally: \n cursor.close()\n conn.close()\n\n\n\n def findEmail(self, email):\n try:\n print(email)\n conn=cnx.mysql.connect()\n cursor=conn.cursor()\n query_select=('SELECT Correo FROM Login_Empleado WHERE Correo = %s') \n values=(email) \n cursor.execute(query_select, values)\n data=cursor.fetchall()\n listaVO=[]\n for fila in data:\n vo = LogInVO(fila[0], fila[1])\n listaVO.append(vo)\n return listaVO\n except Exception as e:\n return json.dumps({'error':str(e)})\n finally: \n cursor.close()\n conn.close()\n\n \n def insert(self, vo):\n try:\n conn=cnx.mysql.connect()\n cursor=conn.cursor()\n consulta=(\"INSERT INTO Empleado (Nombre, Telefono, Empresa, ID_LoginEmpleado, departamento)\" \"VALUES(%s,%s,%s,%s,%s)\") \n valores=(\n vo.getNombre(),\n vo.getTelefono(),\n vo.getEmpresa(),\n vo.getIdLogin(),\n vo.getDepartamento()\n )\n cursor.execute(consulta, valores)\n conn.commit()\n return{\n 'message': \"insert succesful\"\n }\n except Exception as e:\n return json.dumps({'error':str(e)}) \n finally: \n cursor.close()\n conn.close() 
\n\n","repo_name":"MarcoNavarroULSA2/akilli","sub_path":"include/EmpleadoDAO.py","file_name":"EmpleadoDAO.py","file_ext":"py","file_size_in_byte":3643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3533616318","text":"import datetime\r\nimport time\r\nimport os\r\n\r\n#insert the date under you want to count to.\r\nyearGoal = 2019 #Year\r\nmonthGoal = 6 #Month\r\ndayGoal = 4 #Day\r\nhourGoal = 0 #Hour\r\nminuteGoal = 0 #Minute\r\nsecondGoal = 0 #Second\r\n\r\nstartup = True\r\n\r\nfirst = datetime.datetime\r\nfnow = first.now()\r\n\r\ndef update():\r\n isDay = False\r\n today = datetime.date.today()\r\n dt = datetime.datetime\r\n now = dt.now()\r\n\r\n daysAndTime = str(dt(yearGoal, monthGoal, dayGoal, hourGoal, minuteGoal, secondGoal) - dt(year=now.year, month=now.month, day=now.day, hour=now.hour, minute=now.minute, second=now.second))\r\n\r\n if \"day\" in daysAndTime:\r\n days = daysAndTime.split()[0]\r\n hours = daysAndTime.split()[2].split(\":\")[0]\r\n minutes = daysAndTime.split()[2].split(\":\")[1]\r\n seconds = daysAndTime.split()[2].split(\":\")[2]\r\n else: \r\n days = 0\r\n hours = daysAndTime.split(\":\")[0]\r\n minutes = daysAndTime.split(\":\")[1]\r\n seconds = daysAndTime.split(\":\")[2]\r\n \r\n totalseconds = int(seconds) + int(minutes) * 60 + int(hours) * 60 * 60 + int(days) * 24 * 60 * 60\r\n \r\n if startup:\r\n return totalseconds\r\n\r\n totalminutes = totalseconds / 60\r\n\r\n totalhours = totalminutes / 60\r\n\r\n procentage = 100 * (1-(totalseconds/startValue))\r\n\r\n\r\n return \"Date: \" + str(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())) + \"\\nCounting to: \" + str(yearGoal) + \"-\" + str(monthGoal) + \"-\" + str(dayGoal) + \" \" + str(hourGoal).zfill(2) + \":\" + str(minuteGoal).zfill(2) + \":\" + str(secondGoal).zfill(2) + \"\\n\\nTime Left: \" + str(days) + \":\" + str(hours) + \":\" + str(minutes) + \":\" + str(seconds) + \"\\nWeeks Left: \" + str((((int(totalseconds)/60)/24)/7)/60) + \"\\nTotal Hours: \" + str(int(totalhours)) + \"\\nTotal Minutes: \" + str(int(totalminutes)) + \"\\nTotal Seconds: \" + str(int(totalseconds)) + \"\\n\\nPercent\" + \"(\" + str(fnow.year) + \"-\" + str(fnow.month) + \"-\" + str(fnow.day) + \"): \\n\" + str(procentage) + \"%\"\r\n\r\n\r\n \r\nwhile True:\r\n if startup:\r\n startValue = update()\r\n startup = False\r\n os.system(\"clear\")\r\n print(update())\r\n time.sleep(1)\r\n \r\n\r\n","repo_name":"RealNexa/countdown","sub_path":"displayTime.py","file_name":"displayTime.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35103709266","text":"import json\nimport re\nimport threading\n\nimport websocket\n\nfrom platypush.backend import Backend\nfrom platypush.message.event.music import (\n MusicPlayEvent,\n MusicPauseEvent,\n MusicStopEvent,\n NewPlayingTrackEvent,\n PlaylistChangeEvent,\n VolumeChangeEvent,\n PlaybackConsumeModeChangeEvent,\n PlaybackSingleModeChangeEvent,\n PlaybackRepeatModeChangeEvent,\n PlaybackRandomModeChangeEvent,\n MuteChangeEvent,\n SeekChangeEvent,\n)\n\n\n# noinspection PyUnusedLocal\nclass MusicMopidyBackend(Backend):\n \"\"\"\n This backend listens for events on a Mopidy music server streaming port.\n Since this backend leverages the Mopidy websocket interface it is only\n compatible with Mopidy and not with other MPD servers. 
Please use the\n :class:`platypush.backend.music.mpd.MusicMpdBackend` for a similar polling\n solution if you're not running Mopidy or your instance has the websocket\n interface or web port disabled.\n\n Requires:\n\n * A Mopidy instance running with the HTTP service enabled.\n\n \"\"\"\n\n def __init__(self, host='localhost', port=6680, **kwargs):\n super().__init__(**kwargs)\n\n self.host = host\n self.port = int(port)\n self.url = 'ws://{}:{}/mopidy/ws'.format(host, port)\n self._msg_id = 0\n self._ws = None\n self._latest_status = {}\n self._reconnect_thread = None\n self._connected_event = threading.Event()\n\n try:\n self._latest_status = self._get_tracklist_status()\n except Exception as e:\n self.logger.warning('Unable to get mopidy status: {}'.format(str(e)))\n\n @staticmethod\n def _parse_track(track, pos=None):\n if not track:\n return {}\n\n conv_track = track.get('track', {}).copy()\n conv_track['id'] = track.get('tlid')\n conv_track['file'] = conv_track['uri']\n del conv_track['uri']\n\n if 'artists' in conv_track:\n conv_track['artist'] = conv_track['artists'][0].get('name')\n del conv_track['artists']\n\n if 'name' in conv_track:\n conv_track['title'] = conv_track['name']\n del conv_track['name']\n\n if 'album' in conv_track:\n conv_track['album'] = conv_track['album']['name']\n\n if 'length' in conv_track:\n conv_track['time'] = (\n conv_track['length'] / 1000\n if conv_track['length']\n else conv_track['length']\n )\n del conv_track['length']\n\n if pos is not None:\n conv_track['pos'] = pos\n\n if '__model__' in conv_track:\n del conv_track['__model__']\n\n return conv_track\n\n def _communicate(self, msg):\n if isinstance(msg, str):\n msg = json.loads(msg)\n\n self._msg_id += 1\n msg['jsonrpc'] = '2.0'\n msg['id'] = self._msg_id\n msg = json.dumps(msg)\n\n ws = websocket.create_connection(self.url)\n ws.send(msg)\n response = json.loads(ws.recv()).get('result')\n ws.close()\n return response\n\n def _get_tracklist_status(self):\n return {\n 'repeat': self._communicate({'method': 'core.tracklist.get_repeat'}),\n 'random': self._communicate({'method': 'core.tracklist.get_random'}),\n 'single': self._communicate({'method': 'core.tracklist.get_single'}),\n 'consume': self._communicate({'method': 'core.tracklist.get_consume'}),\n }\n\n def _on_msg(self):\n def hndl(*args):\n msg = args[1] if len(args) > 1 else args[0]\n msg = json.loads(msg)\n event = msg.get('event')\n if not event:\n return\n\n status = {}\n track = msg.get('tl_track', {})\n\n if event == 'track_playback_paused':\n status['state'] = 'pause'\n track = self._parse_track(track)\n if not track:\n return\n self.bus.post(\n MusicPauseEvent(status=status, track=track, plugin_name='music.mpd')\n )\n elif event == 'track_playback_resumed':\n status['state'] = 'play'\n track = self._parse_track(track)\n if not track:\n return\n self.bus.post(\n MusicPlayEvent(status=status, track=track, plugin_name='music.mpd')\n )\n elif event == 'track_playback_ended' or (\n event == 'playback_state_changed' and msg.get('new_state') == 'stopped'\n ):\n status['state'] = 'stop'\n track = self._parse_track(track)\n self.bus.post(\n MusicStopEvent(status=status, track=track, plugin_name='music.mpd')\n )\n elif event == 'track_playback_started':\n track = self._parse_track(track)\n if not track:\n return\n\n status['state'] = 'play'\n status['position'] = 0.0\n status['time'] = track.get('time')\n self.bus.post(\n NewPlayingTrackEvent(\n status=status, track=track, plugin_name='music.mpd'\n )\n )\n elif event == 
'stream_title_changed':\n m = re.match(r'^\\s*(.+?)\\s+-\\s+(.*)\\s*$', msg.get('title', ''))\n if not m:\n return\n\n track['artist'] = m.group(1)\n track['title'] = m.group(2)\n status['state'] = 'play'\n status['position'] = 0.0\n self.bus.post(\n NewPlayingTrackEvent(\n status=status, track=track, plugin_name='music.mpd'\n )\n )\n elif event == 'volume_changed':\n status['volume'] = msg.get('volume')\n self.bus.post(\n VolumeChangeEvent(\n volume=status['volume'],\n status=status,\n track=track,\n plugin_name='music.mpd',\n )\n )\n elif event == 'mute_changed':\n status['mute'] = msg.get('mute')\n self.bus.post(\n MuteChangeEvent(\n mute=status['mute'],\n status=status,\n track=track,\n plugin_name='music.mpd',\n )\n )\n elif event == 'seeked':\n status['position'] = msg.get('time_position') / 1000\n self.bus.post(\n SeekChangeEvent(\n position=status['position'],\n status=status,\n track=track,\n plugin_name='music.mpd',\n )\n )\n elif event == 'tracklist_changed':\n tracklist = [\n self._parse_track(t, pos=i)\n for i, t in enumerate(\n self._communicate({'method': 'core.tracklist.get_tl_tracks'})\n )\n ]\n\n self.bus.post(\n PlaylistChangeEvent(changes=tracklist, plugin_name='music.mpd')\n )\n elif event == 'options_changed':\n new_status = self._get_tracklist_status()\n if new_status['random'] != self._latest_status.get('random'):\n self.bus.post(\n PlaybackRandomModeChangeEvent(\n state=new_status['random'], plugin_name='music.mpd'\n )\n )\n if new_status['repeat'] != self._latest_status['repeat']:\n self.bus.post(\n PlaybackRepeatModeChangeEvent(\n state=new_status['repeat'], plugin_name='music.mpd'\n )\n )\n if new_status['single'] != self._latest_status['single']:\n self.bus.post(\n PlaybackSingleModeChangeEvent(\n state=new_status['single'], plugin_name='music.mpd'\n )\n )\n if new_status['consume'] != self._latest_status['consume']:\n self.bus.post(\n PlaybackConsumeModeChangeEvent(\n state=new_status['consume'], plugin_name='music.mpd'\n )\n )\n\n self._latest_status = new_status\n\n return hndl\n\n def _retry_connect(self):\n def reconnect():\n while not self.should_stop() and not self._connected_event.is_set():\n try:\n self._connect()\n except Exception as e:\n self.logger.warning('Error on websocket reconnection: %s', e)\n\n self._connected_event.wait(timeout=10)\n\n self._reconnect_thread = None\n\n if not self._reconnect_thread or not self._reconnect_thread.is_alive():\n self._reconnect_thread = threading.Thread(target=reconnect)\n self._reconnect_thread.start()\n\n def _on_error(self):\n def hndl(*args):\n error = args[1] if len(args) > 1 else args[0]\n ws = args[0] if len(args) > 1 else None\n self.logger.warning('Mopidy websocket error: {}'.format(error))\n if ws:\n ws.close()\n\n return hndl\n\n def _on_close(self):\n def hndl(*_):\n self._connected_event.clear()\n self._ws = None\n self.logger.warning('Mopidy websocket connection closed')\n\n if not self.should_stop():\n self._retry_connect()\n\n return hndl\n\n def _on_open(self):\n def hndl(*_):\n self._connected_event.set()\n self.logger.info('Mopidy websocket connected')\n\n return hndl\n\n def _connect(self):\n if not self._ws:\n self._ws = websocket.WebSocketApp(\n self.url,\n on_open=self._on_open(),\n on_message=self._on_msg(),\n on_error=self._on_error(),\n on_close=self._on_close(),\n )\n\n self._ws.run_forever()\n\n def run(self):\n super().run()\n self.logger.info(\n 'Started tracking Mopidy events backend on {}:{}'.format(\n self.host, self.port\n )\n )\n self._connect()\n\n def on_stop(self):\n 
self.logger.info('Received STOP event on the Mopidy backend')\n if self._ws:\n self._ws.close()\n\n self.logger.info('Mopidy backend terminated')\n\n\n# vim:sw=4:ts=4:et:\n","repo_name":"BlackLight/platypush","sub_path":"platypush/backend/music/mopidy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10918,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"12"} +{"seq_id":"21239279799","text":"def process_file(input_name, output_name):\n \"\"\"Copies the text of input_name into output_name, converting it all to lower case\"\"\"\n \n in_file = open(input_name)\n lines = in_file.readlines()\n \n out_file = open(output_name, 'w')\n \n for line in lines:\n line = line.strip(\" \")\n for word in line:\n word = word.lower()\n out_file.write(word)\n \n in_file.close()\n out_file.close()\n \n# setup a file to convert\ntext = \"\"\"Hey??\n This is A funny looking text \n That nEeds converting \n tO lower cAse\"\"\"\nf = open('test.txt', 'w')\nf.write(text)\nf.close()\n# test your function\nprocess_file('test.txt', 'output.txt') \nprint(open('output.txt').read())\n ","repo_name":"Loquaxious/COSC121","sub_path":"Exam Revison/self66.py","file_name":"self66.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40792349110","text":"from django.shortcuts import render\nfrom eignir.models import Eign\nfrom pantanir.models import Pantanir\nimport datetime\n\n\n\ndef stadfesta(request, id):\n # this is so that we can carry the card details over to the next page for confirmation\n context = {'card': request.POST['cardnumber'],\n 'name': request.POST['cardname'],\n 'expmonth': request.POST['expmonth'],\n 'expyear': request.POST['expyear'],\n 'cvc': request.POST['cvv'],\n 'eign': Eign.objects.get(id=id),\n }\n return render(request, 'pantanir/pontun_stadfesta.html', context)\n\n\n\ndef index(request, id):\n context = {'eign': Eign.objects.get(id=id)}\n return render(request, 'pantanir/index.html', context)\n\n\ndef kvittun(request,id):\n eign = Eign.objects.get(id=id)\n #the date the property is bought\n date=datetime.date.today().strftime(\"%B %d, %Y\")\n\n #when a property is bought we put all the information we need about it into the Pantanir (orders) table\n instance = Pantanir.objects.create(heimilisfang=eign.heimilisfang, baejarfelag=eign.baejarfelag,\n postnumer=eign.postnumer, verd=eign.verd,\n brunabotamat=eign.brunabotamat, fasteignamat=eign.fasteignamat,\n tegund=eign.tegund, staerd=eign.staerd,\n byggingarar=eign.byggingarar, sett_a_solu=eign.sett_a_solu,\n fjoldi_svefnherberga=eign.fjoldi_svefnherberga,\n fjoldi_badherberga=eign.fjoldi_badherberga,\n fjoldi_herbergja=eign.fjoldi_herbergja, nafn_seljanda=eign.nafn_seljanda,\n simi_seljanda=eign.simi_seljanda, netfang_seljanda=eign.netfang_seljanda,\n starfsmenn=eign.starfsmenn, nafn_kaupanda=request.user.profile.fullt_nafn,\n simi_kaupanda=request.user.profile.simi, netfang_kaupanda=request.user.profile.netfang,\n kennitala_kaupanda=request.user.profile.kennitala,\n heimilisfang_kaupanda=request.user.profile.heimilisfang,\n borg_kaupanda=request.user.profile.borg, land_kaupanda=request.user.profile.land,\n postnr_kaupanda=request.user.profile.postnr, notandanafn=request.user)\n\n #we delete the property from the Eign table once it has been bought\n context = {'eign': Eign.objects.filter(id=id).delete(),\n 'pantanir': Pantanir.objects.all().last(),\n 'date': date}\n return render(request, 'pantanir/kvittun.html', 
context)\n\n\n\n\n","repo_name":"andrilor/CastleApartments-36","sub_path":"castleapartments/pantanir/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"is","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"28620502692","text":"\"\"\"This problem was asked by Uber.\nGiven an array of integers, return a new array such that each element at index i of the new array is the product of all the numbers in the original array except the one at i.\nFor example, if our input was [1, 2, 3, 4, 5], the expected output would be [120, 60, 40, 30, 24]. If our input was [3, 2, 1], the expected output would be [2, 3, 6].\nFollow-up: what if you can't use division?\"\"\"\nfrom __future__ import print_function\n\n\ndef get_prefix_product(input_arr):\n product = 1\n result = []\n for num in input_arr:\n product *= num\n result.append(product)\n return result\n\n\ndef f(input_arr):\n length = len(input_arr)\n modified_arr = [1]+input_arr+[1]\n left_prod = get_prefix_product(modified_arr)\n modified_arr.reverse()\n right_prod = get_prefix_product(modified_arr)\n right_prod.reverse()\n result = []\n for i in xrange(length):\n result.append(left_prod[i]*right_prod[i+2])\n return zip(input_arr, result)\n\n\ninputs = [[1, 2, 3, 4, 5],\n [1234, 567, -90],\n [1, 2345]]\nmap(lambda x: print(f(x)), inputs)\n","repo_name":"shubamg/solutions_in_python","sub_path":"DCP2.py","file_name":"DCP2.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32944475773","text":"# Importing required modules and library\nimport numpy as np\nfrom dataPreparation import * \nfrom PIL import Image\nimport time\nfrom modelLoader import *\nimport requests\n\nurl = 'http://128.199.176.47:8080/cam_result'\nmodel = modelLoader()\nresult=[]\ndef Capture(frame,n):\n# Creating the capture object from opencv\n if not os.path.exists('capture_images'):\n print(\"New directory created\")\n os.makedirs('capture_images')\n time.sleep(.3)\n image = frame\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300,300)), 1.0, (300, 300), (104.0, 177.0, 123.0))\n model.setInput(blob)\n detections = model.forward()\n # Image from the frame\n ii=0\n for i in range(1):#0, detections.shape[2]):\n box = detections[0,0, i, 3:7] * np.asarray([w,h, w, h])\n (startX, startY, endX, endY) = box.astype('int')\n confidence = detections[0, 0, i, 2]\n if(confidence >= 0.5 and startX>=100 and endX<=600): #and startX>=180 and endX<=420\n br=0\n try:\n if (startY>=50):\n startY=startY-50\n if (endY+50<=300):\n endY=endY+50\n frame = image[startY:endY, startX-50:endX+50]\n frame = cv2.resize(frame,(224,224))\n ii=1\n # image_name=f\"capture_images/{n}.png\"\n cv2.imwrite(f\"capture_images/{n}.png\",frame)\n # files = {'media': open(image_name, 'rb')}\n # requests.post(url, files=files)\n except:\n print(\"status=-1 : from capture image file .\")\n # url_=url+\"?status=-1\"\n # x = requests.post(url_, json = {})\n # print(\"cant capture image\",x)\n br=1\n if br==1:\n break\n\n return ii\n","repo_name":"sazzad1779/Ad-system","sub_path":"capture.py","file_name":"capture.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"31194211134","text":"from . 
import get_initial_image, filter_events, filter_trials, classify_licks, \\\n get_invalid_lick_disabled_trials\n\n\ndef test_catch_trials_have_no_changes(raw):\n \"\"\"Checks that trials don't have stimulus changes, and if they do verifies \n that they change to the same image identity\n \"\"\"\n bad_trial_indices = []\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n if trial[\"trial_params\"][\"catch\"] is True and \\\n len(trial[\"stimulus_changes\"]) > 0 and \\\n trial[\"stimulus_changes\"][0][0][0] != trial[\"stimulus_changes\"][0][1][0]: # catch trials with stimulus changes to a different stim are bad\n bad_trial_indices.append(trial[\"index\"])\n\n assert len(bad_trial_indices) < 1, \\\n f\"Catch trials have stimulus changes. Indices: {bad_trial_indices}\"\n\n\ndef test_image_sequence(raw):\n \"\"\"Tests that image name is contiguous across trials. If there was a \n stimulus change in the previous trial, the initial image of the next change\n should be the final image of the previous change.\n \"\"\"\n prev = get_initial_image(raw)\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n if len(trial[\"stimulus_changes\"]) > 0:\n initial_image = trial[\"stimulus_changes\"][0][0][0]\n change_image = trial[\"stimulus_changes\"][0][1][0]\n assert prev == initial_image, \\\n \"Initial image for a change should be the change image from the last trial with a stimulus change.\"\n prev = change_image\n\n\ndef test_event_log(raw):\n \"\"\"Tests that trials don't have incorrect events based on whether they're go\n or catch\n\n Notes\n -----\n - go trials should not have: sham_change, rejection, false_alarm\n - catch trials should not have: change, hit, miss\n \"\"\"\n bad_trial_indices = []\n # check go trials\n for trial in filter_trials(raw, False):\n bad_events = [\n filter_events(trial, \"sham_change\"),\n filter_events(trial, \"false_alarm\"),\n filter_events(trial, \"rejection\"),\n ]\n if any(bad_events):\n bad_trial_indices.append(trial[\"index\"])\n\n # check catch trials\n for trial in filter_trials(raw, True):\n bad_events = [\n filter_events(trial, \"change\"),\n filter_events(trial, \"hit\"),\n filter_events(trial, \"miss\"),\n ]\n if any(bad_events):\n bad_trial_indices.append(trial[\"index\"])\n\n assert len(bad_trial_indices) < 1, \\\n f\"Trials failing validation. Indices: {bad_trial_indices}\"\n\n\ndef test_abort_licks(raw):\n \"\"\"Tests that trials in which the mouse licked before the change or \n sham-change are listed as aborts in the trial log\n \"\"\"\n bad_trial_indices = []\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n early, within = classify_licks(trial)\n abort_events = filter_events(trial, \"abort\")\n if len(early) > 0 and len(abort_events) < 1:\n bad_trial_indices.append(trial[\"index\"])\n\n assert len(bad_trial_indices) < 1, \\\n f\"Trials failing validation. 
Indices: {bad_trial_indices}\"\n\n\ndef test_non_abort_event_log(raw):\n \"\"\"Tests that:\n 1) non-abort trials for which catch is True have the following response \n types:\n a) no lick in the response window: rejection\n b) lick in the response window: false alarm \n 2) non-abort trials for which catch is False:\n a) no lick in the response window: miss\n b) lick in the response window: hit\n \"\"\"\n bad_trial_indices = []\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n abort_events = filter_events(trial, \"abort\")\n # skip abort trials; this test only covers non-abort trials\n if len(abort_events) > 0:\n continue\n\n early, within = classify_licks(trial)\n hit_events = filter_events(trial, \"hit\")\n miss_events = filter_events(trial, \"miss\")\n rejection_events = filter_events(trial, \"rejection\")\n false_alarm_events = filter_events(trial, \"false_alarm\")\n auto_reward_events = filter_events(trial, \"auto_reward\")\n\n # auto rewarded trials have weird event logic, TODO: pair this with actual hit/miss events?\n if len(auto_reward_events) > 0:\n continue\n\n if trial[\"trial_params\"][\"catch\"] is False:\n if len(within) > 0:\n if any([\n len(hit_events) < 1,\n len(miss_events) > 0,\n len(rejection_events) > 0,\n len(false_alarm_events) > 0,\n ]):\n bad_trial_indices.append(trial[\"index\"])\n elif len(within) == 0:\n if any([\n len(miss_events) < 1,\n len(rejection_events) > 0,\n len(hit_events) > 0,\n len(false_alarm_events) > 0,\n ]):\n bad_trial_indices.append(trial[\"index\"])\n elif trial[\"trial_params\"][\"catch\"] is True:\n if len(within) > 0:\n if any([\n len(false_alarm_events) < 1,\n len(rejection_events) > 0,\n len(hit_events) > 0,\n len(miss_events) > 0,\n ]):\n bad_trial_indices.append(trial[\"index\"])\n elif len(within) == 0:\n if any([\n len(rejection_events) < 1,\n len(false_alarm_events) > 0,\n len(hit_events) > 0,\n len(miss_events) > 0,\n ]):\n bad_trial_indices.append(trial[\"index\"])\n else:\n raise Exception(\"Unexpected catch type.\")\n\n assert len(bad_trial_indices) < 1, \\\n f\"Trials failing validation. Indices: {bad_trial_indices}\"\n\n\ndef test_non_abort_catch_same_image(raw):\n \"\"\"Tests all non-abort catch trials have same image\n \"\"\"\n bad_trial_indices = []\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n if trial[\"trial_params\"][\"catch\"] is True:\n stimulus_changes = trial[\"stimulus_changes\"]\n if len(stimulus_changes) > 0 and \\\n stimulus_changes[0][0][0] != stimulus_changes[0][1][0]:\n bad_trial_indices.append(trial[\"index\"])\n\n assert len(bad_trial_indices) < 1, \\\n f\"Trials failing validation. Indices: {bad_trial_indices}\"\n\n\ndef test_non_abort_go_have_change(raw):\n \"\"\"Tests all non-abort go trials have a change\n \"\"\"\n bad_trial_indices = []\n for trial in raw[\"items\"][\"behavior\"][\"trial_log\"]:\n if trial[\"trial_params\"][\"catch\"] is False:\n abort_events = filter_events(trial, \"abort\")\n if len(abort_events) < 1 and len(trial[\"stimulus_changes\"]) < 1:\n bad_trial_indices.append(trial[\"index\"])\n\n assert len(bad_trial_indices) < 1, \\\n f\"Trials failing validation. Indices: {bad_trial_indices}\"\n\n\ndef test_no_reward_epoch(raw):\n invalid_trials = get_invalid_lick_disabled_trials(raw)\n assert len(invalid_trials) < 1, \\\n f\"Trials failing validation. 
Indices: {invalid_trials}\"\n","repo_name":"AllenInstitute/doc-pickle-tests","sub_path":"tests/test_raw.py","file_name":"test_raw.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42440337170","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on June 01 7:36 PM 2023\nCreated in PyCharm\nCreated as Misc/hydrogen_energy_levels\n\n@author: Dylan Neff, Dylan\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n ns_dis = np.arange(1, 7)\n es_dis = bound_energy(ns_dis)\n ns_cont = np.linspace(1, 7, 500)\n es_cont = bound_energy(ns_cont)\n # plt.grid()\n plt.figure(figsize=(6, 3), dpi=144)\n plt.axhline(0, color='black')\n plt.plot(ns_cont, es_cont, color='red', alpha=0.4, label=r'$-13.6 eV / n^2$')\n plt.scatter(ns_dis, es_dis, marker='_', s=1000, lw=3)\n plt.xlabel('n')\n plt.ylabel('Energy (eV)')\n plt.title('Hydrogen Energy Levels')\n plt.legend()\n plt.tight_layout()\n plt.show()\n\n print('donzo')\n\n\ndef bound_energy(n):\n return -13.6 / n**2\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Dyn0402/Misc","sub_path":"Physics_5CL/hydrogen_energy_levels.py","file_name":"hydrogen_energy_levels.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"74629075540","text":"import math\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter\nimport matplotlib.gridspec as gridspec\n\nimport numpy as np\n\nfrom ..Opt.Bayesian import Bayesian\n\n\ndef plot_fisher_matrix(sens_matrix, exp, model, fig, lines=None):\n \"\"\"\n \"\"\"\n\n fisher = exp.get_fisher_matrix(sens_matrix)\n\n fisher_data = Bayesian.fisher_decomposition(fisher, model, tol=1E-3)\n\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n eigs = fisher_data[0]\n eig_vects = fisher_data[1]\n eig_func = fisher_data[2]\n indep = fisher_data[3]\n\n ax1.semilogy(eigs, 'sk')\n ax1.set_xlabel(\"Eigenvalue number\")\n ax1.set_ylabel(r\"Eigenvalue / Pa$^{-2}$\")\n ax1.set_xlim(-0.5, len(eigs) - 0.5)\n ax1.set_ylim([0.1 * min(eigs[np.nonzero(eigs)]), 10 * max(eigs)])\n ax1.xaxis.set_major_locator(MultipleLocator(1))\n ax1.xaxis.set_major_formatter(FormatStrFormatter('%d'))\n\n styles = ['-g', '-.b', '--m', ':k', '-c', '-.y', '--r'] *\\\n int(math.ceil(eig_func.shape[0] / 7.0))\n\n for i in range(eig_func.shape[0]):\n ax2.plot(indep, eig_func[i], styles[i],\n label=\"{:d}\".format(i))\n # end\n\n # find rho=25.77 gpa\n for line, name in lines:\n ax2.axvline(line)\n # end\n ax2.legend(loc='best')\n ax2.get_legend().set_title(\"Eigen-\\nfunctions\", prop={'size': 7})\n ax2.set_xlabel(r\"Density / g cm$^{-3}$ \")\n ax2.set_ylabel(\"Eigenfunction response / Pa\")\n\n fig.tight_layout()\n\n return fig\n\n\ndef plot_sens_matrix(sens_matrix, exp, model=None, axes=None, fig=None,\n labels=[], linestyles=[]):\n \"\"\"Prints the sensitivity matrix\n\n Args:\n model(PhysicsModel): The model the sensitivity is in respect to\n exp(Experiment): The experiment the matrix is compared to\n sens_matrix(np.ndarray): The sensitivity matrix\n\n Keyword Args:\n axes(plt.Axes): The axes object *Ignored*\n fig(plt.Figure): A valid matplotlib figure on which to plot.\n If `None`, creates a new figure\n sens_matrix(dict): A dict of the total sensitivity\n labels(list): Strings for labels *Ignored*\n linestyles(list): Strings for linestyles *Ignored*\n\n 
Return:\n (plt.Figure): The figure\n \"\"\"\n if fig is None:\n fig = plt.figure()\n else:\n fig = fig\n # end\n print(model)\n gs = gridspec.GridSpec(3, 4,\n width_ratios=[6, 1, 6, 1])\n\n ax1 = fig.add_subplot(gs[0])\n ax2 = fig.add_subplot(gs[2])\n ax3 = fig.add_subplot(gs[4])\n ax4 = fig.add_subplot(gs[6])\n ax5 = fig.add_subplot(gs[8])\n ax6 = fig.add_subplot(gs[10])\n if model is not None:\n knot_post = model.get_t()\n else:\n knot_post = np.arange(sens_matrix.shape[1])\n # end\n\n resp_val = exp[0]\n\n style = ['-r', '-g', '-b', ':r', ':g', ':b',\n '--r', '--g', '--b', '--k']\n for i in range(10):\n ax1.plot(sens_matrix[:, i],\n style[i], label=\"{:4.3f}\".format(knot_post[i]))\n ax1.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n # ax1.get_legend().set_title(\"knots\",\n # prop = {'size':rcParams['legend.fontsize']})\n for i in range(10, 20):\n ax2.plot(sens_matrix[:, i],\n style[i - 10], label=\"{:4.3f}\".format(knot_post[i]))\n ax2.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n # ax2.get_legend().set_title(\"knots\",\n # prop = {'size':rcParams['legend.fontsize']})\n for i in range(20, 30):\n ax3.plot(sens_matrix[:, i],\n style[i - 20], label=\"{:4.3f}\".format(knot_post[i]))\n ax3.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n # ax3.get_legend().set_title(\"knots\",\n # prop = {'size':rcParams['legend.fontsize']})\n\n for i in range(30, 40):\n ax4.plot(sens_matrix[:, i],\n style[i - 30], label=\"{:4.3f}\".format(knot_post[i]))\n ax4.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n # ax4.get_legend().set_title(\"knots\",\n # prop = {'size':rcParams['legend.fontsize']})\n\n for i in range(40, sens_matrix.shape[1]):\n ax5.plot(sens_matrix[:, i],\n style[i - 40], label=\"{:4.3f}\".format(knot_post[i]))\n ax5.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n # for i in range(50, sens_matrix.shape[1]):\n # ax6.plot(sens_matrix[:, i],\n # style[i - 50], label=\"{:4.3f}\".format(knot_post[i]))\n # ax6.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n # ax5.get_legend().set_title(\"knots\",\n # prop = {'size':rcParams['legend.fontsize']})\n\n ax1.set_ylabel('Sensitivity')\n ax3.set_ylabel('Sensitivity')\n ax5.set_ylabel('Sensitivity')\n ax5.set_xlabel('Model resp. indep. var.')\n ax4.set_xlabel('Model resp. indep. 
var.')\n\n # xlocator = (max(resp_val) - min(resp_val)) / 4\n # ax1.xaxis.set_major_locator(MultipleLocator(xlocator))\n # ax2.xaxis.set_major_locator(MultipleLocator(xlocator))\n # ax3.xaxis.set_major_locator(MultipleLocator(xlocator))\n # ax4.xaxis.set_major_locator(MultipleLocator(xlocator))\n # ax5.xaxis.set_major_locator(MultipleLocator(xlocator))\n\n fig.tight_layout()\n plt.show()\n return fig\n","repo_name":"fraserphysics/F_UNCLE","sub_path":"F_UNCLE/Utils/Plotting.py","file_name":"Plotting.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"35103940606","text":"from abc import ABC, abstractmethod\n\n\nclass Serializable(ABC):\n \"\"\"\n Base class for reflection entities that can be serialized to JSON/YAML.\n \"\"\"\n\n @abstractmethod\n def to_dict(self) -> dict:\n \"\"\"\n Serialize the entity to a dictionary.\n \"\"\"\n raise NotImplementedError()\n","repo_name":"BlackLight/platypush","sub_path":"platypush/common/reflection/_serialize.py","file_name":"_serialize.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":263,"dataset":"github-code","pt":"12"} +{"seq_id":"27491932023","text":"import torch\nfrom torch import nn\nfrom transformers import AlbertModel\n\nclass AlbertExtra(nn.Module):\n def __init__(self, feature_dim, num_labels):\n super().__init__()\n \n self.num_labels = num_labels\n self.albert = AlbertModel.from_pretrained('albert-base-v2')\n self.feature_processor = nn.Sequential(\n nn.Linear(feature_dim, 64),\n nn.ReLU(),\n nn.Linear(64, 64),\n )\n \n self.classifier = nn.Linear(self.albert.config.hidden_size + 64, num_labels)\n\n def forward(self, input_ids=None, attention_mask=None, extra_features=None, label_names=None):\n\n extra_features = extra_features.float()\n \n outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask)\n # use the pooled output so the concatenation below matches the classifier input size\n albert_output = outputs[1]\n \n processed_features = self.feature_processor(extra_features)\n \n combined = torch.cat((albert_output, processed_features), dim=1)\n \n logits = self.classifier(combined)\n\n loss = None\n if label_names is not None:\n if self.num_labels == 1:\n loss_fct = torch.nn.BCEWithLogitsLoss()\n loss = loss_fct(logits.view(-1), label_names.view(-1))\n else:\n loss_fct = torch.nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), label_names.view(-1))\n\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n","repo_name":"beautifulstatistics/EmailClass","sub_path":"archive/ALBERTExtra.py","file_name":"ALBERTExtra.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"41229471335","text":"import numpy as np\nfrom scipy import linalg\nfrom six import iteritems\n\nfrom .thermo_constants import RT\n\n\ndef cov2corr(covariance):\n \"\"\"Calculates correlation matrix from covariance matrix\n corr(i,j) = cov(i,j)/(stdev(i) * stdev(j))\n\n Arguments:\n covariance {np.ndarray} -- covariance matrix\n\n Returns:\n [np.ndarray] -- correlation matrix\n \"\"\"\n\n stdev = np.sqrt(np.diag(covariance))\n outer_stdev = np.outer(stdev, stdev)\n correlation = covariance / outer_stdev\n correlation[covariance == 0] = 0\n\n return correlation\n\n\ndef findcorrelatedmets(covariance, metabolites):\n \"\"\"Finds high-variance metabolites that are strongly correlated with each other\n\n Arguments:\n covariance {np.ndarray} -- covariance matrix of metabolite formation energies\n metabolites {list} -- metabolites corresponding to the rows/columns of the covariance matrix\n\n Returns:\n [tuple] -- (old_ellipse_mets, new_ellipse_mets, old_cov, new_cov)\n 
\"\"\"\n\n correlation_mat = cov2corr(covariance)\n\n # Check for nan in correlation matrix and keep track of them and the corresponding metabolites\n non_prob_ind, prob_ind, non_prob_mets, nan_mets = [], [], [], []\n for i in range(len(correlation_mat)):\n if not np.isnan(correlation_mat[:, i]).all():\n non_prob_ind.append(i)\n non_prob_mets.append(metabolites[i])\n else:\n prob_ind.append(i)\n nan_mets.append(metabolites[i])\n\n reduced_correlation = correlation_mat[:, non_prob_ind]\n reduced_correlation = reduced_correlation[non_prob_ind, :]\n\n reduced_cov = covariance[:, non_prob_ind]\n reduced_cov = reduced_cov[non_prob_ind, :]\n\n # removing metabolites with all zeros in correlation matrix\n zero_mets, final_mets, non_zero_ind = [], [], []\n for i in range(0, len(reduced_correlation)):\n if np.count_nonzero(reduced_correlation[:, i]) == 0:\n zero_mets.append(non_prob_mets[i])\n else:\n non_zero_ind.append(i)\n final_mets.append(non_prob_mets[i])\n\n final_correlation = reduced_correlation[:, non_zero_ind]\n final_correlation = final_correlation[non_zero_ind, :]\n\n final_cov = reduced_cov[:, non_zero_ind]\n final_cov = final_cov[non_zero_ind, :]\n\n # Find high variance metabolites\n ind_high_variances = list(np.where(np.sqrt(np.diag(final_cov)) > 25)[0])\n\n # Find indices that are highly correlated (corr > 0.7 | corr < -0.7) and check if they are same as high variance metabolites\n (\n new_ellipsoid_ind,\n old_ellipsoid_ind,\n no_ellipse_mets,\n new_ellipse_mets,\n old_ellipse_mets,\n ) = ([], [], [], [], [])\n\n for i in range(len(final_correlation)):\n if i in ind_high_variances:\n pos_corr = list(set(np.where(final_correlation[:, i] > 0.7)[0]))\n neg_corr = list(set(np.where(final_correlation[:, i] < -0.7)[0]))\n correlated_ind = pos_corr + neg_corr\n if len(correlated_ind) == 0:\n no_ellipse_mets.append(final_mets[i])\n\n if set(correlated_ind).intersection(set(ind_high_variances)) == set(\n correlated_ind\n ):\n new_ellipsoid_ind.append(i)\n new_ellipse_mets.append(final_mets[i])\n\n else:\n old_ellipsoid_ind.append(i)\n old_ellipse_mets.append(final_mets[i])\n\n else:\n old_ellipsoid_ind.append(i)\n old_ellipse_mets.append(final_mets[i])\n\n old_cov = final_cov[:, old_ellipsoid_ind]\n old_cov = old_cov[old_ellipsoid_ind, :]\n new_cov = final_cov[:, new_ellipsoid_ind]\n new_cov = new_cov[new_ellipsoid_ind, :]\n\n return (old_ellipse_mets, new_ellipse_mets, old_cov, new_cov)\n\n\ndef Exclude_quadratic(model):\n\n big_var_rxn = []\n for rxn in model.reactions:\n if rxn.id in model.Exclude_reactions:\n continue\n lb_conc, ub_conc, lb_form, ub_form = (0, 0, 0, 0)\n for met, stoic in iteritems(rxn.metabolites):\n if met.Kegg_id in [\"C00080\", \"cpd00067\"]:\n continue\n if stoic < 0:\n lb_conc += stoic * met.concentration_variable.ub\n ub_conc += stoic * met.concentration_variable.lb\n lb_form += stoic * met.compound_variable.lb\n ub_form += stoic * met.compound_variable.ub\n else:\n lb_conc += stoic * met.concentration_variable.lb\n ub_conc += stoic * met.concentration_variable.ub\n lb_form += stoic * met.compound_variable.lb\n ub_form += stoic * met.compound_variable.ub\n\n lb_delG_rxn = RT * lb_conc + lb_form + rxn.transport_delG + rxn.transform\n ub_delG_rxn = RT * ub_conc + ub_form + rxn.transport_delG + rxn.transform\n\n if abs(lb_delG_rxn - ub_delG_rxn) > 5000:\n big_var_rxn.append(rxn)\n\n high_var_mets = []\n for reaction in big_var_rxn:\n for metabolite in reaction.metabolites:\n if metabolite.std_dev > 50:\n high_var_mets.append(metabolite.id)\n return 
list(set(high_var_mets))\n\n\ndef correlated_pairs(model):\n\n delete_met, cov_mets, cov_met_inds, non_duplicate = [], [], [], []\n correlated_pair = {}\n\n # Find metabolites that are present in different compartments and make sure they get one row/col in covariance matrix\n for met in model.metabolites:\n if met.delG_f == 0 or np.isnan(met.delG_f):\n delete_met.append(met)\n else:\n cov_met_inds.append(model.metabolites.index(met))\n cov_mets.append(met)\n non_duplicate.append(met.Kegg_id)\n\n # Pick indices of non zero non nan metabolites\n cov_dg = model.covariance_dG[:, cov_met_inds]\n cov_dg = cov_dg[cov_met_inds, :]\n\n correlation_mat = cov2corr(cov_dg)\n\n for i in range(len(correlation_mat)):\n correlated_ind = np.where(np.abs(correlation_mat[:, i] > 0.99))[0]\n\n if len(correlated_ind) > 1:\n for j in correlated_ind:\n if j == i:\n continue\n if cov_mets[i].Kegg_id == cov_mets[j].Kegg_id:\n continue\n if cov_mets[i] in correlated_pair:\n correlated_pair[cov_mets[i].id].append(cov_mets[j].id)\n else:\n correlated_pair[cov_mets[i].id] = [cov_mets[j].id]\n\n for key in correlated_pair:\n for i in range(len(correlated_pair[key])):\n if correlated_pair[key][i] in correlated_pair:\n if key in correlated_pair[correlated_pair[key][i]]:\n correlated_pair[correlated_pair[key][i]].remove(key)\n\n correlated_mets = {k: v for k, v in correlated_pair.items() if v}\n\n return correlated_mets\n\n\ndef quadratic_matrices(covariance, metabolites):\n\n inv_covar = linalg.inv(covariance)\n met_varnames = [met.compound_variable.name for met in metabolites]\n\n ind1, ind2, val = ([], [], [])\n for i in range(len(covariance)):\n ind1.extend(len(covariance) * [met_varnames[i]])\n ind2.extend(met_varnames)\n val.extend(list(inv_covar[:, i]))\n\n return (ind1, ind2, val)\n","repo_name":"biosustain/multitfa","sub_path":"src/multitfa/util/util_func.py","file_name":"util_func.py","file_ext":"py","file_size_in_byte":6978,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"12"} +{"seq_id":"74545638102","text":"lista = []\nwhile True:\n num = int(input('\\033[34;1mDigite um valor:\\033[m '))\n if num in lista:\n print('\\033[31;1mValor duplicado. Não vou adicionar...\\033[m')\n else:\n print('\\033[32;1mValor adicionado com sucesso...\\033[m')\n lista.append(num)\n cont = str(input('\\033[34;1mQuer continuar? [S/N]\\033[m ')).upper().strip()[0]\n while cont not in 'SN':\n cont = str(input('\\033[31;1mDigito Ínvalido, Porfavor digite \"S\" para Sim ou \"N\" para Não.\\033[m'\n '\\n\\033[34;1mQuer continuar? 
[S/N]\\033[m ')) \\\n .upper().strip()\n\n if cont == 'N':\n break\nlista.sort()\nif len(lista) == 1:\n print(f'\\033[36;1mVoçe digitou o valor: {lista}')\nelse:\n print(f'\\033[36;1mVocê digitou os valores: {lista}.')\n\n","repo_name":"Artur-STN/Curso-de-python","sub_path":"ExerciciosCursoEmVideo/ex079 - Valores únicos em uma lista.py","file_name":"ex079 - Valores únicos em uma lista.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"17011920586","text":"import socket\nimport time\n\nfrom ...logger import CommLogger\n\nclass InterfaceIP:\n\n # Reopen the socket if it has been open for > 30min\n OPEN_TIMEOUT = 30*60\n\n # Reopen the socket if the last command was > 10min ago\n LAST_TIMEOUT = 10*60\n\n\n class CommError(Exception):\n\n def __init__(self, iface, msg, op=None, ret=None):\n self.iface = iface\n self.msg = msg\n self.op = op\n self.ret = ret\n\n def __str__(self):\n base = \"%s [%s:%i]: %s\"%(type(self.iface).__name__, self.iface.ip, self.iface.port, self.msg)\n if self.op is not None:\n base += \"\\nCMD Sent: %s\"%(self.op)\n if self.ret is not None:\n base += \"\\nReturn Value: %s\"%(self.ret)\n return base\n\n\n def __init__(self, ip, port, log=None):\n self.ip = ip\n self.port = port\n\n self.log = CommLogger(log, type(self).__name__)\n\n self._openSocket()\n\n self.time_last = time.time()\n\n def __del__(self):\n self._closeSocket()\n\n\n def _openSocket(self):\n try:\n #create an AF_INET, STREAM socket (TCP)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n except socket.error:\n error = self.CommError(self, 'Failed to create the socket!')\n self.log.error(str(error))\n raise error\n\n try:\n #Connect to remote server\n self.sock.connect((self.ip , self.port))\n except socket.error:\n error = self.CommError(self, 'Failed to connect to the IP!')\n self.log.error(str(error))\n raise error\n\n self.time_open = time.time()\n\n def _closeSocket(self):\n self.sock.close()\n time.sleep(.300)\n\n\n def close(self):\n self._closeSocket()\n\n\n def query(self, cmd, resp=False, len=4096):\n \"\"\"\n Sends a command to the instrument and receives data if needed.\n\n The cmd string is encoded to bytes and a NEWLINE is appended.\n This function waits to receive data in return if resp=True is passed as an argument.\n The returned data is decoded into a string and the NEWLINE is stripped.\n \"\"\"\n\n # Reopen the socket if the last cmd was a long time ago, or of the socket was open too long\n if self.time_last < time.time() - InterfaceIP.LAST_TIMEOUT or self.time_open < time.time() - InterfaceIP.OPEN_TIMEOUT:\n self.log.warning(\"Socket will be reopened due to timeout.\")\n self._closeSocket()\n self._openSocket()\n\n self.time_last = time.time()\n\n\n try:\n self.log.sent(cmd)\n #Send cmd string\n self.sock.sendall(cmd.encode() + b'\\n')\n time.sleep(0.02)\n except socket.error:\n error = self.CommError(self, 'Failed to send', op=cmd)\n self.log.error(str(error))\n raise error\n\n if resp:\n reply = self.sock.recv(int(len))\n reply = reply.decode().strip('\\n\\r')\n\n self.log.recv(reply)\n return reply\n\n else:\n return True\n\n ## -----------------------------------------\n\n def id(self):\n return self.query(\"*IDN?\", 
True)\n","repo_name":"deragent/ETHZ-TCT-Control","sub_path":"tct/lab/generic/InterfaceIP.py","file_name":"InterfaceIP.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"27963886972","text":"#Ex059 - Crie um programa que leia dois valores e mostre na tela um menu na tela:\r\n\r\n#[ 1 ] somar\r\n\r\n#[ 2 ]multiplicar \r\n\r\n#[ 3 ]maior\r\n\r\n#[ 4 ]novos números\r\n\r\n#[ 5 ]sair do programa\r\n\r\n#Seu programa deverá realizar a operação solicitada em cada caso\r\n\r\n#Pré-correção;\r\n\r\n'''valor1 = float(input('Digite o 1° valor: '))\r\nvalor2 = float(input('Digite o 2° valor: '))\r\n\r\nprint(\"\"\"Como deseja tratar dos valores;\r\n \r\n[ 1 ]Somar\r\n[ 2 ]Multiplicar\r\n[ 3 ]Maior\r\n[ 4 ]Novos números\r\n[ 5 ]Sair do programa\r\n\"\"\")\r\nescolha = int(input('Sua escolha: '))\r\nif escolha == 1:\r\n soma = valor1 + valor2\r\n print(f'{soma :.1f}')\r\nelif escolha == 2:\r\n multiplicação = valor1 * valor2\r\n print(f'Resultado: {multiplicação :.1f}')\r\nelif escolha == 3:\r\n if valor1 > valor2:\r\n print(f'O {valor1} é o maior')\r\n else:\r\n print(f'O {valor2} é o maior')\r\nelif escolha == 4:\r\n valor1 = float(input('Digite o 1° valor: '))\r\n valor2 = float(input('Digite o 2° valor: '))'''\r\n \r\n#Pós correção;\r\n \r\nn1 = int(input('Primeiro valor: '))\r\nn2 = int(input('Segundo valor: '))\r\nopção = 0\r\nwhile opção != 5:\r\n print(''' [ 1 ]somar\r\n [ 2 ]multiplicar\r\n [ 3 ]maior\r\n [ 4 ]novos números \r\n [ 5 ]sair do programa''')\r\n opção = int(input('>>>>> Qual é sua opção? '))\r\n if opção == 1:\r\n soma = n1 + n2\r\n print(f'A soma entre {n1} + {n2} é igual a {soma}')\r\n elif opção == 2:\r\n produto = n1 * n2\r\n print(f'O resultado de {n1} x {n2} é {produto}')\r\n elif opção == 3:\r\n if n1 > n2:\r\n maior = n1\r\n else: \r\n maior = n2\r\n print(f'Entre {n1} e {n2}, o maior valor é {maior}')\r\n elif opção == 4:\r\n print('Informe os números novamente: ')\r\n n1 = int(input('Primeiro valor: '))\r\n n2 = int(input('Segundo valor: '))\r\n elif opção == 5:\r\n print('Finalizando...')\r\n else:\r\n print('Opção inválida. Tente novamente')\r\n print('=-=' * 10)\r\nprint('Fim do programa! 
Volte sempre')","repo_name":"Gilliardjnr/python-scripts","sub_path":"Ex059 - Estrutura de repetição (while).py","file_name":"Ex059 - Estrutura de repetição (while).py","file_ext":"py","file_size_in_byte":2088,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43794426857","text":"import json\n\nfrom django.views.generic import TemplateView, View\nfrom django.http import JsonResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.db import connection\n\nfrom .forms import (\n Filter, RequestEmpresaForm\n)\n\nfrom .models import Empresa\n\n\n\nclass IndexView(TemplateView):\n template_name = 'index.html'\n\n\nclass TablaEmpresaView(TemplateView):\n template_name = 'empresa/lista.html'\n\n\n def get_context_data(self, **kwargs):\n context = super(TablaEmpresaView, self).get_context_data(**kwargs)\n form = Filter()\n context['form'] = form\n \n return context\n\n\nclass EmpresaDetailView(TemplateView):\n template_name = 'empresa/detail.html'\n\n\nclass RequestEmpresaInfoView(View):\n \n @method_decorator(csrf_exempt)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n # print('Queries Counted: {}'.format(len(connection.queries)))\n # return response\n\n def post(self, *args, **kwargs):\n try:\n data_form = {\n 'request_email': self.request.POST.get('request_email'),\n 'request_f_name': self.request.POST.get('request_f_name'),\n 'request_l_name': self.request.POST.get('request_l_name'),\n }\n list_emp_selecteds = json.loads(self.request.POST.get('request_empresa_list'))\n\n form = RequestEmpresaForm(data_form)\n \n if form.is_valid():\n selecteds = list()\n instance = form.save()\n \n for empresa_id in list_emp_selecteds:\n selecteds.append(empresa_id[\"id\"])\n\n queryset = Empresa.objects.filter(id__in=selecteds)\n instance.empresas.set(list(queryset))\n resp = {\n \"status\": 200,\n \"message\": \"Solicitud procesada exitosamente.\"\n }\n\n return JsonResponse(resp, safe=True)\n else:\n return JsonResponse({\"error\": form.errors, \"status\": 400}, safe=True)\n except Exception as e:\n return JsonResponse({\"error\": str(e), \"status\": 500}, safe=True)\n","repo_name":"projectja/cc_cex_enterpriseproject","sub_path":"enterprises/empresa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9800191904","text":"from tkinter import *\nfrom pandas import *\nfrom random import choice\n\nBACKGROUND_COLOR = \"#B1DDC6\"\nnew_word = []\ncsv_file=\"\"\nempty_dict = {}\n\ndef save_to_csv():\n df_words = DataFrame(all_words)\n df_words.to_csv(\"./data/words_to_learn.csv\", index=False)\n\ndef got_right():\n all_words.remove(new_word)\n save_to_csv()\n get_new_word()\n\ndef got_wrong():\n get_new_word()\n\ndef get_new_word():\n #first thing is to stop the timer that flips the cards as soon as the button is hit.\n global flip_timer\n window.after_cancel(flip_timer)\n\n #get the word from dictionary\n global new_word\n new_word = choice(all_words)\n keys = list(new_word.keys())\n orig_lang = keys[0]\n new_guess_word = new_word[orig_lang]\n\n #changes the layout\n canvas.itemconfig(canvas_img, image=card_front_img)\n canvas.itemconfig(canvas_word, text=new_guess_word, fill=\"black\")\n canvas.itemconfig(canvas_lang, text=orig_lang, fill=\"black\")\n\n #creates a new timer so the card flips after 3 seconds without 
interaction.\n flip_timer = window.after(3000,flip_card, new_word)\n\ndef flip_card(word):\n #gets word from dictionary\n keys = list(word.keys())\n trans_lang = keys[1]\n translated_word = word[trans_lang]\n\n #changes the layout\n canvas.itemconfig(canvas_img, image=card_back_img)\n canvas.itemconfig(canvas_lang, text=trans_lang, fill=\"white\")\n canvas.itemconfig(canvas_word, text=translated_word, fill=\"white\")\n\n\n#Load csv as pandas df and convert the columns into 2 time series and then to a list, to navigate through.\ntry:\n with open(\"./data/words_to_learn.csv\") as data_file:\n data_file.read()\nexcept FileNotFoundError:\n csv_file = \"./data/french_words.csv\"\nelse:\n csv_file = \"./data/words_to_learn.csv\"\nfinally:\n print(csv_file)\n df_words = read_csv(csv_file)\n #all_words = [(word.French, word.English) for index, word in df_words.iterrows()]\n all_words = df_words.to_dict(orient=\"records\")\n\n\nwindow = Tk()\nwindow.title(\"Flashy\")\nwindow.config(padx=50,pady=50, bg=BACKGROUND_COLOR)\nflip_timer = window.after(3000,flip_card, empty_dict)\n\n\n#Images\ncard_back_img = PhotoImage(file=\"./images/card_back.png\")\ncard_front_img = PhotoImage(file=\"./images/card_front.png\")\nright_img = PhotoImage(file=\"./images/right.png\")\nwrong_img = PhotoImage(file=\"./images/wrong.png\")\n\n#Canvas\ncanvas = Canvas(width=800,height=526, bg=BACKGROUND_COLOR, highlightthickness=0)\ncanvas_img = canvas.create_image(400,263,image=card_front_img)\ncanvas_lang = canvas.create_text(400,150,text=\"Title\", font=(\"Ariel\",40,\"italic\"))\ncanvas_word = canvas.create_text(400,263,text=\"word\", font=(\"Ariel\",60,\"bold\"))\ncanvas.grid(column=0,row=0, columnspan=2)\n\n#Buttons\nright_bt = Button(image=right_img, command=got_right, highlightthickness=0)\n# right_bt = Button(image=right_img, command=get_new_word, highlightthickness=0)\nright_bt.grid(column=0,row=1)\n\nwrong_bt = Button(image=wrong_img, command=got_wrong, highlightthickness=0)\n# wrong_bt = Button(image=wrong_img, command=get_new_word, highlightthickness=0)\nwrong_bt.grid(column=1,row=1)\n\nget_new_word()\n\nwindow.mainloop()","repo_name":"mozartiano123/python","sub_path":"flash-card-project-start/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36045903198","text":"# -*- encoding: utf-8 -*-\n# Module ialblshow\nfrom numpy import *\nfrom numpy.random import rand\n\ndef ialblshow(f):\n from iaapplylut import iaapplylut\n\n nblobs = max(ravel(f))\n r = floor(0.5 + 255*rand(nblobs, 1))\n g = floor(0.5 + 255*rand(nblobs, 1))\n b = floor(0.5 + 255*rand(nblobs, 1))\n ct = concatenate((r,g,b), 1)\n ct = concatenate(([[0,0,0]], ct))\n\n g = iaapplylut(f, ct)\n return g\n\n","repo_name":"andersonfreitas/ia636","sub_path":"ia636/ialblshow.py","file_name":"ialblshow.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13381929590","text":"#Memory Flash Game by bjh-developer, built in Visual Studio Code, with Python 3\n\n### Optimised for all IDE except for Jupyter Notebook! ###\n\n#Let's make a game!\n#The game plays like this: Each level, a series of numbers (0 to 9) will be flashed, and each number is flashed \n#at a regular interval (like blinking - show 1 number, clear it, show 2nd number, etc). \n#So level 1 has 1 number, level 2 has 2 and so on. 
When the entire series is shown\n#we ask the player to answer, what were the numbers he/she saw. E.g. if he saw 1, 2, 3, his answer should be '123'.\n#If he answers correctly, he goes on to the next level. If not, it's game over!\n#To start the game, we can print 'Ready?' and wait for 1 second, and start flashing.\n\n#We shall learn to organise our game and divide it up into specific tasks. And what is a block of code that does a\n#specific task?\n#Tasks:\n#1. Start/Manage the game - start the game, and generally run the game\n#2. Generate the series of numbers\n#3. Flash the numbers\n#4. Get the answer from the player\n#5. Check the answer against the series (optional, we can put this in 1.)\n\nimport time\nimport random\nimport os\n\ngame = True\nround = 1\ncomp_num = []\ncheck_num = ''\nuser_ans = ''\n\ndef generate_series():\n # Generates the number series for the round\n for i in range(round):\n global comp_num\n comp_num.append(random.randint(0, 9))\n\ndef start_round():\n # Flashes the numbers and asks the user for the answer\n os.system('clear')\n for j in range(len(comp_num)):\n print(comp_num[j])\n time.sleep(0.5)\n os.system('clear')\n print()\n time.sleep(0.2)\n os.system('clear')\n global user_ans\n user_ans = input(\"What is your answer? \")\n \ndef get_answer():\n #Request answer from player\n global comp_num\n global check_num\n global user_ans\n check_num = ''.join(str(v) for v in comp_num)\n \ndef start_game():\n #Initialises the game and starts the first round\n global user_ready, game\n print('Welcome to the Memory Flash Game!')\n time.sleep(1.5)\n print()\n print('Instructions:\\n1. Numbers will start appearing on your screen and you must remember them.\\n2. You will have to type out the numbers you memorised and if you got it right, you move on to the next level, if not byebye.')\n time.sleep(3)\n print()\n user_ready = input('Are you ready [Y/N]?')\n\n if user_ready == 'Y' or user_ready == 'y':\n print()\n print(\"EYES ON THE SCREEN\")\n print()\n time.sleep(2)\n game = True\n\n elif user_ready == 'N' or user_ready == 'n':\n print('Alright, you can come back anytime!')\n print()\n time.sleep(2)\n print('byebye!')\n game = False\n\n else:\n print('Sorry, please enter Y/N')\n start_game()\n\n time.sleep(1)\n #os.system('clear')\n\n#callout\nstart_game()\nwhile game:\n\n generate_series()\n start_round()\n get_answer()\n\n if user_ans != check_num:\n print('Oh man... you loss.')\n print()\n time.sleep(2)\n print(f'Your highest score is {round - 1}')\n print()\n time.sleep(2)\n print('You can re-try the game next time, just re-run the code :)')\n print()\n time.sleep(2)\n print('Byebye!')\n print()\n print()\n print('Memory Flash Game by bjh-developer, built in Visual Studio Code, with Python 3')\n game = False\n \n elif user_ans == check_num:\n round += 1\n comp_num.clear()\n print(f'Congratulations! You win! Time to go on to the next level! 
(Round number = number of digits: {round})')\n print()\n time.sleep(3.5)\n print(\"EYES ON THE SCREEN\")\n time.sleep(2)\n else:\n print('error1')\n","repo_name":"bjh-developer/Python-Mini-Projects","sub_path":"Memory Flash Game.py","file_name":"Memory Flash Game.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"619950890","text":"from oagcnn.engine.predictor import OAGCNNPredictor\nfrom oagcnn.engine.default_argparser import argument_parser\nfrom oagcnn.models.oagcnn import OAGCNN\nfrom oagcnn.config.defaults import cfg\n\n\ndef inference_from_config(cfg):\n device = \"cuda\" if cfg.PREDICTOR.USE_GPU else \"cpu\"\n model = OAGCNN(cfg, device)\n\n predictor = OAGCNNPredictor(cfg, device, model)\n predictor.run_predictor()\n\n\nif __name__ == \"__main__\":\n args = argument_parser()\n cfg.merge_from_file(args.config_file)\n cfg.freeze()\n inference_from_config(cfg)\n","repo_name":"acaelles97/OAGCNN","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71641121621","text":"import os\nimport sys\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom tools.helper import get_experiment_dirs\nfrom tools.plot_helpers import plot_load_duration\n\n\ndef plot():\n\n dirs = get_experiment_dirs('all_scenarios')\n\n price_el = pd.read_csv(\n os.path.join(dirs['raw'], 'price_electricity_spot_2017.csv'),\n index_col=0\n )\n\n price_el.columns = ['electricity']\n\n plot_load_duration(\n price_el,\n legend=False,\n plot_original=True,\n title='Electricity spot price',\n ylabel='Hourly price [Eur/MWh]',\n )\n\n filename = os.path.join(dirs['plots'], 'price_el.pdf')\n\n plt.savefig(filename)\n\n plt.close()\n\n print(f\"Saved plot to {filename}\")\n\nif __name__ == '__main__':\n plot()\n","repo_name":"oemof-heat/district_heating_power_to_heat","sub_path":"model/scripts/plot_combination_scripts/electricity_spot_price_timeseries.py","file_name":"electricity_spot_price_timeseries.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"71110814422","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pages', '0004_auto_20160302_0953'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='page',\n name='id',\n ),\n migrations.AlterField(\n model_name='page',\n name='slug',\n field=models.CharField(max_length=250, unique=True, serialize=False, verbose_name=b\"Page's url name\", primary_key=True),\n ),\n ]\n","repo_name":"developmentseed/api.work.vote","sub_path":"apps/pages/migrations/0005_auto_20160302_0954.py","file_name":"0005_auto_20160302_0954.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"73362621782","text":"'''\n clear_actors\n'''\n\nfrom carla_utils import carla\n\nfrom ..system import parse_yaml_file_unsafe\nfrom ..world_map import Core\n\nfrom .tools import default_argparser\n\n\nif __name__ == \"__main__\":\n print(__doc__)\n \n import os\n from os.path import join\n\n try:\n config = parse_yaml_file_unsafe('./config/carla.yaml')\n 
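# If ./config/carla.yaml is missing, fall back to the packaged default_carla.yaml below.\n    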
except FileNotFoundError:\n print('[vehicle_visualizer] use default config.')\n file_dir = os.path.dirname(__file__)\n config = parse_yaml_file_unsafe(join(file_dir, './default_carla.yaml'))\n args = default_argparser().parse_args()\n config.update(args)\n\n core = Core(config, use_tm=False)\n core.tick()\n actors = core.world.get_actors()\n vehicles = actors.filter('*vehicle*')\n sensors = actors.filter('*sensor*')\n\n for actor in sensors: print(actor)\n for actor in vehicles: print(actor)\n\n import pdb; pdb.set_trace()\n\n for actor in sensors: actor.destroy()\n for actor in vehicles: actor.destroy()\n\n core.tick()\n","repo_name":"IamWangYunKai/DG-TrajGen","sub_path":"carla_utils/utils/ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"12"} +{"seq_id":"29768279875","text":"# -*- encoding: utf-8 -*-\nimport warnings\nimport requests\n\n__all__ = ('BaiduGeoCoderService',)\n\nclass BaiduGeoCoderService(object):\n \"\"\"\n 包装了百度地图WEB服务API的类,可以方便调用百度地图web服务API\n \"\"\"\n api_url = 'http://api.map.baidu.com/geocoder/v2/'\n\n def __init__(self, api_ak, null=True):\n if not api_ak:\n warnings.warn('Using Baidu map service need an api ak value')\n self.api_ak = api_ak\n self.null = null\n\n def address_to_location(self, address):\n \"\"\"\n 根据地址来返回地址的经纬度数据元组: (longitude, latitude)\n \"\"\"\n if not address:\n return (None, None) if self.null else (0.0, 0.0)\n\n try:\n response = requests.get(self.api_url, params={\n 'address': address,\n 'ak' : self.api_ak,\n 'output' : 'json'\n }).json()\n return response['result']['location']['lng'], \\\n response['result']['location']['lat']\n except Exception as e:\n return (None, None) if self.null else (0.0, 0.0)\n\n def location_to_address(self, longitude, latitude):\n \"\"\"\n 根据指定的经纬度来返回对应的地址字符串\n \"\"\"\n if longitude is None and latitude is None:\n warnings.warn('Can not use None value to get address value!')\n return None\n\n try:\n response = requests.get(self.api_url, params={\n 'location': ','.join([str(latitude), str(longitude)]),\n 'output':'json',\n 'pois':1,\n 'ak':self.api_ak\n }).json()\n return response\n except Exception as e:\n return None\n\n\n","repo_name":"focusonecc/common","sub_path":"geo/baidu_geo_v2.py","file_name":"baidu_geo_v2.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74153432340","text":"#!/usr/bin/env python\n\n# imports and basic setup\nimport argparse\nfrom glob import glob\nimport time\nimport re\nimport traceback\nimport logging\nimport os\n\nfrom dream_utils import *\nfrom file_utils import mkdir_p\nfrom img_utils import *\n\n\nlogging.basicConfig()\nlogger = logging.getLogger('situations')\n\n\ndef calculate_resize(shape, resize):\n if resize is None:\n return None\n if resize[0] > 0 and resize[1] > 0:\n return resize\n if resize[0] > 0:\n return resize[0], int(shape[1] / (shape[0] / float(resize[0])))\n if resize[1] > 0:\n return int(shape[0] / (shape[1] / float(resize[1]))), resize[1]\n\n\ndef get_next_generation(curent_index, gen_path, ext='caffemodel'):\n # the reason to look for '*.solverstate' instead of '*.caffemodel'\n # is that the former appears only if model snapshot is fully written\n pattern = r'_(\\d+).%s$' % ext\n model_files = glob(gen_path + '/ria_gurtow_iter_[0-9]*.%s' % ext)\n model_indices = [int(re.search(pattern, f).groups()[0]) for f in model_files]\n\n # find and return 
the first generation index that is greater than the current one\n    model_indices = [i for i in sorted(model_indices) if i > curent_index]\n    if len(model_indices) > 0:\n        return model_indices[0]\n\n\ndef await_next_generation(curent_index, gen_path):\n    next_index = get_next_generation(curent_index, gen_path)\n    while next_index is None:\n        logger.info('Waiting for model generation next to #%s' % curent_index)\n        time.sleep(1)\n        next_index = get_next_generation(curent_index, gen_path)\n    logger.info('Found generation {next_index} next to {curent_index}'.format(\n        curent_index=curent_index, next_index=next_index))\n    return next_index\n\n\ndef load_generation(gen_index, test=False, gen_path='models/Ria_Gurtow/generations/'):\n    # zero generation is an original model\n    if gen_index == 0:\n        if test:\n            print('Iteration #0 is original emotions model')\n            return None\n\n        from settings.emotions_model import emotions\n        return emotions\n\n    model_path = 'models/Ria_Gurtow/'\n    model_file = '{gen_path}/ria_gurtow_iter_{gen_index}.caffemodel'.format(\n        gen_path=gen_path, \n        gen_index=gen_index\n    )\n    prototxt_file = model_path + 'deploy.prototxt'\n    mean_file = 'models/Ria_Gurtow/train.binaryproto'\n\n    # in test mode we just dump file info and return nothing\n    if test:\n        print('Files for generation #%s:' % gen_index)\n        for resource, file_path in {'model': model_file, 'prototxt': prototxt_file, 'mean': mean_file}.items():\n            if os.path.isfile(file_path):\n                print('SUCCESS: {resource} file in {path}'.format(resource=resource, path=file_path))\n            else:\n                print('ERROR: {resource} file is missing at {path}'.format(resource=resource, path=file_path))\n        return None\n\n    # try to load model generation until success\n    dreamer = None\n    while dreamer is None:\n        try:\n            dreamer = Dreamer(\n                net_fn=prototxt_file,\n                param_fn=model_file,\n                mean=mean_file,\n                end_level='pool5')\n            logger.info('Model generation #%03d is loaded'%gen_index)\n        except KeyboardInterrupt as ke:\n            raise ke\n        except Exception as e:\n            logger.error(traceback.format_exc())\n            time.sleep(1)\n\n    return dreamer\n\n\ndef make_dream(dreamer, image, gen_index, stages, dest,\n               resize_in=None, resize_out=None, image_mask=None, prefix='', verbose_save=False, num_rendered=0):\n    logger.info('Generating dream for generation #%03d...' 
% gen_index)\n \n img_shape = tuple(reversed(image.shape[:2]))\n resize_in = calculate_resize(img_shape, resize_in)\n resize_out = calculate_resize(img_shape, resize_out)\n\n in_out = []\n if resize_in is not None:\n in_out += ['in-%sx%s' % resize_in]\n if resize_out is not None:\n in_out += ['out-%sx%s' % resize_out]\n in_out = '-'.join(in_out)\n\n if len(prefix) > 0:\n prefix += '-'\n filename = '{prefix}gen-{gen}-{stages}-{in_out}.jpg'.format(\n prefix=prefix, gen='%05d' % gen_index, stages='-'.join(stages), in_out=in_out)\n\n if verbose_save:\n save_as = dest + '/verbose-gen-%05d' % gen_index\n else:\n save_as = None\n\n dream = dreamer.long_dream(image, stages=stages,\n resize_in=resize_in, resize_out=resize_out,\n mask=image_mask,\n save_as=save_as,\n show_results=False, num_rendered=num_rendered)\n\n # apply mask and save\n if image_mask is not None:\n dream = apply_mask_to_img(dream, image_mask)\n fromarray(dream).save('{dest}/{filename}'.format(dest=dest, filename=filename))\n\n\ndef render_next_generation(gen_index, max_generation=None):\n if max_generation is None:\n return True\n return gen_index <= max_generation\n\n\ndef render(image, start_from, dest, stages,\n max_generation=None,\n resize_in=None, resize_out=None, image_mask=None,\n verbose_save=False, num_rendered=0, test=False, gen_path='models/Ria_Gurtow/generations'):\n gen_index = start_from\n # render generations once ready \n while render_next_generation(gen_index, max_generation):\n try:\n dreamer = load_generation(gen_index, test=test, gen_path=gen_path)\n\n if not test:\n make_dream(dreamer, image, gen_index, stages, dest,\n resize_in=resize_in, resize_out=resize_out, image_mask=image_mask, \n verbose_save=verbose_save, num_rendered=num_rendered)\n else:\n print('Nothing to calculate for generation #%s in test run' % gen_index)\n\n gen_index = await_next_generation(gen_index, gen_path)\n except KeyboardInterrupt as ke:\n raise ke\n except Exception as e:\n logger.error(traceback.format_exc())\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Renders generations for Ria Gurtow model.')\n parser.add_argument('--stages', nargs='*', action='store', type=str, required=True, help='net layers to use in stages')\n parser.add_argument('--image', type=str, required=True, help='image to process')\n parser.add_argument('--mask', type=str, default=None, help='mask to apply over the image')\n parser.add_argument('--start_from', type=int, default=0, help='generation to start from')\n parser.add_argument('--max_gen', type=int, default=None, help='maximum generation to render')\n parser.add_argument('--resize_in', nargs=2, type=int, default=None, help='image size for N-1 stages')\n parser.add_argument('--resize_out', nargs=2, type=int, default=None, help='image size for Nth stages')\n parser.add_argument('--dest', type=str, default='situations/data/frames', help='destination directory for frames')\n parser.add_argument('--gen_path', type=str, default='models/Ria_Gurtow/generations', help='destination of model generations')\n parser.add_argument('--save_all', default=False, action='store_true', help='save all intermediate frames')\n parser.add_argument('--save_stages', default=False, action='store_true', help='save each stage')\n parser.add_argument('--num_rendered', type=int, default=0, help='numbere of extra rendered images to save')\n parser.add_argument('--verbose', action='store_true', help='verbose output')\n parser.add_argument('--test', action='store_true', help='test run')\n args = parser.parse_args()\n\n 
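# logging.basicConfig() above leaves this logger at the default WARNING level;\n    # --verbose lowers it to INFO so per-generation progress messages are shown.\n    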
if args.verbose:\n logger.setLevel(logging.INFO)\n \n logger.info('Arguments: %s'%args)\n\n image_mask = None\n resize_in = None\n resize_out = None\n image = np.float32(PIL.Image.open(args.image))\n if args.mask is not None:\n image_mask = PIL.Image.open(args.mask)\n if args.resize_in is not None:\n resize_in = tuple(args.resize_in)\n if args.resize_out is not None:\n resize_out = tuple(args.resize_out)\n\n mkdir_p(args.dest)\n\n render(image=image, start_from=args.start_from, dest=args.dest, stages=args.stages,\n max_generation=args.max_gen,\n resize_in=resize_in, resize_out=resize_out, image_mask=image_mask,\n verbose_save=args.save_all, num_rendered=args.num_rendered, test=args.test, gen_path=args.gen_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Sitin/neuromatriarchy","sub_path":"situations.py","file_name":"situations.py","file_ext":"py","file_size_in_byte":8428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"10524700444","text":"import io\nimport base64\nimport math\nfrom fastapi import FastAPI, Request, Form\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('seaborn')\n\napp = FastAPI()\ntemplates = Jinja2Templates(directory='templates')\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\n@app.get(\"/\")\nasync def root(request: Request) -> None:\n \"\"\"\n This function returns the index.html page\n \"\"\"\n return templates.TemplateResponse(\"index.html\", \n {\"request\" : request})\n\n@app.get(\"/solve\")\nasync def solve(a:int, b:int, c:int) -> dict:\n \"\"\"\n This function returns the roots of a quadratic equation\n params: a, b, c - coefficients of the quadratic equation\n returns: a dictionary with the roots of the quadratic equation\n \"\"\"\n if b**2 - 4*a*c < 0:\n return {\"roots\" : []}\n elif b**2 - 4*a*c == 0:\n return {\"roots\" : [-b/(2*a)]}\n else:\n return {\"roots\" : sorted([(-b + math.sqrt(b**2 - 4*a*c))/(2*a), (-b - math.sqrt(b**2 - 4*a*c))/(2*a)])}\n\n@app.post(\"/plot\")\nasync def plot(request:Request, a:str = Form(...), b:str = Form(...), c:str = Form(...)):\n \"\"\"\n This function returns the plot of a quadratic equation\n params: a, b, c - coefficients of the quadratic equation\n returns: a plot of the quadratic equation\n \"\"\"\n a = int(a)\n b = int(b)\n c = int(c)\n\n roots = await solve(a, b, c)\n\n fig = plt.figure()\n x = np.linspace(-10, 10, 100)\n y = a*x**2 + b*x + c\n plt.plot(x, y)\n plt.grid()\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Parabola of the quadratic equation')\n plt.axhline(y=0, color='k')\n plt.axvline(x=0, color='k')\n plt.savefig('plot.png')\n \n img = io.BytesIO()\n fig.savefig(img, format='png')\n img.seek(0)\n plot_url = base64.b64encode(img.getvalue()).decode()\n\n return templates.TemplateResponse(\"plot.html\",\n {\"request\" : request, 'a': a, 'b': b, 'c': c,\n \"roots\" : roots['roots'],\n \"picture\" : plot_url})","repo_name":"rustya5041/mds-data-scraping","sub_path":"webdev/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1833945284","text":"from flask_restful import Resource, reqparse, inputs\n\nfrom libs.strings import get_text\nfrom models.product import ProductModel\n\nclass Product(Resource):\n\tdef get(self, name):\n\t\tproduct = 
ProductModel.find_by_name(name)\n\n\t\tif not product:\n\t\t\treturn {\"message\": get_text(\"NOT_FOUND\").format(\"name\")}\n\n\t\treturn {\"Product\": product.json()}\n\n\nclass ProductRegister(Resource):\n\tparser = reqparse.RequestParser()\n\n\tparser.add_argument('name', type=str, required=True)\n\tparser.add_argument('price', type=float, required=True)\n\tparser.add_argument('description', type=str)\n\tparser.add_argument('genre', type=str, required=True)\n\tparser.add_argument('available', type=inputs.boolean)\n\n\tdef post(self):\n\t\tproduct_data = self.parser.parse_args()\n\n\t\ttry:\n\t\t\tproduct = ProductModel(**product_data)\n\t\texcept:\n\t\t\treturn {\"message\": get_text(\"ERROR_CREATING_PRODUCT\")}\n\n\t\tif product.price < 0:\n\t\t\treturn {\"message\": get_text(\"CANT_INPUT_NEGATIVE\")}\n\n\t\tif ProductModel.find_by_name(product.name):\n\t\t\treturn {\"message\": get_text(\"ALREADY_TAKEN\").format(product.name)}\n\n\t\ttry:\n\t\t\tproduct.save_to_database()\n\t\t\treturn {\"message\": get_text(\"CREATED_SUCCESSFULLY\").format(product.name)}\n\t\texcept:\n\t\t\treturn {\"message\": get_text(\"ERROR_ADDING\")}\n\nclass ProductModify(Resource):\n\tparser = reqparse.RequestParser()\n\n\tparser.add_argument('name', type=str, required=True)\n\tparser.add_argument('price', type=float)\n\tparser.add_argument('description', type=str)\n\tparser.add_argument('genre', type=str)\n\tparser.add_argument('available', type=inputs.boolean)\n\n\tdef put(self):\n\t\tproduct_data = self.parser.parse_args()\n\n\t\tproduct = ProductModel.find_by_name(product_data['name'])\n\n\t\tif product.price != product_data['price']:\n\t\t\tproduct.price = product_data['price']\n\n\t\tif product.available != product_data['available']:\n\t\t\tproduct.available = product_data['available']\n\n\t\tif product.genre != product_data['genre']:\n\t\t\tproduct.genre = product_data['genre']\n\n\t\tif product.description != product_data['description']:\n\t\t\tproduct.description = product_data['description']\n\n\t\ttry:\n\t\t\tproduct.save_to_database()\n\t\texcept:\n\t\t\treturn {\"message\": get_text(\"ERROR_ADDING\")}\n\n\t\treturn {\"message\": get_text(\"DONE_MODIFICATIONS\")}\n\n\nclass ProductGenre(Resource):\n\tdef get(self, genre):\n\t\tproducts = ProductModel.find_by_genre(genre)\n\n\t\tif not products:\n\t\t\treturn {\"message\": get_text(\"NOT_FOUND\").format(\"genre\")}\n\n\t\treturn {\"Products\": [product.json() for product in products]}\n\n\nclass ProductPrice(Resource):\n\tdef get(self, price):\n\t\tproducts = ProductModel.find_price_greater(price)\n\n\t\tif not products:\n\t\t\treturn {\"message\": get_text(\"NOT_FOUND\").format(\"price\")}\n\n\t\treturn {\"Products\": [product.json() for product in products]}\n\n\nclass ProductList(Resource):\n\tdef get(self):\n\t\treturn {\"Products\": [product.json() for product in ProductModel.query.all()]}","repo_name":"MuhammadTarek10/singlevendor-ecommerce","sub_path":"resources/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24666366698","text":"from bottle import (route, run, template, request, redirect)\nfrom scraputils import get_news\nfrom db import News, session\nfrom bayes import NaiveBayesClassifier\n\n\n@route(\"/news\")\ndef news_list():\n s = session()\n rows = s.query(News).filter(News.label == None).all()\n return template('news_template', rows=rows)\n\n\n@route(\"/add_label/\")\ndef add_label():\n s = 
session()\n    last_news = get_news()\n    for news in last_news:\n        check = s.query(News).filter(News.author==news['author'], News.title==news['title']).count()\n        if check == 0:\n            new = News(title=news['title'], author=news['author'], url=news['url'],\n                       comments=news['comments'], points=news['points'])\n            s.add(new)\n    s.commit()\n    redirect(\"/news\")\n\n\n@route(\"/classify\")\ndef classify_news():\n    new_news_label = s.query(News).filter(News.title.notin_(x_train), News.label != None).all()\n    x_train_new = [row.title for row in new_news_label]\n    y_train_new = [row.label for row in new_news_label]\n    classifier.fit(x_train_new, y_train_new)\n    news_without_label = s.query(News).filter(News.label == None).all()\n    x = [row.title for row in news_without_label]\n    label = classifier.predict(x)\n    good = [news_without_label[i] for i in\n            range(len(news_without_label)) if label[i] == 'good']\n    maybe = [news_without_label[i] for i in\n             range(len(news_without_label)) if label[i] == 'maybe']\n    never = [news_without_label[i] for i in\n             range(len(news_without_label)) if label[i] == 'never']\n\n    return template('recommended', {'good': good, 'never': never, 'maybe': maybe})\n\n\nif __name__ == \"__main__\":\n    s = session()\n    classifier = NaiveBayesClassifier()\n    marked_news = s.query(News).filter(News.label != None).all()\n    x_train = [row.title for row in marked_news]\n    y_train = [row.label for row in marked_news]\n    classifier.fit(x_train, y_train)\n    run(host=\"localhost\", port=8080)\n","repo_name":"SpaceOcean/cs102","sub_path":"homework06/hackernews.py","file_name":"hackernews.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36600263780","text":"\"\"\"\nSample Wise Transformer\n\"\"\"\n\nimport torch\nfrom commune.transformation.block.base import SequentialTransformPipeline\nfrom commune.transformation.block.torch import step_indexer, standard_variance, difference_transform\n\nfrom copy import deepcopy\n\nclass SampleTransformManager(object):\n    def __init__(self,\n                 feature_group,\n                 process_pipeline_map=None):\n\n\n        self.__dict__.update(locals())\n\n        if not self.process_pipeline_map:\n            self.build()\n\n    def build(self):\n        \"\"\"\n        Build mapping for tensor key to transform pipeline\n        \"\"\"\n        process_pipeline_map = {}\n        # price sample wise preprocessing for prices\n        for key in self.feature_group['con']:\n            process_pipeline_map[key] = \\\n                {'pipeline': SequentialTransformPipeline(pipeline=[\n                    difference_transform(),\n                    standard_variance()\n                    \n                ]),\n                 'input_key': key}\n\n        self.process_pipeline_map = process_pipeline_map\n\n    def transform(self, x):\n\n        \"\"\"\n\n        :param x: dictionary of tensors\n        :return: dictionary of tensors post transformed\n        \"\"\"\n\n        for key in x.keys():\n            if key in self.process_pipeline_map:\n                pipeline_state_dict = self.process_pipeline_map[key]\n                input_key = pipeline_state_dict['input_key']\n                pipeline_obj = pipeline_state_dict['pipeline']\n                x[key] = pipeline_obj.transform(deepcopy(x[input_key]))\n        return x\n\n    def __call__(self, x):\n\n        \"\"\"\n        :param x: dictionary of inputs\n        :return: dictionary of outputs\n        \"\"\"\n\n        with torch.no_grad():\n            # prefix with gt_ if these keys are in predicted columns\n            # we want to take 
the first initial input periods\n\n \n x = self.transform(x)\n\n return x\n\n\n","repo_name":"commune-ai/commune-ai","sub_path":"backend/commune/transformation/complete/regression/crypto/split/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"5194882312","text":"import pandas as pd\r\nimport numpy as np\r\nimport streamlit as st\r\nimport plotly.express as px\r\n\r\nimport json\r\nimport requests\r\n\r\n#Title\r\nst.markdown(\"
<h1>Marketing Dashboard</h1>\", unsafe_allow_html=True)\r\nst.markdown(\"<p>This dashboard will help you get more information about the Marketing datasets and their output</p>
\", unsafe_allow_html=True)\r\n\r\n#Background Image\r\nimport base64\r\ndef add_bg_from_local(image_file):\r\n    with open(image_file,\"rb\") as image_file:\r\n        encoded_string = base64.b64encode(image_file.read())\r\n    st.markdown(\r\n    f\"\"\"<style>\r\n    .stApp {{\r\n    background-image: url(data:image/png;base64,{encoded_string.decode()});\r\n    background-size: cover\r\n    }}\r\n    </style>\r\n    \"\"\",\r\n    unsafe_allow_html=True\r\n    )\r\nadd_bg_from_local('Background Image.png')\r\n \r\n#Separating orderbook and trade columns\r\ndf=pd.read_json('data.json')\r\n\r\nl0 = df.columns\r\nl1 = []\r\nl2 = []\r\n\r\ni=0\r\nfor i,x in enumerate (l0) :\r\n    if 'orderbook' in x :\r\n        l1.append(x)\r\n    elif 'trade' in x:\r\n        l2.append(x)\r\n    i += 1\r\n\r\ndf1 = pd.DataFrame(l1)\r\ndf2 = pd.DataFrame(l2) \r\n\r\n# Functions for each of the pages\r\n#All types of charts\r\n\r\n#Adding dataframe in a button to show\r\n\r\nst.sidebar.title(\"Selecting Visual Charts\")\r\nst.sidebar.markdown(\"Selecting the Charts accordingly.\")\r\n\r\nst.sidebar.markdown(\"
<h2>Show Analysis Status</h2>
\", unsafe_allow_html=True)\r\nDataset = st.sidebar.checkbox(\"Show Status\", False, key = 1)\r\n\r\n\r\nif Dataset: \r\n st.write(df)\r\n\r\n#data = pd.read_json(r\"D:\\Excelr DataScience\\Deployment\\Marketing Dashboard\\data.json\")\r\nchart_visual = st.sidebar.selectbox('Select Visual Charts', ('Select Option','Pie Chart', 'Line Chart', 'Bar Chart'))\r\n \r\ndef interactive_plot1(l0):\r\n st.markdown(\"
<h2>Line Chart</h2>
\", unsafe_allow_html=True)\r\n col1, col2 = st.columns(2)\r\n x_axis_val = col1.selectbox('', options=df1)\r\n y_axis_val = col2.selectbox('', options=df2)\r\n \r\n plot = px.line(df, x=x_axis_val, y=y_axis_val)\r\n st.plotly_chart(plot)\r\n \r\ndef interactive_plot2(l0):\r\n st.markdown(\"
<h2>Bar Chart</h2>
\", unsafe_allow_html=True)\r\n col1, col2 = st.columns(2)\r\n \r\n x_axis_val = col1.selectbox('Select the X-axis', options=df1)\r\n y_axis_val = col2.selectbox('Select the Y-axis', options=df2)\r\n\r\n plot = px.bar(df, x=x_axis_val, y=y_axis_val)\r\n st.plotly_chart(plot)\r\n \r\ndef interactive_plot3(l0):\r\n st.markdown(\"
<h2>Pie Chart</h2>
\", unsafe_allow_html=True)\r\n col1, col2 = st.columns(2)\r\n \r\n x = col1.selectbox('Select the X-axis', options=df1)\r\n y = col2.selectbox('Select the Y-axis', options=df2)\r\n\r\n plot = px.pie(df, x, y)\r\n st.plotly_chart(plot)\r\n \r\n#Sidebar navigation\r\n#st.sidebar.title('Navigation')\r\n#option=st.sidebar.radio('Select what you want to display:',['Line Chart','Bar Chart','Area Chart','Scatter Chart'])\r\n\r\n# Navigation options\r\nif chart_visual == 'Line Chart':\r\n interactive_plot1(df)\r\nelif chart_visual == 'Bar Chart':\r\n interactive_plot2(df)\r\nelif chart_visual == 'Pie Chart':\r\n interactive_plot3(df)\r\n\r\n\r\n","repo_name":"Ssamg25/Marketing-Dashboard","sub_path":"MarketingDashboard.py","file_name":"MarketingDashboard.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9903428809","text":"#!/usr/bin/env python3\n\nimport json\nimport subprocess\n\n\n\ndef get_tors_tags():\n \"\"\" return json of minimal client state\"\"\"\n fields = ['hash', 'custom_getter', 'custom_1' ]\n rcmd = [\"rtcontrol\", '-q', '--json', '*', '-o' + ','.join(fields)]\n cout = subprocess.check_output(rcmd)\n return json.loads(cout)\n\n\nneedstag = {}\n\nfor t in get_tors_tags():\n if t['custom_getter']:\n if t['custom_getter'] != t['custom_1']:\n if t['custom_1']:\n print(t['custom_getter'],\"will override\",t['custom_1'])\n if t['custom_getter'] not in needstag:\n needstag[t['custom_getter']] = []\n needstag[t['custom_getter']].append(t['hash'])\n\n\nfor tag in needstag.keys():\n print(tag, len(needstag[tag]))\n # Just go all in\n rcmd = ['rtcontrol', '-qohash', 'custom_getter='+tag, '--custom=1='+tag]\n subprocess.check_output(rcmd)\n","repo_name":"nayfield/hottip","sub_path":"convert_g_to_1.py","file_name":"convert_g_to_1.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13941040800","text":"from django.urls import path, include\n\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.index, name=\"index\"),\n    path('naviguer/', views.naviguer_entre_pokemon, name=\"naviguer_entre_pokemon\"),\n    path('list_equipe/', views.equipe, name=\"list_equipes\"),\n    path('view_equipe/', views.equipe_details, name=\"equipe_details\"),\n    path('create_equipe/', views.create_equipe, name=\"create_equipe\"),\n    path('update_equipe/', views.update_equipe, name=\"update_equipe\"),\n    path('delete_equipe/', views.delete_equipe, name=\"delete_equipe\"),\n\n]\n","repo_name":"Cahuete0512/emi_pokedex","sub_path":"EMI_Poke_app/Project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40082358489","text":"from collections import defaultdict\n\nclass Solution:\n    def checkRecord(self, s: str) -> bool:\n\n        m = defaultdict(int)\n        for i, c in enumerate(s, start=1):\n            if c == 'L' and s[i-1:i+2].count('L') > 2:\n                return False\n            m[c] += 1\n\n        if m['A'] > 1:\n            return False\n        return True\n\n\ntest = 'LPLLPALLPLL'\nprint(Solution().checkRecord(test))\n","repo_name":"sir-wiggles/leetcode","sub_path":"student-attendance-record-i.py","file_name":"student-attendance-record-i.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40837976884","text":"# -*- coding: utf-8 -*-\n\n\nclass AppError(Exception):\n    def __init__(self, code, message, status_code):\n        self.code = code\n        self.message = message\n        self.status_code = status_code\n\n    def __str__(self):\n        message = self.message\n        if isinstance(message, unicode):\n            message = message.encode('utf-8')\n        return '<%d %s>' % (self.code, message)\n\n\nclass ServerError(AppError):\n    def __init__(self, code, message, status_code=500):\n        super(ServerError, self).__init__(code, message, status_code)\n\n\nclass ClientError(AppError):\n    def __init__(self, code, message, status_code=400):\n        super(ClientError, self).__init__(code, message, status_code)\n\n\n# 全局\nErrArgs = ClientError(10001, u'参数错误')\nErrSystem = ServerError(10002, u'系统错误')\nErrDataBase = ServerError(10003, u'数据库错误')\nErrRedisOpt = ServerError(10004, u'Redis操作失败')\n\n# oss_analysis\nErrNoModel = ClientError(20001, u'此id在数据库中不存在', status_code=404)\nErrOrderType = ClientError(20002, u'此订单类型不支持')\nErrCountType = ClientError(20003, u'此统计类型不支持')\n\n# web_admin\nErrUploadImage = ServerError(30001, u'图片上传失败')\nErrUploadAppPackage = ServerError(30002, u'app上传失败')\n","repo_name":"jacknjzhou/flask_proj","sub_path":"utils/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"39586513483","text":"from django.shortcuts import render\nfrom home.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom client.models import Area, Vehicles\n\n# Create your views here.\ndef index(request):\n    context = {}\n    if not request.user.is_authenticated:\n        return render(request, 'hometemp/login.html', context)\n    else:\n        return render(request, 'hometemp/signin.html', context)\n    \ndef search(request):\n    context = {}\n    return render(request, 'clienttemp/search.html', context)\n\ndef searchResults(request):\n    city = request.POST['city']\n    city = city.lower()\n    vehicleList = []\n    areas = Area.objects.filter(city=city)\n    for area in areas:\n        vehicles = 
Vehicles.objects.filter(area=area)\n for vehicle in vehicles:\n if vehicle.is_available == True:\n vehicleDict = {'name': vehicle.vehiclename, 'model': vehicle.vehiclemodel, 'id': vehicle.id, 'capacity': vehicle.vehiclecapacity, 'description': vehicle.vehicledescription}\n vehicleList.append(vehicleDict)\n request.session['vehiclelist'] = vehicleList\n return render(request, 'clienttemp/searchresults.html')","repo_name":"D3stinn3/RentAV","sub_path":"client/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"38794712064","text":"import os.path\n\nbase_path = os.path.split(os.path.abspath(__file__))[0]\n\nbind = '0.0.0.0:9600'\nworkers = 1\nkeepalive = 1\n#user = 'minestatus'\nerrorlog = os.path.join(base_path, 'log/error.log')\nloglevel = 'warning'\nproc_name = 'minecheck gunicorn'\n","repo_name":"Dav1dde/minecheck","sub_path":"gunicorn.config.py","file_name":"gunicorn.config.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"3418290642","text":"from grid import *\nimport heapq\nimport time\nimport numpy as np\n\nopen_list = []\nclose_list = []\n\ndef basic_link(grid):\n start = time.time()\n open_list.clear()\n close_list.clear()\n InitializeVertex(grid, grid.start)\n heapq.heappush(open_list, grid.start)\n while len(open_list) > 0:\n s = heapq.heappop(open_list)\n if s == grid.goal:\n end = time.time()\n path = []\n current = s\n while current is not grid.start:\n path.append(current)\n current = current.parent\n path.append(grid.start)\n t = turtle.Turtle()\n grid.draw_grid(t)\n draw_path(path, t, (grid.rows-1)*10)\n print(\"Time to complete:\", end - start)\n return path\n close_list.append(s)\n for t in grid.neighbors_theta(s):\n if t not in close_list:\n if t not in open_list:\n InitializeVertex(grid, t)\n if grid.lineofsight(s.parent, t):\n ChoosePath1(grid, s, t)\n else:\n ChoosePath2(grid, s, t)\n print(\"No path found\")\n\ndef InitializeVertex(grid, s):\n if s == grid.start:\n s.parent = s\n s.f = 0\n s.g = 0\n else:\n s.parent = None\n s.f = float(\"inf\")\n s.g = float(\"inf\")\n\ndef ChoosePath1(grid, s, t):\n temp_f = s.parent.f + Theta(grid.goal, s.parent, t)\n if temp_f < t.f:\n t.parent = s.parent\n t.f = temp_f\n t.g = temp_f\n if t not in open_list:\n heapq.heappush(open_list, t)\n heapq.heapify(open_list)\n\ndef ChoosePath2(grid, s, t):\n temp_f = s.f + Theta(grid.goal, s, t)\n if temp_f < t.f:\n t.parent = s\n t.f = temp_f\n t.g = temp_f\n if t not in open_list:\n heapq.heappush(open_list, t)\n heapq.heapify(open_list)\n\ndef Theta(B, A, C):\n if A.x == B.x and A.y == B.y or B.x == C.x and B.y== C.y:\n return 0\n else:\n a = np.array([A.x,A.y])\n b = np.array([B.x,B.y])\n c = np.array([C.x,C.y])\n ba = a-b\n bc = c-b\n return np.arccos((np.dot(ba, bc))/(np.linalg.norm(ba) * np.linalg.norm(bc)))\n\n\n\ndef UpdateVertex(grid, s, t):\n g_old = t.g\n ComputeCost(grid,s,t)\n if t.g < g_old:\n if t in open_list:\n open_list.remove(t)\n heapq.heapify(open_list)\n heapq.heappush(open_list, t)\n\ndef ComputeCost(grid, s, t):\n if grid.lineofsight(s.parent, t):\n if s.parent.g + c(s.parent, t) < t.g:\n t.parent = s.parent\n t.g = s.parent.g + c(s.parent, t)\n t.f = s.parent.f + c(s.parent, t)\n else:\n if s.g + c(s, t) < t.g:\n t.parent = 
s\n","repo_name":"cjohnson19/anyanglesearch","sub_path":"src/basic_link.py","file_name":"basic_link.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"72509235862","text":"import torch\nimport os\nimport json\nimport argparse\nimport numpy as np\nfrom metavision_ml.detection.lightning_model import LightningDetectionModel\nfrom metavision_ml.data.cd_processor_iterator import CDProcessorIterator\nfrom metavision_ml.detection_tracking.object_detector import ObjectDetector\nfrom metavision_core.event_io.events_iterator import EventsIterator\n\n\ndef run_all_tests(checkpoint_path, jit_directory, sequence_raw_filename=None):\n nn_filename = os.path.join(jit_directory, 'model.ptjit')\n testcase_torch_jit_reset(nn_filename)\n testcase_forward_network_with_and_without_box_decoding_cpu(nn_filename)\n testcase_forward_network_with_and_without_box_decoding_gpu_if_available(nn_filename)\n\n if checkpoint_path != None:\n testcase_compare_ckpt_vs_jit(ckpt_filename=checkpoint_path, jit_directory=jit_directory,\n sequence_raw_filename=sequence_raw_filename)\n\n\ndef get_sizes(nn_filename):\n json_filename = os.path.join(os.path.dirname(nn_filename), \"info_ssd_jit.json\")\n with open(json_filename) as f:\n dic = json.load(f)\n return dic['in_channels'], dic['num_classes']\n\n\ndef testcase_torch_jit_reset(nn_filename, height=120, width=160):\n \"\"\"\n Tests the fact that function reset_all() works properly:\n * memory cell and activations are indeed set to zero\n * when providing the same input tensor to the network, the outputs we obtain from the first propagation following\n a reset should be identical to the outputs we obtain from the first propagation after loading the model\n \"\"\"\n print(\"Test the 'reset' function of the model ...\")\n assert os.path.isfile(nn_filename)\n\n device = torch.device(\"cpu\")\n\n model = torch.jit.load(nn_filename, map_location=device)\n model.reset_all()\n model.eval()\n\n in_channels, num_classes = get_sizes(nn_filename)\n\n T, N, C = 1, 1, in_channels\n x = torch.rand(T, N, C, height, width).to(device)\n\n for name, module in model.named_modules():\n if hasattr(module, \"prev_c\"):\n assert (module.prev_c == 0).all().item()\n assert (module.prev_h == 0).all().item()\n\n with torch.no_grad():\n # first propagation\n y1 = model.forward_network_without_box_decoding(x)\n assert type(y1) is list\n assert len(y1) == 2\n cls1 = y1[1]\n assert cls1.dim() == 3\n assert cls1.shape[0] == 1\n assert cls1.shape[2] == num_classes\n for name, module in model.named_modules():\n if hasattr(module, \"prev_c\"):\n assert not (module.prev_c == 0).all().item()\n assert not (module.prev_h == 0).all().item()\n\n # second propagation (without reset)\n y2 = model.forward_network_without_box_decoding(x)\n cls2 = y2[1]\n assert not (cls1 == cls2).all().item()\n for name, module in model.named_modules():\n if hasattr(module, \"prev_c\"):\n assert not (module.prev_c == 0).all().item()\n assert not (module.prev_h == 0).all().item()\n\n # multiple propagations (with reset)\n cls_prev = cls1 \n for _iter in range(10):\n model.reset_all()\n for name, module in model.named_modules():\n if hasattr(module, \"prev_c\"):\n assert (module.prev_c == 0).all().item()\n assert (module.prev_h == 0).all().item()\n y3 = model.forward_network_without_box_decoding(x)\n cls3 = y3[1]\n # for some reason this is not exactly zero at the first iteration\n assert (cls_prev - cls3).abs().max().item() < 1e-5\n for 
name, module in model.named_modules():\n if hasattr(module, \"prev_c\"):\n assert not (module.prev_c == 0).all().item()\n assert not (module.prev_h == 0).all().item()\n cls_prev = cls3\n \n\ndef testcase_forward_network_with_and_without_box_decoding_cpu(nn_filename, height=120, width=160):\n \"\"\"\n Checks forward() is working on torch.jit model on the CPU, with and without torch.no_grad()\n \"\"\"\n print('Test forward propagation on CPU ...')\n device = torch.device(\"cpu\")\n\n assert os.path.isfile(nn_filename)\n model = torch.jit.load(nn_filename).to(device)\n model.reset_all()\n model.eval()\n\n in_channels, num_classes = get_sizes(nn_filename)\n T, N, C = 1, 1, in_channels\n x = 0.05 * torch.arange(T*N*C*height*width).reshape(T, N, C, height, width)\n\n model.forward_network_without_box_decoding(x)\n model.forward(x, 0.5)\n\n with torch.no_grad():\n y1 = model.forward_network_without_box_decoding(x)\n y1 = model(x, 0.5)\n\n\ndef testcase_forward_network_with_and_without_box_decoding_gpu_if_available(nn_filename, height=120, width=160):\n \"\"\"\n Checks forward() is working on torch.jit model on the GPU, with and without torch.no_grad()\n\n If no GPU is available, the test is skipped\n \"\"\"\n print('Test forward propagation on GPU ...')\n if not torch.cuda.is_available():\n return\n\n device = torch.device(\"cuda\")\n\n assert os.path.isfile(nn_filename)\n model = torch.jit.load(nn_filename).to(device)\n model.reset_all()\n model.eval()\n\n in_channels, num_classes = get_sizes(nn_filename)\n T, N, C = 1, 1, in_channels\n x = 0.05 * torch.arange(T*N*C * height * width, device=device).reshape(T, N, C, height, width)\n\n model.forward_network_without_box_decoding(x)\n model.forward(x, 0.5)\n\n with torch.no_grad():\n y1 = model.forward_network_without_box_decoding(x)\n y1 = model(x, 0.5)\n\n\ndef testcase_compare_ckpt_vs_jit(ckpt_filename, jit_directory, height=120, width=160, sequence_raw_filename=None):\n print(\"Test if the PyTorch model and Torchjit model have consistent setting, and test if two models produce \"\n \"the same result when input sequence is provided ...\")\n assert os.path.isfile(ckpt_filename)\n assert os.path.isdir(jit_directory)\n checkpoint = torch.load(ckpt_filename, map_location=torch.device('cpu'))\n hparams = argparse.Namespace(**checkpoint['hyper_parameters'])\n lightning_model = LightningDetectionModel(hparams)\n lightning_model.load_state_dict(checkpoint['state_dict'])\n\n in_channels = lightning_model.hparams[\"in_channels\"]\n assert \"background\" not in lightning_model.hparams[\"classes\"]\n num_classes = len(lightning_model.hparams[\"classes\"]) + 1 # including the background class\n\n jit_filename = os.path.join(jit_directory, \"model.ptjit\")\n assert os.path.isfile(jit_filename)\n jit_json_filename = os.path.join(jit_directory, \"info_ssd_jit.json\")\n jit_json = json.load(open(jit_json_filename, \"r\"))\n\n assert jit_json[\"in_channels\"] == in_channels\n assert jit_json[\"num_classes\"] == num_classes\n\n detector_ckpt = lightning_model.detector\n detector_ckpt.reset_all()\n detector_ckpt.eval()\n\n detector_jit = torch.jit.load(jit_filename)\n detector_jit.reset_all()\n detector_jit.eval()\n\n device = torch.device(\"cpu\")\n T, N, C = 1, 1, in_channels\n\n # check propagation with random inputs gives the same results\n with torch.no_grad():\n for _ in range(30):\n x = torch.rand(T, N, C, height, width)\n\n loc_ckpt, prob_ckpt = detector_ckpt.forward(x)\n loc_jit, prob_jit = detector_jit.forward_network_without_box_decoding(x)\n\n assert 
loc_ckpt.shape == loc_jit.shape\n assert (torch.abs(loc_ckpt - loc_jit) < 1e-5).all(), torch.abs(loc_ckpt - loc_jit).max()\n\n assert prob_ckpt.shape == prob_jit.shape\n assert (torch.abs(prob_ckpt - prob_jit) < 1e-5).all(), (torch.abs(prob_ckpt - prob_jit)).max()\n\n detector_ckpt.reset_all()\n detector_ckpt.eval()\n detector_jit.reset_all()\n detector_jit.eval()\n\n delta_t = lightning_model.hparams.get(\"delta_t\",0)\n assert jit_json[\"delta_t\"] == delta_t\n preproc_name = lightning_model.hparams.get(\"preprocess\",'none')\n assert preproc_name.startswith(jit_json[\"preprocessing_name\"])\n\n if not sequence_raw_filename:\n return\n\n # check boxes prediction are the same\n cdproc_iterator = CDProcessorIterator(path=sequence_raw_filename,\n preprocess_function_name=preproc_name, delta_t=delta_t)\n\n tensors_from_cd_proc_iterator = []\n with torch.no_grad():\n for i, x in enumerate(cdproc_iterator):\n if i >= 20:\n break\n tensors_from_cd_proc_iterator.append(x.clone())\n res_ckpt = detector_ckpt.get_boxes(x[None], score_thresh=0.4, nms_thresh=1.)\n assert len(res_ckpt) == 1\n assert len(res_ckpt[0]) == 1\n res_ckpt = res_ckpt[0][0]\n nb_det_ckpt = 0 if res_ckpt[\"boxes\"] is None else len(res_ckpt[\"boxes\"])\n\n res_jit = detector_jit.forward(x[None], score_thresh=0.4)\n assert len(res_jit) == 1\n assert len(res_jit[0]) == 1\n res_jit = res_jit[0][0]\n assert nb_det_ckpt == len(res_jit)\n\n if nb_det_ckpt > 0:\n idxs_ckpt = res_ckpt[\"scores\"].sort()[1]\n idxs_jit = res_jit[:, 4].sort()[1]\n\n scores_ckpt = res_ckpt[\"scores\"][idxs_ckpt]\n scores_jit = res_jit[idxs_jit, 4]\n assert (torch.abs(scores_ckpt - scores_jit) < 1e-6).all()\n\n classes_ckpt = res_ckpt[\"labels\"][idxs_ckpt]\n classes_jit = res_jit[idxs_jit, 5]\n assert (classes_ckpt == classes_jit).all()\n\n boxes_ckpt = res_ckpt[\"boxes\"][idxs_ckpt]\n boxes_jit = res_jit[idxs_jit, :4]\n assert (torch.abs(boxes_ckpt - boxes_jit) < 1e-6).all()\n tensors_from_cd_proc_iterator = torch.cat(tensors_from_cd_proc_iterator, dim=0).numpy()\n\n # check CD Processing is the same\n events_iterator = EventsIterator(input_path=sequence_raw_filename, delta_t=delta_t, mode=\"delta_t\")\n ev_height, ev_width = events_iterator.get_size()\n object_detector = ObjectDetector(\n directory=jit_directory, events_input_width=ev_width, events_input_height=ev_height)\n assert object_detector.get_accumulation_time() == delta_t\n cd_proc = object_detector.get_cd_processor()\n input_tensor = cd_proc.init_output_tensor()\n assert input_tensor.shape == (C, ev_height, ev_width)\n ts = 0\n for i, events in enumerate(events_iterator):\n if i >= 20:\n break\n input_tensor.fill(0.)\n cd_proc.process_events(cur_frame_start_ts=ts, events_np=events, frame_tensor_np=input_tensor)\n diff_tensors = np.abs(input_tensor - tensors_from_cd_proc_iterator[i])\n assert abs(tensors_from_cd_proc_iterator[i].min() - input_tensor.min()) < 1e-6\n\n assert abs(tensors_from_cd_proc_iterator[i].max() - input_tensor.max()) < 0.01\n assert abs(tensors_from_cd_proc_iterator[i].mean(\n ) - input_tensor.mean()) < 1e-4, \"{} vs {}\".format(tensors_from_cd_proc_iterator[i].mean(), input_tensor.mean())\n\n assert (np.abs(tensors_from_cd_proc_iterator[i] - input_tensor) < 0.01).all()\n ts += 
delta_t\n","repo_name":"charles-baxter-uml/Neuromorphic-Vision-Capstone","sub_path":"CPPMetavionSDK/dep/Prophesee/lib/python3/site-packages/metavision_ml/detection/jitting_test.py","file_name":"jitting_test.py","file_ext":"py","file_size_in_byte":11137,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"35264498552","text":"import logging\nimport re\nfrom datetime import date\nfrom hashlib import sha256\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom git import Repo, TagReference\nfrom git.objects import Commit\n\nimport auto_changelog\nfrom auto_changelog.domain_model import Changelog, RepositoryInterface, default_tag_pattern\n\n\nclass GitRepository(RepositoryInterface): # pylint: disable=too-few-public-methods\n def __init__( # pylint: disable=too-many-arguments\n self,\n repository_path,\n latest_version: Optional[str] = None,\n skip_unreleased: bool = True,\n tag_prefix: str = \"\",\n tag_pattern: Optional[str] = None,\n ):\n self.repository = Repo(repository_path)\n self.tag_prefix = tag_prefix\n self.tag_pattern = tag_pattern\n self.commit_tags_index = self._init_commit_tags_index(self.repository, self.tag_prefix, self.tag_pattern)\n # in case of defined latest version, unreleased is used as latest release\n self._skip_unreleased = skip_unreleased and not bool(latest_version)\n self._latest_version = latest_version or None\n\n def generate_changelog( # pylint: disable=too-many-arguments,too-many-locals\n self,\n title: str = \"Changelog\",\n description: str = \"\",\n remote: str = \"origin\",\n issue_pattern: Optional[str] = None,\n issue_url: Optional[str] = None,\n diff_url: Optional[str] = None,\n starting_commit: str = \"\",\n stopping_commit: str = \"HEAD\",\n ) -> Changelog:\n locallogger = logging.getLogger(\"repository.generate_changelog\")\n issue_url = issue_url or self._issue_from_git_remote_url(remote)\n diff_url = diff_url or self._diff_from_git_remote_url(remote)\n changelog = Changelog(title, description, issue_pattern, issue_url, self.tag_prefix, self.tag_pattern)\n if self._repository_is_empty():\n locallogger.info(\"Repository is empty.\")\n return changelog\n iter_rev = self._get_iter_rev(starting_commit, stopping_commit)\n commits = self.repository.iter_commits(\n iter_rev, topo_order=True\n ) # Fixes this bug: https://github.com/KeNaCo/auto-changelog/issues/112\n # Some thoughts here\n # First we need to check if all commits are \"released\". If not, we have to create our special \"Unreleased\"\n # release. 
Then we simply iterate over all commits, assigning them to the current release or creating a new one when we find it.\n        first_commit = True\n        skip = self._skip_unreleased\n        locallogger.debug(\"Start iterating commits\")\n        for commit in commits:\n            sha = commit.hexsha[0:7]\n            locallogger.debug(\"Found commit %s\", sha)\n\n            if skip and commit not in self.commit_tags_index:\n                locallogger.debug(\"Skipping unreleased commit %s\", sha)\n                continue\n            skip = False\n\n            if first_commit and commit not in self.commit_tags_index:\n                # if no last version specified by the user => consider HEAD\n                if not self._latest_version:\n                    locallogger.debug(\"Adding release 'unreleased'\")\n                    changelog.add_release(\"Unreleased\", sha, date.today(), sha256())\n                else:\n                    locallogger.debug(\"Adding release '%s'\", self._latest_version)\n                    changelog.add_release(self._latest_version, self._latest_version, date.today(), sha256())\n                first_commit = False\n\n            if commit in self.commit_tags_index:\n                release_attributes = self._extract_release_args(commit, self.commit_tags_index[commit])\n                locallogger.debug(\"Adding release '%s' with attributes %s\", release_attributes[0], release_attributes)\n                changelog.add_release(*release_attributes)\n\n            note_attributes = self._extract_note_args(commit)\n            locallogger.debug(\"Adding commit %s with attributes %s\", sha, note_attributes)\n            changelog.add_note(*note_attributes)\n\n        # create the compare url for each release\n        releases = changelog.releases\n        # we are using len(changelog.releases) - 1 because there is no compare url for the oldest version\n        if diff_url is not None:  # if links are off\n            for release_index in reversed(range(len(changelog.releases) - 1)):\n                releases[release_index].set_compare_url(diff_url, releases[release_index + 1].title)\n\n        # Close the link to the repository\n        # If we do not close it, some references are not cleaned up on Windows\n        self.repository.close()\n\n        return changelog\n\n    def _issue_from_git_remote_url(self, remote: str) -> Optional[str]:\n        \"\"\"Creates issue url with {id} format key\"\"\"\n        try:\n            url = self._remote_url(remote)\n            return auto_changelog.default_issue_url.format(base_url=url)\n        except ValueError as e:\n            logging.error(\"%s. Turning off issue links.\", e)\n            return None\n\n    def _diff_from_git_remote_url(self, remote: str):\n        try:\n            url = self._remote_url(remote)\n            return auto_changelog.default_diff_url.format(base_url=url)\n        except ValueError as e:\n            logging.error(\"%s. 
Turning off compare url links.\", e)\n            return None\n\n    def _remote_url(self, remote: str) -> str:\n        \"\"\"Extract the remote url from the remote name\"\"\"\n        url = self._get_git_url(remote=remote)\n        url = GitRepository._sanitize_remote_url(url)\n        return url\n\n    @staticmethod\n    def _sanitize_remote_url(remote: str) -> str:\n        # 'git@github.com:Michael-F-Bryan/auto-changelog.git' -> 'https://github.com/Michael-F-Bryan/auto-changelog'\n        # 'https://github.com/Michael-F-Bryan/auto-changelog.git' -> 'https://github.com/Michael-F-Bryan/auto-changelog'\n        return re.sub(r\"^(https|git|ssh)(:\\/\\/|@)(.*@)?([^\\/:]+)[\\/:]([^\\/:]+)\\/(.+).git$\", r\"https://\\4/\\5/\\6\", remote)\n\n    # This part is hard to mock; a separate method is a nice way to overcome this problem\n    def _get_git_url(self, remote: str) -> str:\n        remote_config = self.repository.remote(name=remote).config_reader\n        # remote url can be in one of these three options\n        # Test if the option exists before accessing it, otherwise the program crashes\n        if remote_config.has_option(\"url\"):\n            return remote_config.get(\"url\")\n        elif remote_config.has_option(\"pushurl\"):\n            return remote_config.get(\"pushurl\")\n        elif remote_config.has_option(\"pullurl\"):\n            return remote_config.get(\"pullurl\")\n        else:\n            return \"\"\n\n    def _get_iter_rev(self, starting_commit: str, stopping_commit: str):\n        if starting_commit:\n            c = self.repository.commit(starting_commit)\n            if not c.parents:\n                # starting_commit is initial commit,\n                # treat as default\n                starting_commit = \"\"\n            else:\n                # iter_commits iterates from the first rev to the second rev,\n                # but does not contain the second rev.\n                # Here we set the second rev to its previous one so that the\n                # second rev is included.\n                starting_commit = f\"{starting_commit}~1\"\n\n        iter_rev = f\"{stopping_commit}...{starting_commit}\" if starting_commit else stopping_commit\n        return iter_rev\n\n    def _repository_is_empty(self):\n        return not bool(self.repository.references)\n\n    @staticmethod\n    def _init_commit_tags_index(\n        repo: Repo, tag_prefix: str, tag_pattern: Optional[str] = None\n    ) -> Dict[Commit, List[TagReference]]:\n        \"\"\"Create reverse index\"\"\"\n        reverse_tag_index: Dict[Commit, List[TagReference]] = {}\n        semver_regex = default_tag_pattern\n        for tagref in repo.tags:\n            tag_name = tagref.name\n            commit = tagref.commit\n\n            consider_tag = False\n\n            # consider & remove the prefix if we found one\n            if tag_name.startswith(tag_prefix):\n                tag_name = tag_name.replace(tag_prefix, \"\")\n\n            # if user specified a tag pattern => consider it\n            if tag_pattern is not None:\n                if re.fullmatch(tag_pattern, tag_name):\n                    consider_tag = True\n            # no tag pattern specified by user => check semver semantics\n            elif re.fullmatch(semver_regex, tag_name):\n                consider_tag = True\n\n            # good format of the tag => consider it\n            if consider_tag:\n                if commit not in reverse_tag_index:\n                    reverse_tag_index[commit] = []\n                reverse_tag_index[commit].append(tagref)\n        return reverse_tag_index\n\n    @staticmethod\n    def _extract_release_args(commit, tags) -> Tuple[str, str, Any, Any]:\n        \"\"\"Extracts arguments for release\"\"\"\n        title = \", \".join(map(lambda tag: f\"{tag.name}\", tags))\n        date_ = commit.authored_datetime.date()\n        sha = commit.hexsha\n\n        # TODO parse message, be careful about commit messages and tag messages\n\n        return title, title, date_, sha\n\n    @staticmethod\n    def _extract_note_args(commit) -> Tuple[str, str, str, str, str, str]:\n        \"\"\"Extracts arguments for release Note from commit\"\"\"\n        sha = commit.hexsha\n        message = commit.message\n        type_, 
scope, description, body, footer = GitRepository._parse_conventional_commit(message)\n        return sha, type_, description, scope, body, footer\n\n    @staticmethod\n    def _parse_conventional_commit(message: str) -> Tuple[str, str, str, str, str]:\n        type_ = scope = description = body_footer = body = footer = \"\"\n        # TODO this is a less restrictive version of the regex. I have a more restrictive one somewhere, maybe offer it as an option?\n        match = re.match(r\"^(\\w+)(\\(\\w+\\))?!?: (.*)(\\n\\n[\\w\\W]*)?$\", message.strip())\n        if match:\n            type_, scope, description, body_footer = match.groups(default=\"\")\n        else:\n            locallogger = logging.getLogger(\"repository._parse_conventional_commit\")\n            locallogger.debug(\"Commit message did not match expected pattern: %s\", message)\n        if scope:\n            scope = scope[1:-1]\n        if body_footer:\n            bf_match = re.match(r\"^(\\n\\n[\\w\\W]+?)?(\\n\\n([a-zA-Z-]+|BREAKING[- ]CHANGE)(: | #)[\\w\\W]+)$\", body_footer)\n            if bf_match:\n                result = bf_match.groups(default=\"\")\n                body = result[0][2:]\n                footer = result[1][2:]\n            else:\n                body = body_footer[2:]\n        return type_, scope, description, body, footer\n","repo_name":"KeNaCo/auto-changelog","sub_path":"auto_changelog/repository.py","file_name":"repository.py","file_ext":"py","file_size_in_byte":10659,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"12"}
+{"seq_id":"36312215889","text":"# Baekjoon No. 10845, Queue\nimport sys\nfrom collections import deque\nn = int(sys.stdin.readline())\ntotal_queue = deque([])\nresult = []\nfor i in range(n):\n    inputs = list(map(str,sys.stdin.readline().rstrip().split()))\n    if (inputs[0] == \"push\"):\n        total_queue.append(inputs[1])\n    elif (inputs[0] == \"pop\"):\n        if len(total_queue) == 0:\n            result.append(-1)\n        else:\n            result.append(total_queue.popleft())\n    elif (inputs[0] == \"size\"):\n        result.append(len(total_queue))\n    elif (inputs[0]== \"empty\"):\n        if len(total_queue) == 0:\n            result.append(1)\n        else:\n            result.append(0)\n    elif (inputs[0]== \"front\"):\n        if len(total_queue) == 0:\n            result.append(-1)\n        else:\n            result.append(total_queue[0])\n    elif (inputs[0] == \"back\"):\n        if len(total_queue) == 0:\n            result.append(-1)\n        else:\n            result.append(total_queue[-1])\nfor i in result:\n    print(i)","repo_name":"heejunns/algorithm","sub_path":"백준 문제풀이/BOJ_10845.py","file_name":"BOJ_10845.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"18366612747","text":"import json\r\nimport numpy as np\r\n# Water Temp: 28\r\n# Lambda = 0.1 {'cost': 1130.5307547912814, 'water_use': 1014.4913956197322, 'health_cost': 1160.3935917154927, 'g_l': 2, 'g_m': 20, 'g_h': 130}\r\n# Lambda = 1.0 {'cost': 1203.040897787921, 'water_use': 1198.3194266608537, 'health_cost': 4.721471127067363, 'g_l': 4, 'g_m': 20, 'g_h': 158}\r\n# Lambda = 10.0 {'cost': 1209.8972370594106, 'water_use': 1208.5131999999896, 'health_cost': 0.13840370594209736, 'g_l': 4, 'g_m': 22, 'g_h': 156}\r\n# Water Temp: 29\r\n# Lambda = 0.1\r\n# Lambda = 1.0 {'cost': 1270.7178261033218, 'water_use': 1266.8580000000647, 'health_cost': 3.859826103257095, 'g_l': 18, 'g_m': 30, 'g_h': 130}\r\n# Lambda = 10.0 {'cost': 1276.3194852495699, 'water_use': 1274.4623980976244, 'health_cost': 0.18570871519455145, 'g_l': 21, 'g_m': 26, 'g_h': 136}\r\n# Lambda = 100.0 {'cost': 1212.7778829323108, 'water_use': 1210.2642266609591, 'health_cost': 0.025136562713516993, 'g_l': 4, 'g_m': 20, 'g_h': 160}\r\ndef find_costs_from_json(data, il, im, ih, 
lambda_water_quality=1):\r\n    time_step_seconds = 360\r\n    time_adj = 60 * 60 / time_step_seconds\r\n    sel_vals = []\r\n    sum_dists = 0\r\n    for d in data:\r\n        l = d[\"gamma_l\"]\r\n        m = d[\"gamma_m\"]\r\n        h = d[\"gamma_h\"]\r\n        dist = ((l-il)**2 + (m-im)**2 + (h-ih)**2)**0.5\r\n        if dist <= 3:\r\n            sel_vals.append({\"dist\": dist, \"water_use\": d[\"water use\"], \"health_cost\": d[\"health_cost\"] / time_adj})\r\n\r\n    if len(sel_vals) == 0:\r\n        return 0, 0, 0\r\n\r\n    water_use = 0\r\n    health_cost = 0\r\n    sum_score = 0\r\n\r\n    for d in sel_vals:\r\n        score = np.exp(-2 * d[\"dist\"]**2)\r\n        water_use += d[\"water_use\"] * score\r\n        health_cost += d[\"health_cost\"] * score\r\n        sum_score += score\r\n\r\n    water_use /= sum_score\r\n    health_cost /= sum_score\r\n\r\n    return water_use, health_cost, water_use + lambda_water_quality * health_cost\r\n\r\n\r\nmin_l = 2\r\nmax_l = 22\r\n\r\nmin_m = 20\r\nmax_m = 82\r\n\r\nmin_h = 100\r\nmax_h = 182\r\n\r\nlambda_water_quality = 10.0\r\n\r\nmin_cost = 10000\r\nselected_params = []\r\nwith open(\"data_T28C_20220807.json\", \"r\") as i:\r\n    data = json.load(i)\r\n    for g_l in np.arange(start=min_l, stop=max_l, step=1):\r\n        print('start for g_l:{}'.format(g_l))\r\n        for g_m in np.arange(start=min_m, stop=max_m, step=1):\r\n            print('start for g_m:{}'.format(g_m))\r\n            costs_hist = []\r\n            for g_h in np.arange(start=min_h, stop=max_h, step=1):\r\n                if g_l < g_m < g_h:\r\n                    water_use, health_cost, cost = find_costs_from_json(data, g_l, g_m, g_h, lambda_water_quality)\r\n                    costs_hist.append(cost)\r\n                    if len(costs_hist) > 5:\r\n                        if costs_hist[-1] > costs_hist[-2] and costs_hist[-2] > costs_hist[-3] and \\\r\n                            costs_hist[-3] > costs_hist[-4] and costs_hist[-4] > costs_hist[-5]:\r\n                            break\r\n                    if water_use == 0:\r\n                        #print({\"g_l\": g_l, \"g_m\": g_m, \"g_h\": g_h,\"Failed\":True})\r\n                        continue\r\n                    if cost < min_cost:\r\n                        min_cost = cost\r\n                        selected_params = {\"cost\": cost, \"water_use\": water_use, \"health_cost\": health_cost,\r\n                                           \"g_l\": g_l, \"g_m\": g_m, \"g_h\": g_h}\r\n                        print(selected_params)\r\n\r\nprint(\"Selected Parameters:{}\".format(selected_params))\r\n","repo_name":"hmofidin/SwimmingPoolSimulator","sub_path":"AnalyzeExperimentalPoint.py","file_name":"AnalyzeExperimentalPoint.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"6504105312","text":"# TODO add janglo jobs to here and make cronjob to update this once a day\n# problem - janglo has unpredictable size, how do you differentiate jobs?\n# cronjob can be done in windows with task scheduler\n# TODO don't add duplicate records! 
namely, add a primary key\n# -*- coding: utf-8 -*-\nimport csv\nimport datetime\nimport urllib.request\nimport sqlite3\n\nimport bs4\nfrom dateutil import parser\n\n\"\"\"\nScrapes Secret Tel Aviv jobs board, adds new jobs into a database\n\"\"\"\n\n\ndef update_db():\n \"\"\"\n Calls the scraping/cleaning functions, exports to CVS\n \"\"\"\n with sqlite3.connect('jobs.db') as con:\n # call the scraping functions\n soup = scrape_secret()\n jobs = clean_jobs(soup)\n result = organise(jobs)\n excel_data = data_cleanser(result)\n export_to_excel(excel_data)\n\n # after exporting to csv (just in case) we delete the title row and the last row (it's not a job)\n # and convert nested lists into tuples (necessary for the sqlite3 import)\n del excel_data[0]\n del excel_data[200]\n new_result = [tuple(l) for l in excel_data]\n # only necessary once\n # con.execute('''CREATE TABLE jobs (Title, Company, Location, Type, Date Posted)''')\n\n con.executemany(\"\"\"\n INSERT INTO \n jobs \n VALUES (?, ?, ?, ?, ?)\"\"\", new_result)\n\n\n# function to remove multiple occurrences of one term ('new')\ndef remove_value_from_list(the_list, val):\n return [value for value in the_list if value != val]\n\n\ndef length_enforcer(the_list, length):\n return [value for value in the_list if len(value) == length]\n\n\n# hit the website and scrape the first page\ndef scrape_secret():\n url = \"https://jobs.secrettelaviv.com/\"\n req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n page = urllib.request.urlopen(req)\n # jobs are in spans\n # soup = BeautifulSoup(page, 'lxml')\n parse_only = bs4.SoupStrainer('span')\n return bs4.BeautifulSoup(page, \"lxml\", parse_only=parse_only)\n\n\ndef clean_jobs(soup):\n jobs = [span.get_text().strip() for span in soup.findChildren()]\n # remove extraneous elements\n rem_list = ['',\n 'Subscribe to our EVENTS Newsletter',\n 'Join our facebook GROUP']\n for removal_string in rem_list:\n jobs.remove(removal_string)\n jobs = remove_value_from_list(jobs, '')\n return remove_value_from_list(jobs, 'new')\n\n\ndef organise(jobs):\n # make list of lists\n result = []\n new_list = []\n for job in jobs:\n if len(new_list) == 7:\n a = list(new_list)\n result.append(a)\n new_list = [job]\n else:\n new_list.append(job)\n result.append(new_list)\n return length_enforcer(result, 7)\n\n\ndef data_cleanser(result):\n for i in result:\n del i[1]\n del i[2]\n try:\n i[4] = parser.parse(i[4])\n except ValueError:\n pass\n result.insert(0,[\"Title\", \"Company\", \"Location\", \"Type\", \"Date Posted\"])\n return result\n\n\ndef export_to_excel(result):\n csvfile = \"secret_today\" + datetime.datetime.today().strftime('%m-%d') + \".csv\"\n with open(csvfile, \"w\") as output:\n writer = csv.writer(output, lineterminator='\\n')\n writer.writerows(result)\n\n\nif __name__ == '__main__':\n update_db()\n","repo_name":"lordgrenville/Python-Scripts","sub_path":"scrape_jobs/secret_scrape.py","file_name":"secret_scrape.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6703509716","text":"#app.py\n\nfrom flask import Flask, render_template, request, redirect, url_for, session\nfrom flask_mysqldb import MySQL,MySQLdb\nimport redis # ==> Make sure to install this library using pip install redis\nfrom datetime import datetime\nimport time\nimport pickle #\n\n\ncounter = 0\napp = Flask(__name__)\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 
'root'\napp.config['MYSQL_PASSWORD'] = 'Cyber!@#'\napp.config['MYSQL_DB'] = 'cyberdb'\napp.config['MYSQL_CURSORCLASS'] = 'DictCursor'\n\nmysql = MySQL(app)\n\nstartTime = datetime.now()\n\n\n\n# Redis Object\nR_SERVER = redis.Redis(\"localhost\")\n\n@app.route('/')#open home screen\ndef home():\n    session['block'] = 'no'\n    session['client'] = 'yes'\n    session.pop('client')\n    return render_template(\"home.html\")\n\n@app.route('/login',methods=[\"GET\",\"POST\"])#search for user\ndef login():\n    if request.method == 'POST':\n        email = request.form['email']\n        #id = request.form['password']\n        if (R_SERVER.get(email)):\n            start = time.time()\n            user =pickle.loads(R_SERVER.get(email))\n            end = time.time()\n            # session['name'] = user['username']\n            session['location'] = 'redis'\n            session['time'] = end - start\n        else:\n            start = time.time()\n            curl = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n            curl.execute(\"SELECT * FROM cloud_users WHERE email=%s\",(email,))\n            user = curl.fetchone()\n            end = time.time()\n            curl.close()\n            if int(user['followers']) > 5:\n                R_SERVER.set(user['email'], pickle.dumps(user) )\n            session['location'] = 'mysql'\n            session['time'] = end - start\n        if user:\n            session['name'] = user['username']\n            session['email'] = user['email']\n            session['followers'] = user['followers']\n            session['id'] = user['id']\n            session['client'] = 'yes'\n            return render_template(\"home.html\")\n\n        else:\n            return \"Error user not found\"\n    else:\n        return render_template(\"login.html\")\n\n@app.route('/logout', methods=[\"GET\", \"POST\"]) #get out from user session\ndef logout():\n    session.clear()\n    return render_template(\"home.html\")\n\n@app.route('/insert', methods=[\"GET\", \"POST\"])#insert 1000 users into mySql\ndef insert():\n    i = 1\n    cur = mysql.connection.cursor()\n    cur.execute(\"DELETE FROM cloud_users\")\n    mysql.connection.commit()\n    while i <= 1000:\n        cur.execute(\"INSERT INTO cloud_users (username, email, followers) VALUES (%s,%s,%s)\",('stam',str(i)+'@gmail.com',str(i),))\n        mysql.connection.commit()\n        i += 1\n    cur.close()\n    session['db'] = ''\n    session['message'] = 'INSERTED successfully'\n    return render_template(\"message.html\")\n\n@app.route('/getList', methods=[\"GET\", \"POST\"])#get all users from mySql or redis (if fetched from mySql, also insert into redis)\ndef getList():\n    if (R_SERVER.get(\"ALL\")):\n        start = time.time()\n        allUsers =pickle.loads(R_SERVER.get(\"ALL\"))\n        end = time.time()\n        session['db'] = 'redis'\n        session['message'] = end - start\n    else:\n        start = time.time()\n        curl = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        curl.execute(\"SELECT * FROM cloud_users\")\n        allUsers = curl.fetchall()\n        end = time.time()\n        curl.close()\n        session['db'] = 'mySql'\n        session['message'] = end - start\n        R_SERVER.set(\"ALL\", pickle.dumps(allUsers) )\n    return render_template(\"message.html\")\n\n@app.route('/forgot', methods=[\"GET\", \"POST\"])#delete user\ndef forgot():\n    if request.method == 'POST':\n        email = request.form['email']\n        R_SERVER.delete(email)\n        R_SERVER.delete(\"ALL\")\n        session['message'] = 'Deleted successfully'\n        return render_template(\"message.html\")\n    else:\n        return render_template(\"forgotPwd.html\")\n\n@app.route('/confirm', methods=[\"GET\", \"POST\"])#update user: if follower amount is bigger than 5, insert into redis, else delete from redis\ndef confirm():\n    if request.method == 'POST':\n        # id = request.form['id']\n        #name = request.form['name']\n        email = request.form['email']\n        followers = request.form['followers']\n        
cur = mysql.connection.cursor()\n        cur.execute(\"UPDATE cloud_users SET followers = %s WHERE email = %s\",(followers,email,))\n        mysql.connection.commit()\n        curl = mysql.connection.cursor(MySQLdb.cursors.DictCursor)\n        curl.execute(\"SELECT * FROM cloud_users WHERE email=%s\",(email,))\n        user = curl.fetchone()\n        curl.close()\n        if int(user['followers']) > 5:\n            R_SERVER.set(email, pickle.dumps(user) )\n        else:\n            R_SERVER.delete(email)\n        R_SERVER.delete(\"ALL\")\n        session['message'] = 'Updated successfully'\n        return render_template(\"message.html\")\n    else:\n        return render_template(\"chackCode.html\")\n\n@app.route('/client', methods=[\"GET\", \"POST\"])# unused.. we hope that we didn't forget to delete it\ndef client():\n    if request.method == 'POST':\n        name = request.form['name']\n        phone = request.form['phone']\n        cur = mysql.connection.cursor()\n        cur.execute(\"INSERT INTO client (FirstName,phone) VALUES (%s,%s)\",(name,phone,))\n        mysql.connection.commit()\n        session['name'] = name\n        session['phone'] = phone\n        session['client'] = 'yes'\n        return render_template(\"home.html\")\n    else:\n        return render_template(\"register.html\")\n\n@app.route('/register', methods=[\"GET\", \"POST\"])#register new user\ndef register():\n    if request.method == 'GET':\n        return render_template(\"register.html\")\n    else:\n\n        name = request.form['name']\n        email = request.form['email']\n        followers = request.form['followers']\n        cur = mysql.connection.cursor()\n        cur.execute(\"INSERT INTO cloud_users (username, email, followers) VALUES (%s,%s,%s)\",(name,email,followers,))\n        mysql.connection.commit()\n        cur.execute(\"SELECT * FROM cloud_users WHERE email=%s\",(email,))\n        user = cur.fetchone()\n        cur.close()\n        session['name'] = user['username']\n        session['email'] = user['email']\n        session['followers'] = user['followers']\n        session['id'] = user['id']\n        session['client'] = 'yes'\n        session['location'] = 'mysql'\n        R_SERVER.delete(\"ALL\")\n        return render_template(\"home.html\")\n\nif __name__ == '__main__':\n    app.secret_key = \"^A%DJAJU^JJ123\"\n    app.run(debug=True)\n","repo_name":"tomco2210/10353---CloudDev-Course","sub_path":"HW4.Customer.Service.With.Redis/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"18678972768","text":"from typing import List\n\n\nclass Solution:\n    def maxProfit(self, prices: List[int]) -> int:\n        if sorted(prices, reverse=True) == prices:\n            return 0\n        max_price, min_price = 0, prices[0]\n        profit = 0\n        for i in range(len(prices) - 1):\n            if prices[i + 1] < prices[i]:\n                min_price = min(prices[i + 1], min_price)\n            elif prices[i + 1] > prices[i]:\n                max_price = prices[i + 1]\n                profit = max((max_price - min_price), profit)\n            else:\n                continue\n        return profit\n","repo_name":"aamikhayloff/leetcode","sub_path":"Problems/Problem_121.py","file_name":"Problem_121.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"6701478770","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 28 09:45:08 2016\n\n@author: Suhas Somnath, Chris Smith\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\nimport time\nfrom warnings import warn\nfrom multiprocessing import cpu_count\nimport numpy as np\nfrom sklearn.utils import gen_batches\nfrom sklearn.utils.extmath import randomized_svd\n\nfrom .process import Process\nfrom ..io.hdf_utils import getH5DsetRefs, 
checkAndLinkAncillary, findH5group, \\\n    getH5RegRefIndices, createRefFromIndices, checkIfMain, calc_chunks, copy_main_attributes, copyAttributes\nfrom ..io.io_hdf5 import ioHDF5\nfrom ..io.io_utils import check_dtype, transformToTargetType, getAvailableMem\nfrom ..io.microdata import MicroDataset, MicroDataGroup\n\n\nclass SVD(Process):\n\n    def __init__(self, h5_main, num_components=None):\n\n        super(SVD, self).__init__(h5_main)\n        self.process_name = 'SVD'\n\n        '''\n        Calculate the size of the main data in memory and compare to max_mem\n        We use the minimum of the actual dtype's itemsize and float32 since we\n        don't want to read it in yet and do the proper type conversions.\n        '''\n        self.data_transform_func, is_complex, is_compound, n_features, n_samples, type_mult = check_dtype(h5_main)\n\n        if num_components is None:\n            num_components = min(n_samples, n_features)\n        else:\n            num_components = min(n_samples, n_features, num_components)\n        self.num_components = num_components\n        self.parms_dict = {'num_components': num_components}\n        self.duplicate_h5_groups = self._check_for_duplicates()\n\n    def compute(self):\n        \"\"\"\n        Computes SVD and writes results to file\n\n        Returns\n        -------\n        h5_results_grp : h5py.Datagroup object\n            Datagroup containing all the results\n        \"\"\"\n\n\n        '''\n        Check if a number of components has been set and ensure that the number is less than\n        the minimum axis length of the data. If both conditions are met, use fsvd. If not\n        use the regular svd.\n\n        C.Smith -- We might need to put a lower limit on num_comps in the future. I don't\n        know enough about svd to be sure.\n        '''\n        print('Performing SVD')\n\n        t1 = time.time()\n\n        U, S, V = randomized_svd(self.data_transform_func(self.h5_main), self.num_components, n_iter=3)\n\n        print('SVD took {} seconds. 
Writing results to file.'.format(round(time.time() - t1, 2)))\n\n self._write_results_chunk(U, S, V)\n del U, S, V\n\n return self.h5_results_grp\n\n def _write_results_chunk(self, U, S, V):\n \"\"\"\n Writes the provided SVD results to file\n\n Parameters\n ----------\n U : array-like\n Abundance matrix\n S : array-like\n variance vector\n V : array-like\n eigenvector matrix\n \"\"\"\n\n ds_S = MicroDataset('S', data=np.float32(S))\n ds_S.attrs['labels'] = {'Principal Component': [slice(0, None)]}\n ds_S.attrs['units'] = ['']\n ds_inds = MicroDataset('Component_Indices', data=np.uint32(np.arange(len(S))))\n ds_inds.attrs['labels'] = {'Principal Component': [slice(0, None)]}\n ds_inds.attrs['units'] = ['']\n del S\n\n u_chunks = calc_chunks(U.shape, np.float32(0).itemsize)\n ds_U = MicroDataset('U', data=np.float32(U), chunking=u_chunks)\n del U\n\n V = transformToTargetType(V, self.h5_main.dtype)\n v_chunks = calc_chunks(V.shape, self.h5_main.dtype.itemsize)\n ds_V = MicroDataset('V', data=V, chunking=v_chunks)\n del V\n\n '''\n Create the Group to hold the results and add the existing datasets as\n children\n '''\n grp_name = self.h5_main.name.split('/')[-1] + '-' + self.process_name + '_'\n svd_grp = MicroDataGroup(grp_name, self.h5_main.parent.name[1:])\n svd_grp.addChildren([ds_V, ds_S, ds_U, ds_inds])\n\n '''\n Write the attributes to the group\n '''\n svd_grp.attrs = self.parms_dict\n svd_grp.attrs.update({'svd_method': 'sklearn-randomized', 'last_pixel': self.h5_main.shape[0] - 1})\n\n '''\n Write the data and retrieve the HDF5 objects then delete the Microdatasets\n '''\n hdf = ioHDF5(self.h5_main.file)\n h5_svd_refs = hdf.writeData(svd_grp)\n\n h5_U = getH5DsetRefs(['U'], h5_svd_refs)[0]\n h5_S = getH5DsetRefs(['S'], h5_svd_refs)[0]\n h5_V = getH5DsetRefs(['V'], h5_svd_refs)[0]\n h5_svd_inds = getH5DsetRefs(['Component_Indices'], h5_svd_refs)[0]\n self.h5_results_grp = h5_S.parent\n\n # copy attributes\n copy_main_attributes(self.h5_main, h5_V)\n h5_V.attrs['units'] = np.array(['a. 
u.'], dtype='S')\n\n        del ds_S, ds_V, ds_U, svd_grp\n\n        # Will attempt to see if there is anything linked to this dataset.\n        # Since I was meticulous about the translators that I wrote, I know I will find something here\n        checkAndLinkAncillary(h5_U,\n                              ['Position_Indices', 'Position_Values'],\n                              h5_main=self.h5_main)\n\n        checkAndLinkAncillary(h5_V,\n                              ['Position_Indices', 'Position_Values'],\n                              anc_refs=[h5_svd_inds, h5_S])\n\n        checkAndLinkAncillary(h5_U,\n                              ['Spectroscopic_Indices', 'Spectroscopic_Values'],\n                              anc_refs=[h5_svd_inds, h5_S])\n\n        checkAndLinkAncillary(h5_V,\n                              ['Spectroscopic_Indices', 'Spectroscopic_Values'],\n                              h5_main=self.h5_main)\n\n        '''\n        Check h5_main for plot group references.\n        Copy them into V if they exist\n        '''\n        for key in self.h5_main.attrs.keys():\n            if '_Plot_Group' not in key:\n                continue\n\n            ref_inds = getH5RegRefIndices(self.h5_main.attrs[key], self.h5_main, return_method='corners')\n            ref_inds = ref_inds.reshape([-1, 2, 2])\n            ref_inds[:, 1, 0] = h5_V.shape[0] - 1\n\n            svd_ref = createRefFromIndices(h5_V, ref_inds)\n\n            h5_V.attrs[key] = svd_ref\n\n###############################################################################\n\n\ndef simplified_kpca(kpca, source_data):\n    \"\"\"\n    Performs kernel PCA on the provided dataset and returns the familiar\n    eigenvector, eigenvalue, and scree matrices.\n\n    Note that the positions in the eigenvalues may need to be transposed\n\n    Parameters\n    ----------\n    kpca : KernelPCA object\n        configured Kernel PCA object ready to perform analysis\n    source_data : 2D numpy array\n        Data arranged as [iteration, features] example - [position, time]\n\n    Returns\n    -------\n    eigenvalues : 2D numpy array\n        Eigenvalues in the original space arranged as [component,iteration]\n    scree : 1D numpy array\n        S component\n    eigenvector : 2D numpy array\n        Eigenvectors in the original space arranged as [component,features]\n\n    \"\"\"\n    X_kpca = kpca.fit(source_data.T)\n    eigenvectors = X_kpca.alphas_.T\n    eigenvalues = X_kpca.fit_transform(source_data)\n    # kpca_explained_variance = np.var(kpca.fit_transform(source_data), axis=0)\n    # information_content = kpca_explained_variance / np.sum(kpca_explained_variance)\n    scree = kpca.lambdas_\n    return eigenvalues, scree, eigenvectors\n\n\ndef rebuild_svd(h5_main, components=None, cores=None, max_RAM_mb=1024):\n    \"\"\"\n    Rebuild the Image from the SVD results on the windows\n    Optionally, only use components less than n_comp.\n\n    Parameters\n    ----------\n    h5_main : hdf5 Dataset\n        dataset which SVD was performed on\n    components : {int, iterable of int, slice} optional\n        Defines which components to keep\n        Default - None, all components kept\n\n        Input Types\n        integer : Components less than the input will be kept\n        length 2 iterable of integers : Integers define start and stop of component slice to retain\n        other iterable of integers or slice : Selection of component indices to retain\n    cores : int, optional\n        How many cores should be used to rebuild\n        Default - None, all but 2 cores will be used, min 1\n    max_RAM_mb : int, optional\n        Maximum amount of memory to use when rebuilding, in Mb.\n        Default - 1024Mb\n\n    Returns\n    -------\n    rebuilt_data : HDF5 Dataset\n        the rebuilt dataset\n\n    \"\"\"\n\n    hdf = ioHDF5(h5_main.file)\n    comp_slice = get_component_slice(components)\n    dset_name = h5_main.name.split('/')[-1]\n\n    # Ensuring that at least one core is available for use / 2 cores are available for other use\n    max_cores = max(1, cpu_count() - 2)\n    # print('max_cores',max_cores)\n    if cores is not None:\n        cores = 
min(round(abs(cores)), max_cores)\n    else:\n        cores = max_cores\n\n    max_memory = min(max_RAM_mb * 1024 ** 2, 0.75 * getAvailableMem())\n    if cores != 1:\n        max_memory = int(max_memory / 2)\n\n    '''\n    Get the handles for the SVD results\n    '''\n    try:\n        h5_svd = findH5group(h5_main, 'SVD')[-1]\n\n        h5_S = h5_svd['S']\n        h5_U = h5_svd['U']\n        h5_V = h5_svd['V']\n\n    except KeyError:\n        warnstring = 'SVD Results for {dset} were not found.'.format(dset=dset_name)\n        warn(warnstring)\n        return\n    except:\n        raise\n\n    func, is_complex, is_compound, n_features, n_samples, type_mult = check_dtype(h5_V)\n\n    '''\n    Calculate the size of a single batch that will fit in the available memory\n    '''\n    n_comps = h5_S[comp_slice].size\n    mem_per_pix = (h5_U.dtype.itemsize + h5_V.dtype.itemsize * h5_V.shape[1]) * n_comps\n    fixed_mem = h5_main.size * h5_main.dtype.itemsize\n\n    if cores is None:\n        free_mem = max_memory - fixed_mem\n    else:\n        free_mem = max_memory * 2 - fixed_mem\n\n    batch_size = int(round(float(free_mem) / mem_per_pix))\n    batch_slices = gen_batches(h5_U.shape[0], batch_size)\n\n    print('Reconstructing in batches of {} positions.'.format(batch_size))\n    print('Batches should be {} Mb each.'.format(mem_per_pix * batch_size / 1024.0 ** 2))\n\n    '''\n    Loop over all batches.\n    '''\n    ds_V = np.dot(np.diag(h5_S[comp_slice]), func(h5_V[comp_slice, :]))\n    rebuild = np.zeros((h5_main.shape[0], ds_V.shape[1]))\n    for ibatch, batch in enumerate(batch_slices):\n        rebuild[batch, :] += np.dot(h5_U[batch, comp_slice], ds_V)\n\n    rebuild = transformToTargetType(rebuild, h5_V.dtype)\n\n    print('Completed reconstruction of data from SVD results. Writing to file.')\n    '''\n    Create the Group and dataset to hold the rebuild data\n    '''\n    rebuilt_grp = MicroDataGroup('Rebuilt_Data_', h5_svd.name[1:])\n\n    ds_rebuilt = MicroDataset('Rebuilt_Data', rebuild,\n                              chunking=h5_main.chunks,\n                              compression=h5_main.compression)\n    rebuilt_grp.addChildren([ds_rebuilt])\n\n    if isinstance(comp_slice, slice):\n        rebuilt_grp.attrs['components_used'] = '{}-{}'.format(comp_slice.start, comp_slice.stop)\n    else:\n        rebuilt_grp.attrs['components_used'] = components\n\n    h5_refs = hdf.writeData(rebuilt_grp)\n\n    h5_rebuilt = getH5DsetRefs(['Rebuilt_Data'], h5_refs)[0]\n    copyAttributes(h5_main, h5_rebuilt, skip_refs=False)\n\n    hdf.flush()\n\n    print('Done writing reconstructed data to file.')\n\n    return h5_rebuilt\n\n\ndef get_component_slice(components):\n    \"\"\"\n    Check the components object to determine how to use it to slice the dataset\n\n    Parameters\n    ----------\n    components : {int, iterable of ints, slice, or None}\n        Input Options\n        integer: Components less than the input will be kept\n        length 2 iterable of integers: Integers define start and stop of component slice to retain\n        other iterable of integers or slice: Selection of component indices to retain\n        None: All components will be used\n    Returns\n    -------\n    comp_slice : slice or numpy array of uints\n        Slice or array specifying which components should be kept\n\n    \"\"\"\n\n    comp_slice = slice(None)\n\n    if isinstance(components, int):\n        # Component is integer\n        comp_slice = slice(0, components)\n    elif hasattr(components, '__iter__') and not isinstance(components, dict):\n        # Component is array, list, or tuple\n        if len(components) == 2:\n            # If only 2 numbers are given, use them as the start and stop of a slice\n            comp_slice = slice(int(components[0]), int(components[1]))\n        else:\n            # Convert components to an unsigned integer array\n            comp_slice = np.uint(np.round(components)).tolist()\n    elif isinstance(components, 
slice):\n        # Components is already a slice\n        comp_slice = components\n    elif components is not None:\n        raise TypeError('Unsupported component type supplied to clean_and_build. '\n                        'Allowed types are integer, numpy array, list, tuple, and slice.')\n\n    return comp_slice\n","repo_name":"gangaiitk/pycroscopy","sub_path":"pycroscopy/processing/svd_utils.py","file_name":"svd_utils.py","file_ext":"py","file_size_in_byte":13020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"}
+{"seq_id":"2890378143","text":"import pygame as pg\nimport time\nimport math\nimport sys\nfrom random import randint\n\nscreen_x = 1600 # screen width\nscreen_y = 900 # screen height\njumping = False # whether the bird is currently jumping\nbg_x = 0 \nv = 0 \n\ndef main():\n    global bg_x, jumping, v\n    t_sta = time.time()\n    clock = pg.time.Clock()\n    # screen setup\n    pg.display.set_caption(\"避けろ!こうかとん\")\n    scrn_sfc = pg.display.set_mode((screen_x,screen_y))\n    scrn_rct = scrn_sfc.get_rect()\n\n    # background setup\n    bg_sfc = pg.image.load(\"fig/pg_bg3.jpg\")\n    bg_rct = bg_sfc.get_rect()\n\n    # koukaton (bird) setup\n    tori_sfc = pg.image.load(\"fig/2.png\")\n    tori_sfc = pg.transform.rotozoom(tori_sfc,0,2.0)\n    tori_rct = tori_sfc.get_rect()\n    tori_rct.center = 350,450\n\n    # obstacle setup\n    bomb_sfc = pg.image.load(\"fig/bomb01.png\")\n    bomb_sfc = pg.transform.rotozoom(bomb_sfc,0,0.18)\n    bomb_rct = bomb_sfc.get_rect()\n    bomb_rct.center = 1450,450\n\n    # draw the background, screen, koukaton, and obstacle\n    while True:\n        key_states = pg.key.get_pressed()\n        for event in pg.event.get():\n            if event.type == pg.QUIT:\n                return\n            if key_states[pg.K_ESCAPE]:\n                return\n\n        # background scroll settings\n        bg_x = (bg_x-2)%1600\n        scrn_sfc.blit(bg_sfc,[bg_x-screen_x,0])\n        scrn_sfc.blit(bg_sfc,[bg_x,0])\n        bg_sfc.blit(bomb_sfc,bomb_rct)\n\n        # jump settings\n        if key_states[pg.K_UP] and jumping == False:\n            jumping = True\n            v = -5\n        if jumping == True:\n            v += 0.05\n            tori_rct.centery += v\n            if tori_rct.centery > 450:\n                tori_rct.centery = 450\n                jumping = False\n        \n        # speeds up landing (fast fall)\n        if key_states[pg.K_DOWN] and jumping == True:\n            v += 0.5\n        scrn_sfc.blit(tori_sfc,tori_rct)\n        pg.display.update()\n\nif __name__ == \"__main__\":\n    pg.init()\n    main()\n    pg.quit()","repo_name":"c0b21149/ProjExD","sub_path":"ex06/jump_koukaton.py","file_name":"jump_koukaton.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"24225857676","text":"def rod_cutting(n):\n    result = [0 for i in range(n+1)]\n    result[0] =0\n    result[1] =0 \n    for i in range(2, n+1):\n        for j in range(i):\n            result[i] = max(result[i], j*(i-j), j*result[i-j]) \n    return result[n]\n\n\nn = int(input('Enter the number '))\nprint(rod_cutting(n))\n","repo_name":"sralli/APS-2020","sub_path":"rod_max_cutting_product.py","file_name":"rod_max_cutting_product.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"40496750300","text":"import sys\nfrom collections import deque\n\n# initialize the shark's info (x, y, size)\ndef initShark(N, Matrix):\n    for i in range(N):\n        for j in range(N):\n            if Matrix[i][j] == 9:\n                Matrix[i][j] = 0\n                return i, j, 2 \n\ndef bfs(shark_x, shark_y, shark_size, N, Matrix):\n    visited = [[False] * N for _ in range(N)]\n    visited[shark_x][shark_y] = True\n    q = deque([(shark_x, shark_y, 0)])\n\n    dx = [-1, 0, 0, 1]\n    dy = [0, -1, 1, 0]\n\n    res = []\n    while q:\n        x, y, t = q.popleft()\n        for i in range(4):\n            nx, ny = x + dx[i], y + dy[i]\n            \n            # shark movement condition\n            if 0 <= nx < N and 0 <= ny < N and not visited[nx][ny] and 
Matrix[nx][ny] <= shark_size:\n                # shark feeding condition (edible fish)\n                if 0 < Matrix[nx][ny] < shark_size: \n                    res.append((nx, ny, t + 1))\n                else:\n                    q.append((nx, ny, t + 1))\n                visited[nx][ny] = True\n    if res:\n        res.sort(key=lambda x: (x[2], x[0], x[1]))\n        x, y, t = res[0]\n        Matrix[x][y] = 0\n        return (x, y, t)\n    else: return None\n\ndef DinnerTime(N, Matrix):\n    shark_x, shark_y, shark_size = initShark(N, Matrix)\n    dinnerTime = 0\n    eat_times = 0\n    while True:\n        res = bfs(shark_x, shark_y, shark_size, N, Matrix)\n        if res is None: break\n        else: \n            shark_x, shark_y = res[0], res[1]\n            dinnerTime += res[2]\n            eat_times += 1\n            if eat_times == shark_size:\n                shark_size += 1\n                eat_times = 0\n\n    return dinnerTime\n\ndef main():\n    N = int(input())\n    Matrix = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]\n    res = DinnerTime(N, Matrix)\n    print(res)\n\nmain()","repo_name":"whxtdxsa/Algorithm","sub_path":"python/graph/simulation/16236.py","file_name":"16236.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"}
+{"seq_id":"17413295487","text":"import pandas as pd \n\ndf = pd.read_csv('PredtandfilaGrid.dat',sep ='\\s+')\n\np = str(input('Enter the start: '' '))\n\nif p == 'altitude':\n    df = df['altitude']\n    \nelif p == 'lat':\n    df = df['lat']\n\nprint(df)","repo_name":"meteorolog90/PROGRAM","sub_path":"script/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"22642004511","text":"import threading\nimport core\n\ncore.read_cache()\n\nclass Column:\n    def __init__(self, name):\n        self.name = name\n\n    def set_type(self, type):\n        self.type = type\n\nclass ColumnCharacterTester(threading.Thread):\n    def __init__(self, database, table, position, index):\n        super(ColumnCharacterTester, self).__init__()\n        self.database = database\n        self.table = table\n        self.position = position\n        self.index = index\n        self.character = -1\n\n    def run(self):\n        self.character = self.get_char()\n        while self.character == -1:\n            self.get_char()\n\n    def test_character(self, character, position, operation=\">=\"):\n        injection_string = \"(SELECT count(*) FROM (SELECT TABLE_SCHEMA,TABLE_NAME, COLUMN_NAME from information_schema.COLUMNS where TABLE_SCHEMA=%s and TABLE_NAME=%s ORDER BY ORDINAL_POSITION LIMIT %d,1) \" \\\n                           \"as temp where ASCII(SUBSTRING(COLUMN_NAME, %d, 1))%s%d)\" % \\\n                           (core.char_array(self.database), core.char_array(self.table), self.index - 1, position, operation, character)\n        return core.check_truth(injection_string)\n\n    def get_char(self):\n        min_char = 1\n        max_char = 128\n        character = (min_char + max_char) / 2\n\n        while True:\n            length_check = self.test_character(character, self.position)\n            core.println(\"Checked column character %d for index %d for position %d: %s\" % (character, self.index, self.position, length_check))\n            if length_check:\n                min_char = character\n            else:\n                max_char = character - 1\n\n            if min_char == max_char or max_char - min_char == 1:\n                min_length_check = self.test_character(min_char, self.position, \"=\")\n                if min_length_check:\n                    core.println(\"Found column character %d for index %d for position %d\" % (min_char, self.index, self.position))\n                    return min_char\n                max_length_check = self.test_character(max_char, self.position, \"=\")\n                if max_length_check:\n                    core.println(\"Found column character %d for index %d for position %d\" % (max_char, self.index, self.position))\n                    return max_char\n                return -1\n\n            
character = (min_char + max_char) / 2\n\n return -1\n\n\nclass ColumnBruteForcer(threading.Thread):\n def __init__(self, database, table, index):\n super(ColumnBruteForcer, self).__init__()\n self.database = database\n self.table = table\n self.character_count = -1\n self.found_characters = \"\"\n self.index = index\n\n def get_name(self):\n return self.found_characters\n\n def run(self):\n self.get_characters()\n\n def get_characters(self):\n self.found_characters = \"\"\n self.character_count = self.get_length()\n\n for j in range((self.character_count // 10) + 1):\n column_character_testers = []\n for i in range(j * 10, (j + 1) * 10):\n if i < self.character_count:\n column_character_tester = ColumnCharacterTester(self.database, self.table, i + 1, self.index)\n column_character_tester.start()\n column_character_testers.append(column_character_tester)\n\n for column_character_tester in column_character_testers:\n column_character_tester.join()\n self.found_characters += chr(column_character_tester.character)\n\n core.println(\"Found column name at index %d is %s\" % (self.index, self.found_characters))\n\n def check_count(self, count, operation = \">=\"):\n injection_string = \"(SELECT count(*) FROM (SELECT TABLE_SCHEMA,TABLE_NAME, COLUMN_NAME from information_schema.COLUMNS where TABLE_SCHEMA=%s and TABLE_NAME=%s ORDER BY ORDINAL_POSITION LIMIT %d,1) \" \\\n \"as temp where length(COLUMN_NAME)%s%d)\" %\\\n (core.char_array(self.database), core.char_array(self.table), self.index - 1, operation, count)\n return core.check_truth(injection_string)\n\n\n def get_length(self):\n min_count = 1\n max_count = 64\n\n count = (min_count + max_count) / 2\n\n while True:\n count_check = self.check_count(count)\n core.println(\"Checked column length for index %d count %d: %s\" % (self.index, count, count_check))\n if count_check:\n min_count = count\n else:\n max_count = count - 1\n\n if min_count == max_count or max_count - min_count == 1:\n min_count_check = self.check_count(min_count, \"=\")\n if min_count_check:\n core.println(\"Found column length for index %d count %d\" % (self.index, min_count))\n return min_count\n max_count_check = self.check_count(max_count, \"=\")\n if max_count_check:\n core.println(\"Found column length for index %d count %d\" % (self.index, max_count))\n return max_count\n break\n\n count = (min_count + max_count) / 2\n return -1\n\nclass ColumnDetector:\n def __init__(self, database, table):\n self.database = database\n self.table = table\n self.columns = []\n\n def start(self):\n pass\n\n def check_count(self, count, operation = \">=\"):\n injection_string = \"(SELECT count(*) from information_schema.COLUMNS where TABLE_SCHEMA=%s and TABLE_NAME=%s)%s%d\" %\\\n (core.char_array(self.database), core.char_array(self.table), operation, count)\n return core.check_truth(injection_string)\n\n def get_count(self):\n min_count = 1\n max_count = 1024\n\n count = (min_count + max_count) / 2\n\n while True:\n count_check = self.check_count(count)\n core.println(\"Checked count %d: %s\" % (count, count_check))\n if count_check:\n min_count = count\n else:\n max_count = count - 1\n\n if min_count == max_count or max_count - min_count == 1:\n min_count_check = self.check_count(min_count, \"=\")\n if min_count_check:\n core.println(\"Found count %d\" % min_count)\n return min_count\n max_count_check = self.check_count(max_count, \"=\")\n if max_count_check:\n core.println(\"Found count %d\" % max_count)\n return max_count\n break\n\n count = (min_count + max_count) / 2\n return 
-1\n\n def get_columns(self):\n column_count = self.get_count()\n core.println(\"Found %d columns\\n\" % column_count)\n\n column_brute_forcers = []\n for i in range(column_count):\n column_brute_forcer = ColumnBruteForcer(self.database, self.table, i + 1)\n column_brute_forcer.start()\n column_brute_forcer.join()\n column_brute_forcers.append(column_brute_forcer)\n\n self.columns = []\n\n for column_brute_forcer in column_brute_forcers:\n # column_brute_forcer.join()\n core.println(\"Found column: %s\" % column_brute_forcer.get_name())\n self.columns.append(Column(column_brute_forcer.get_name()))\n\nif __name__ == '__main__':\n database = \"\"\n table = \"\"\n core.read_cache()\n column_detector = ColumnDetector(database, table)\n column_detector.get_columns()","repo_name":"temesgeny/MySQL-Mapper","sub_path":"columns.py","file_name":"columns.py","file_ext":"py","file_size_in_byte":7546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6947352833","text":"class Review:\r\n\tdef __init__(self, author, rating):\r\n\t\tself.author = author\r\n\t\tself.rating = rating\r\n \r\n\r\nclass Burger:\r\n\tdef __init__(self, name, price):\r\n\t\tself.name = name\r\n\t\tself.price = price\r\n\t\tself.reviews = []\r\n\t\tself.listauthors = []\r\n\t\tself.average = 0\r\n\r\n\tdef add_review(self, author, rating):\r\n\t\tx = Review(author, rating)\r\n\t\tself.reviews.append(x)\r\n\r\n\tdef list_authors(self):\r\n\t\tif len(self.reviews) == 0:\r\n\t\t\treturn self.reviews\r\n\t\telse:\r\n\t\t\ti = 0\r\n\t\t\twhile i < len(self.reviews):\r\n\t\t\t\tself.listauthors.append(self.reviews[i].author)\r\n\t\t\t\ti += 1\r\n\t\t\treturn self.listauthors\r\n\t\r\n\tdef average_rating(self):\r\n\t\tself.total = 0\r\n\t\tself.count = 0\r\n\t\tif len(self.reviews) == 0:\r\n\t\t\treturn 3\r\n\t\telse:\r\n\t\t\tj = 0\r\n\t\t\twhile j < len(self.reviews):\r\n\t\t\t\tself.total += self.reviews[j].rating\r\n\t\t\t\tself.count += 1\r\n\t\t\t\tj += 1\r\n\t\t\tself.average = self.total / self.count\r\n\t\t\treturn (self.average)\r\n\r\ndef top_burgers(burgers, minimum_rating):\r\n goodfood = []\r\n \r\n if type(burgers) != list or type(minimum_rating) != float:\r\n raise TypeError\r\n \r\n i = 0\r\n while i < len(burgers):\r\n if burgers[i].average >= minimum_rating:\r\n goodfood.append(burgers[i])\r\n i += 1\r\n \r\n return goodfood\r\n\t\t\t\t\r\ncheeseburger = Burger('Cheeseburger', 4.99)\r\ncheeseburger.add_review('Aaron', 2)","repo_name":"jonathanthen/INFO1110-and-DATA1002-CodeDump","sub_path":"classburger.py","file_name":"classburger.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"35911126212","text":"import argparse\nimport os\nimport sys\n\nfrom keras.models import load_model\nfrom skimage import io\n\nfrom reconstruction.reconstruction import reconstruct_image\n\n\ndef build_argparse():\n prog_desc = \\\n'''\nReconstruct an image by a trained autoencoder.\n'''\n parser = argparse.ArgumentParser(description=prog_desc, formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('image_path', type=str, help='Input Image.')\n parser.add_argument('autoencoder', type=str, help='AutoEncoder2D (*.h5).')\n parser.add_argument('output_image_path', type=str,\n help='Output Image Path.')\n\n return parser\n\n\ndef print_args(args):\n print('--------------------------------------------')\n print('- Input Image: %s' % args.image_path)\n print('- 
AutoEncoder2D: %s' % args.autoencoder)\n    print('- Output Image Path: %s' % args.output_image_path)\n    print('--------------------------------------------\\n')\n\n\ndef validate_args(args):\n    if not args.autoencoder.endswith('.h5'):\n        sys.exit('Invalid AutoEncoder2D extension: %s\\nTry *.h5' %\n                 args.autoencoder)\n\n    parent_dir = os.path.dirname(args.output_image_path)\n    if parent_dir and not os.path.exists(parent_dir):\n        os.makedirs(parent_dir)\n\n\ndef main():\n    parser = build_argparse()\n    args = parser.parse_args()\n    print_args(args)\n    validate_args(args)\n\n\n    print('- Loading Input Image')\n    img = io.imread(args.image_path)\n\n    print('- Loading AutoEncoder2D')\n    autoencoder = load_model(args.autoencoder)\n\n    print('- Reconstructing Image')\n    img_out = reconstruct_image(img, autoencoder)\n\n    print('- Saving Reconstructed Image')\n    io.imsave(args.output_image_path, img_out)\n\n    print('\\n- Done...')\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"iamsamucoding/IBIX-CAE","sub_path":"vaumc/reconstruct_image.py","file_name":"reconstruct_image.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"5367604519","text":"import pickle\nimport time\nimport zmq\nimport numpy as np\nfrom pyquaternion import Quaternion\n\nDEMO_DATA = \"/data/seita/softgym_mm/data_demo/physicalEnv_v01_rotationsAlso_exp_1/BC_sess7_bc_thisOne_8.pkl\"\n\nif __name__ == \"__main__\":\n    # Start inference by running\n    # python -m bc.inference [args]\n    # Run `python -m bc.inference -h` for help\n\n    # If you want to start inference programmatically, do something like\n\n    # from bc.inference import Inference\n    # inference = Inference(\n    #     obs_shape=(2000, 6),\n    #     exp_config=EXP_CONFIG DICTIONARY,\n    #     model_path=MODEL_PATH,\n    # )\n    # inference.start()\n\n    # You can use inference.stop() to stop it.\n\n    # Init zmq\n    context = zmq.Context()\n    obs_socket = context.socket(zmq.PUB)\n    obs_socket.setsockopt(zmq.SNDHWM, 0)\n    obs_socket.bind(\"tcp://127.0.0.1:2024\")\n\n    # Note! You can send observations and receive actions\n    # from two different processes/threads.\n    # e.g. 
observations arrive at 10 Hz and are sent by the camera process\n    # robot.py can just listen to act_socket\n\n    from zmq import ssh\n\n    print('done with output sock')\n\n    act_socket = context.socket(zmq.SUB)\n    act_socket.subscribe(\"\")\n    tunnel = ssh.tunnel_connection(act_socket, \"tcp://127.0.0.1:5698\", \"sarthak@omega.rpad.cs.cmu.edu\")\n    # act_socket.connect(\"ipc://@act_out\")\n\n    print('no longer blocked!')\n\n    # Now, we are free to send observations and receive actions\n    # Note that observations and actions are not necessarily 1:1\n    # You can send observations at any rate to inference\n    # If inference starts to fall behind, it starts to drop observations\n    # This is why an id should be attached to each observation sent\n    # so you can distinguish which observation is attached to which action\n\n    # We organize obs as a dictionary like so:\n    # obs = {\n    #     \"id\": unique id (ideally monotonically increasing),\n    #     \"obs\": torch observation tensor (shape: (n_points, 6))\n    #         this observation should be the same format as the bc data\n    #         it expects [4:6]-dims to be a one-hot vector encoding\n    #         (tool, target, distractors)\n    #     \"info\": torch or np array, containing the EE position in the\n    #         first 3 items\n    # }\n\n    # For some reason, we need to send something to wake up the connection\n    obs_socket.send_pyobj(\"wakeup\")\n    time.sleep(1)\n\n    # Load demo data\n    with open(DEMO_DATA, \"rb\") as f:\n        data = pickle.load(f)\n    len_o = len(data['obs'])\n\n    actions = []\n\n    start = time.time()\n    for t in range(len_o):\n        obs_tuple = data['obs'][t]\n        obs = obs_tuple[3]\n        # obs = torch.tensor(obs_tuple[3], dtype=torch.float32)\n        info = obs_tuple[0]\n        # So Python 2 assumes ascii as the default encoding when transacting data, whereas Python 3 assumes UTF-8.\n        # twofish runs Python 2 since it needs ROS Kinetic to talk to the Sawyers, so we need to swap the encoding before sending it over\n        # using zmq\n        # In order to change the encoding we first change the object into bytes, and then to UTF-8. 
Same\n #for the numpy characters and t integer\n obs = np.char.decode(obs.astype(np.bytes_), 'UTF-8')\n info = np.char.decode(info.astype(np.bytes_), 'UTF-8')\n t = str(t).decode(\"utf-8\")\n # Create observation dict\n obs_dict = {\n \"id\": t,\n \"obs\": obs,\n \"info\": info,\n }\n\n # Send observation\n\n obs_socket.send_pyobj(obs_dict)\n print(\"Sent obs {}\".format(t))\n\n # Receive action\n action = act_socket.recv_pyobj()\n\n quaternion = Quaternion(axis = [action['action'][3], action['action'][4], action['action'][5]], angle = 1)\n print('Translation: {} Rotation Axis Angle: {} Quaternion: {}'.format(action['action'][:3], action['action'][3:], quaternion))\n\n end = time.time()\n print(\"Time elapsed: {}\".format(end - start))\n print(\"Frame rate: {}\".format(len_o / (end - start)))","repo_name":"SarthakJShetty/tfn-robot","sub_path":"deprecated/inference_demo.py","file_name":"inference_demo.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"5367604519","text":"from plone import api\nfrom plone.memoize import ram\nfrom time import time\nfrom Products.CMFPlone.utils import human_readable_size\nfrom Products.CMFCore.WorkflowCore import WorkflowException\n\nfrom ..helpers import get_user_settings\nfrom ..helpers import can_user_delete\nfrom ..helpers import can_user_answer\nfrom ..helpers import can_user_comment\nfrom ..helpers import can_user_vote\nfrom ..helpers import can_user_approve\nfrom ..helpers import can_user_disapprove\nfrom ..helpers import is_question_open\nfrom ..vocabularies import QuestionSubjectsVocabularyFactory\nfrom ..content.qa_answer import IQaAnswer\n\n\ndef _user_fields_cachekey(method, username, qa_folder_uid):\n cache_time = str(time() // (60 * 60)) # 60 minutes\n return (username, qa_folder_uid, cache_time)\n\n\n@ram.cache(_user_fields_cachekey)\ndef _cached_user_fields(username, qa_folder_uid):\n fallback = username.split('@')[0]\n result = {\n 'fullname': fallback,\n 'id': ''\n }\n api.env.adopt_roles(roles=['Manager'])\n qa_folder = api.content.get(UID=qa_folder_uid)\n if qa_folder is None:\n return result\n\n us = get_user_settings(username, qa_folder)\n if us:\n display_name = getattr(us, 'display_name', None) or fallback\n result['fullname'] = display_name.split('@')[0]\n result['id'] = us.UID()\n else:\n plone_user = api.user.get(username=username)\n if plone_user:\n fullname = plone_user.getProperty('fullname', '')\n lastname = plone_user.getProperty('lastname', '')\n if fullname or lastname:\n result['fullname'] = ' '.join([fullname, lastname])\n return result\n\n\ndef get_user_fields(username, qa_folder):\n qa_folder_uid = qa_folder.UID()\n return _cached_user_fields(username, qa_folder_uid)\n\n\ndef get_tags_fields(context, tags):\n subjects_vocabulary = QuestionSubjectsVocabularyFactory(context)\n result = []\n for tag in tags:\n if tag in subjects_vocabulary:\n term = subjects_vocabulary.getTerm(tag)\n result.append({\n 'id': term.value,\n 'name': term.title\n })\n return result\n\n\ndef get_question_fields(item, is_preview=False):\n\n # A brute way to manage both brain and real objects...\n if hasattr(item, 'getObject'):\n obj = item.getObject()\n else:\n obj = item\n item = None\n\n qa_folder = obj.aq_parent\n author = obj.creators and obj.creators[0] or 'REMOVED USER'\n approved_answer = obj.approved_answer.to_object if obj.approved_answer else None\n\n # subjects_vocabulary = QuestionSubjectsVocabularyFactory(obj)\n # tags = []\n # for 
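The inference_demo.py record that closes above streams observations over a ZeroMQ PUB socket and listens for actions on a SUB socket, and its comments stress that the two streams are not 1:1, which is why every observation carries an id. Two details worth noting: the initial "wakeup" send works around PUB/SUB's slow-joiner behaviour, and the line t = str(t).decode("utf-8") only runs on Python 2, where str is a byte string; Python 3's str has no decode method, so the id handling ties the sender to Python 2. A minimal sketch of the id-matching pattern with pyzmq follows; the ports and dictionary keys copy the record's convention, and the assumption that the inference side echoes the id back is mine, not the record's.

import zmq

context = zmq.Context()

# Publisher: stream observations, each tagged with a monotonically increasing id.
obs_socket = context.socket(zmq.PUB)
obs_socket.bind("tcp://127.0.0.1:2024")

# Subscriber: receive whatever actions inference manages to produce.
act_socket = context.socket(zmq.SUB)
act_socket.subscribe("")
act_socket.connect("tcp://127.0.0.1:5698")

def send_observation(obs_id, obs, info):
    # The id lets the consumer pair an action with the exact observation
    # it came from, even when observations are dropped under load.
    obs_socket.send_pyobj({"id": obs_id, "obs": obs, "info": info})

def recv_matching_action(expected_id):
    # Discard stale actions until one matches the observation we care about
    # (assumes the replies carry an "id" key).
    while True:
        action = act_socket.recv_pyobj()
        if action.get("id") == expected_id:
            return action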
tag in obj.subjects:\n # if tag in subjects_vocabulary:\n # term = subjects_vocabulary.getTerm(tag)\n # tags.append({\n # 'id': term.value,\n # 'name': term.title\n # })\n\n result = {\n '@id': obj.absolute_url(),\n 'id': obj.id,\n 'title': obj.title,\n 'description': item and item.Description or '', # The description is from the brain only\n 'author': get_user_fields(author, qa_folder),\n 'approved': approved_answer and True or False,\n 'approved_answer_user': {},\n 'subs': obj.answer_count(),\n 'added_at': obj.created() and obj.created().asdatetime().isoformat() or '1976-04-29',\n 'closed_at': None,\n 'view_count': obj.view_count(),\n 'comment_count': obj.commment_count(),\n 'vote_count': obj.points(),\n 'tags': get_tags_fields(obj, obj.subjects),\n 'is_open': is_question_open(obj),\n 'can_answer': can_user_answer(obj),\n 'can_comment': can_user_comment(obj),\n 'can_vote': can_user_vote(obj),\n 'message': obj.message,\n }\n\n if item is not None:\n result['last_activity'] = {\n 'at': item.last_activity_at.asdatetime().isoformat() if item.last_activity_at else None,\n 'by': get_user_fields(item.last_activity_by, qa_folder) if item.last_activity_by else None,\n 'what': item.last_activity_what if item.last_activity_what else None,\n }\n elif obj is not None:\n result['last_activity'] = obj.last_activity()\n result['last_activity']['at'] = result['last_activity']['at'].asdatetime().isoformat() if result['last_activity']['at'] else None\n result['last_activity']['by'] = get_user_fields(result['last_activity']['by'], qa_folder) if result['last_activity']['by'] else None\n\n result['has_activity'] = result['last_activity']['what'] in ['comment', 'answer']\n\n if not result['is_open']:\n workflow_tool = api.portal.get_tool('portal_workflow')\n try:\n with api.env.adopt_roles(roles=['Manager']):\n review_history = workflow_tool.getInfoFor(obj, \"review_history\")\n except WorkflowException:\n review_history = []\n\n if review_history:\n last_transition_time = review_history[-1]['time']\n result['closed_at'] = last_transition_time.asdatetime().isoformat()\n else:\n # Fallback if not workflow history\n result['closed_at'] = result.get('last_activity', {}).get('at', None)\n\n\n if result['approved']:\n approved_answer_username = approved_answer.creators and approved_answer.creators[0] or 'REMOVED USER'\n result['approved_answer_user'] = get_user_fields(approved_answer_username, qa_folder)\n\n if not is_preview:\n result['text'] = obj.text and obj.text.output_relative_to(obj) or ''\n result['vote_up_count'] = obj.voted_up_count()\n result['vote_down_count'] = obj.voted_down_count()\n result['voted_up_by'] = []\n result['voted_down_by'] = []\n result['can_delete'] = can_user_delete(obj)\n result['attachments'] = []\n\n for attachment in obj.listFolderContents(contentFilter={\"portal_type\": \"File\"}):\n result['attachments'].append(get_attachment_fields(attachment))\n\n return result\n\n\ndef get_answer_fields(item):\n comments = [get_comment_fields(c) for c in item.listFolderContents(contentFilter={\"portal_type\": \"qa Comment\"})]\n author = item.creators and item.creators[0] or 'REMOVED USER'\n qa_folder = item.aq_parent.aq_parent\n result = {\n 'id': item.id,\n 'author': get_user_fields(author, qa_folder),\n 'text': item.text and item.text.output_relative_to(item) or '',\n 'approved': item.is_approved_answer(),\n 'deleted': api.content.get_state(item) == 'deleted',\n '_meta':\n {\n 'type': item.Type(),\n 'portal_type': item.portal_type\n },\n # 'link': item.absolute_url(),\n # 'rel': 
item.absolute_url(1),\n 'path': f'{item.aq_parent.getId()}/{item.getId()}',\n 'added_at': item.created() and item.created().asdatetime().isoformat() or '1976-04-29',\n 'vote_up_count': item.voted_up_count(),\n 'vote_down_count': item.voted_down_count(),\n 'vote_count': item.points(),\n 'comments': comments,\n 'hasComments': len(comments) > 0,\n 'can_delete': can_user_delete(item),\n 'can_approve': can_user_approve(item),\n 'can_comment': can_user_comment(item),\n 'can_vote': can_user_vote(item),\n 'can_disapprove': can_user_disapprove(item)\n }\n\n result['attachments'] = []\n\n for attachment in item.listFolderContents(contentFilter={\"portal_type\": \"File\"}):\n result['attachments'].append(get_attachment_fields(attachment))\n\n result['voted_up_by'] = []\n result['voted_down_by'] = []\n\n return result\n\n\ndef get_comment_fields(item):\n author = item.creators and item.creators[0] or 'REMOVED USER'\n parent = item.aq_parent\n if IQaAnswer.providedBy(parent):\n qa_folder = parent.aq_parent.aq_parent\n path = f'{parent.aq_parent.getId()}/{parent.getId()}/{item.getId()}'\n else:\n qa_folder = parent.aq_parent\n path = f'{parent.getId()}/{item.getId()}'\n\n return {\n 'id': item.id,\n 'author': get_user_fields(author, qa_folder),\n 'text': item.text,\n 'deleted': api.content.get_state(item) == 'deleted',\n '_meta':\n {\n 'type': item.Type(),\n 'portal_type': item.portal_type\n },\n # 'link': item.absolute_url(),\n # 'rel': item.absolute_url(1),\n 'path': path,\n 'added_at': item.created() and item.created().asdatetime().isoformat() or '1976-04-29',\n 'can_delete': can_user_delete(item),\n }\n\n\ndef get_attachment_fields(item):\n return {\n 'title': item.Title(),\n 'size': human_readable_size(item.get_size()),\n 'filename': item.file.filename,\n 'file_type': item.file.contentType,\n 'url': item.absolute_url() + '/download',\n 'id': item.getId()\n }\n","repo_name":"reflab-it/reflab.plone.qa","sub_path":"src/reflab/plone/qa/services/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19993169488","text":"#!/usr/bin/env python\n#coding=utf-8\n\nfrom __future__ import division, print_function\nfrom ROOT import TH1D, TCanvas, TFile, TH2D\nimport sys\n\nfrom style import *\n\nstyle.SetHistLineWidth(1)\nstyle.SetOptStat(110011)\nstyle.cd()\n\n\nselections = {\"2\":\n [\n \"1000 < ch3\",\n \"ch3 < 1300\",\n \"600 < ch0\",\n \"ch0 < 750\",\n \"700 < ch1\",\n \"ch1 < 850\"\n ],\n \"3\":\n [\n \"1000 < ch3\",\n \"ch3 < 1300\",\n \"ch0 < 600\",\n \"ch1 < 750\",\n \"ch2 < 600\",\n \"300 < ch0\",\n \"400 < ch1\",\n \"300 < ch2\"\n ],\n }\n\n\n#C = a + b E\n#((a, sigma_a), (b, sigma_b)...\ncalibration = [\n ((37.2947460204, 0.0821031655421),\n (1.24197127531, 0.00013659538661)),\n ((128.613363685, 0.0853601013818),\n (1.2933026704, 0.000132253787011)),\n ((48.9236376144, 0.0912198919444),\n (1.24853478884, 0.000149853387339)),\n ]\n\n\n\nselection = \" && \".join(selections[sys.argv[1][0]])\n\n\nroot_file_name = sys.argv[1]\nroot_file = TFile(root_file_name)\ntree = root_file.Get(\"pjmca\")\n\n#can2 = TCanvas(\"2dcan\", \"2dcan\")\n#twodim = TH2D(\"2d\", \"2d\", 1950, 50, 2000, 1950, 50, 2000)\n##tree.Project(\"2d\", \"ch0:ch1\", \"1000 < ch3 && ch3 < 1300\")\n#tree.Project(\"2d\", \"ch0:ch1\")\n#twodim.SetMarkerStyle(1)\n#twodim.Draw()\n#input()\n\ncanvas = TCanvas(root_file_name, root_file_name)\ncanvas.Divide(2, 2)\nhists = [TH1D(root_file_name + str(i), 
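The fields.py record above ages its plone.memoize cache by folding the clock into the cache key: str(time() // (60 * 60)) changes once an hour, so _cached_user_fields is recomputed at most hourly per user and folder. The same trick works without Plone; a framework-free sketch, keeping the record's one-hour bucket.

import functools
import time

BUCKET_SECONDS = 60 * 60  # cached values silently expire when the hour bucket rolls over

@functools.lru_cache(maxsize=1024)
def _cached_user_fields(username, folder_uid, time_bucket):
    # The expensive lookup would go here; time_bucket exists only to age the cache.
    return {'fullname': username.split('@')[0], 'id': folder_uid}

def get_user_fields(username, folder_uid):
    return _cached_user_fields(username, folder_uid, time.time() // BUCKET_SECONDS)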
root_file_name + str(i), 1950, 50,\n 2000) for i in range(3)]\nhists.append(\n TH1D(root_file_name + \"3\", root_file_name + \"3\", 800, 800,\n 1600))\n\nfor i in range(4):\n canvas.cd(i + 1)\n tree.Project(root_file_name + str(i), \"ch{0}\".format(i), selection)\n hists[i].Draw()\n\ncanvas.SaveAs(root_file_name + \".eps\")\n\ncentroids = [(h.GetMean(), h.GetMeanError()) for h in hists[:3]]\n\nfor cal, centroid in zip(calibration, centroids):\n energy = (centroid[0] - cal[0][0]) / cal[1][0]\n \n sigma_energy = energy * (\n cal[0][1] / cal[0][0] +\n cal[1][1] / cal[1][0] +\n centroid[1] / centroid[0]\n )\n\n print(\"Centroid channel = {0[0]:.1f} \\pm {0[1]:.1f} ch\".format(centroid))\n print(\"Centroid energy = {0:.1f} \\pm {1:.1f} keV\".format(energy, sigma_energy))\n\n\nraw_input()\n","repo_name":"Enucatl/lab-unipd","sub_path":"2011/positronio/analisi.dati.py","file_name":"analisi.dati.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
+{"seq_id":"31137137572","text":"# simulation.py\n# this class makes the two players play against one another and manages the statistics \n\nimport numpy as np\n\nfrom actor import Actor\n\nclass Simulation:\n def __init__(self, pL : np.ndarray, oL : np.ndarray, T):\n # for non zero sum games, oL (opponent loss matrix) != -pL.transpose() (player loss matrix)\n self.T = T\n\n # pL[i, j] : loss of the player when the player plays i and the opponent plays j\n # oL[i, j] : loss of the opponent when the player plays i and the opponent plays j\n # for simplicity, the player therefore always comes first in the indexing\n # for a zero sum game we thus have oL = -pL (if the game is not zero sum,\n # both matrices must be specified)\n\n self.loss_matrices = {'player' : pL, 'opponent' : oL}\n self.minlosses = {'player' : np.min(pL), 'opponent' : np.min(oL)}\n\n self.actors = {'player' : None, 'opponent' : None}\n self.actions = {'player' : [], 'opponent' : []}\n # losses : depend on (in)complete information\n self.losses = {'player' : [], 'opponent' : []}\n # true loss = loss incurred by the played actions\n self.bandit_losses = {'player' : [], 'opponent' : []}\n self.comp_losses = {'player' : [], 'opponent' : []}\n\n # set player and opponent\n\n def set_player(self, player : Actor):\n self.actors['player'] = player\n self.actors['player'].set_simulation(self)\n\n def set_opponent(self, opponent : Actor):\n self.actors['opponent'] = opponent\n self.actors['opponent'].set_simulation(self)\n\n # \"bandit\" information : only get loss of the two actions taken\n\n def get_bandit_loss(self, _actor = 'player'):\n # return L(I_t, J_t)\n it, jt = self.actions['player'][-1], self.actions['opponent'][-1] \n return self.loss_matrices[_actor][it, jt]\n\n # complete information : get the losses of all actions given other player's action\n\n def get_complete_loss(self, _actor = 'player'):\n # return L[:, J_t] if player and L[I_t, :] if opponent\n it, jt = self.actions['player'][-1], self.actions['opponent'][-1]\n if _actor == 'player':\n return self.loss_matrices[_actor][:, jt]\n elif _actor == 'opponent':\n return self.loss_matrices[_actor][it, :] \n\n def run(self):\n # alternately run player and opponent + update their strategies\n for t in range(self.T):\n self.actions['player'].append(self.actors['player'].play())\n self.actions['opponent'].append(self.actors['opponent'].play())\n \n self.update()\n\n # update losses w.r.t last actions played\n\n def update(self):\n # each player computes its loss 
and updates its strategy\n for _actor in ['player', 'opponent']:\n actor = self.actors[_actor]\n loss = None\n\n # loss depends on bandit or complete information\n # (will either be real number or vector)\n if actor.complete_info:\n loss = self.get_complete_loss(_actor) \n else:\n loss = self.get_bandit_loss(_actor)\n # if min loss has to be 0 (e.g with Exp3)\n if actor.strategy.b_require_minloss:\n loss -= self.minlosses[_actor]\n\n self.losses[_actor].append(loss)\n \n self.bandit_losses[_actor].append(self.get_bandit_loss(_actor))\n self.comp_losses[_actor].append(self.get_complete_loss(_actor))\n actor.update(loss)\n","repo_name":"lclarte/onlinelearning","sub_path":"code/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19485367516","text":"import os\n\nfrom flask import Flask, render_template\n\nfrom data import make_image\nfrom data import random_org\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'yandexlyceum_secret_key'\n\nAPI_TEMPLATE = 'https://www.random.org/'\n\nNUM_TO_ROOM = {\n 0: 'yellow room',\n 1: 'red room',\n 2: 'green room',\n 3: 'blue room'\n}\n\n\n@app.route('/')\ndef main():\n # generate markers\n\n markers = random_org.get_random_range(0, 3)\n\n # green markers 2 first\n\n # print(f'GREEN markers: {[NUM_TO_ROOM[marker] for marker in markers[:2]]}')\n # print(f'WHITE markers: {[NUM_TO_ROOM[marker] for marker in markers[2:]]}')\n\n # generate frames\n\n frames = random_org.get_random_range(0, 2)\n\n NUM_TO_COL = {\n 0: 'black',\n 1: 'red',\n 2: 'yellow',\n 3: 'null'\n }\n\n # print(f'FRAMES: {[NUM_TO_COL[frame] for frame in frames]}')\n\n # generate cubes\n\n cubes = random_org.get_random_range(0, 3)\n\n # print(\n # f'CUBES: {[(NUM_TO_ROOM[el[0]], el[1]) for el in (enumerate([NUM_TO_COL[cube] for cube in cubes]))]}')\n\n make_image.create_field(markers, frames, cubes)\n\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n make_image.init_resources()\n port = int(os.environ.get(\"PORT\", 5000))\n app.run(host='0.0.0.0', port=port)\n","repo_name":"Anton-beep/wro_random","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2035689819","text":"import os\r\nimport shutil\r\nfrom threading import Lock\r\n\r\nimport ruamel.yaml\r\n\r\n# 种子名/文件名要素分隔字符\r\nSPLIT_CHARS = r\"\\.|\\s+|\\(|\\)|\\[|]|-|\\+|【|】|/|~|;|&|\\||#|_|「|」|(|)|~\"\r\n# 默认User-Agent\r\nDEFAULT_UA = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36\"\r\n# 支持的媒体文件后缀格式\r\nRMT_MEDIAEXT = ['.mp4', '.mkv', '.ts', '.iso',\r\n '.rmvb', '.avi', '.mov', '.mpeg',\r\n '.mpg', '.wmv', '.3gp', '.asf',\r\n '.m4v', '.flv', '.m2ts', '.strm']\r\n# 支持的字幕文件后缀格式\r\nRMT_SUBEXT = ['.srt', '.ass', '.ssa']\r\n# 电视剧动漫的分类genre_ids\r\nANIME_GENREIDS = ['16']\r\n# 默认过滤的文件大小,150M\r\nRMT_MIN_FILESIZE = 150 * 1024 * 1024\r\n# TMDB信息缓存定时保存时间\r\nMETAINFO_SAVE_INTERVAL = 600\r\n# fanart的api,用于拉取封面图片\r\nFANART_MOVIE_API_URL = 'https://webservice.fanart.tv/v3/movies/%s?api_key=d2d31f9ecabea050fc7d68aa3146015f'\r\nFANART_TV_API_URL = 'https://webservice.fanart.tv/v3/tv/%s?api_key=d2d31f9ecabea050fc7d68aa3146015f'\r\n# 默认背景图地址\r\nDEFAULT_TMDB_IMAGE = 'https://s3.bmp.ovh/imgs/2022/07/10/77ef9500c851935b.webp'\r\n# 默认TMDB代理服务地址\r\nDEFAULT_TMDB_PROXY = 
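The simulation.py record distinguishes bandit feedback, where only the loss of the pair of actions actually played is revealed, from complete information, where a player sees the loss of every one of its actions against the opponent's move. Stripped of the class machinery, the two feedback signals for the row player look like this (the matrix and actions are illustrative):

import numpy as np

L = np.array([[0.0, 1.0],
              [1.0, 0.0]])   # player's loss matrix; the player indexes rows

it, jt = 0, 1                # actions played this round

bandit_loss = L[it, jt]      # scalar: only the played pair (I_t, J_t) is revealed
complete_loss = L[:, jt]     # vector: loss of every row action against J_t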
'https://tmdb.nastool.cn'\r\n# TMDB图片地址\r\nTMDB_IMAGE_W500_URL = 'https://image.tmdb.org/t/p/w500%s'\r\nTMDB_IMAGE_ORIGINAL_URL = 'https://image.tmdb.org/t/p/original%s'\r\nTMDB_IMAGE_FACE_URL = 'https://image.tmdb.org/t/p/h632%s'\r\nTMDB_PEOPLE_PROFILE_URL = 'https://www.themoviedb.org/person/%s'\r\n# 电影默认命名格式\r\nDEFAULT_MOVIE_FORMAT = '{title} ({year})/{title} ({year})-{part} - {videoFormat}'\r\n# 电视剧默认命名格式\r\nDEFAULT_TV_FORMAT = '{title} ({year})/Season {season}/{title} - {season_episode}-{part} - 第 {episode} 集'\r\n# 辅助识别参数\r\nKEYWORD_SEARCH_WEIGHT_1 = [10, 3, 2, 0.5, 0.5]\r\nKEYWORD_SEARCH_WEIGHT_2 = [10, 2, 1]\r\nKEYWORD_SEARCH_WEIGHT_3 = [10, 2]\r\nKEYWORD_STR_SIMILARITY_THRESHOLD = 0.2\r\nKEYWORD_DIFF_SCORE_THRESHOLD = 30\r\nKEYWORD_BLACKLIST = ['中字', '韩语', '双字', '中英', '日语', '双语', '国粤', 'HD', 'BD', '中日', '粤语', '完全版',\r\n '法语', '西班牙语', 'HRHDTVAC3264', '未删减版', '未删减', '国语', '字幕组', '人人影视',\r\n 'www66ystv', '人人影视制作', '英语', 'www6vhaotv', '无删减版', '完成版', '德意']\r\n\r\n# WebDriver路径\r\nWEBDRIVER_PATH = {\r\n \"Docker\": \"/usr/lib/chromium/chromedriver\",\r\n \"Synology\": \"/var/packages/NASTool/target/bin/chromedriver\"\r\n}\r\n_config_path = os.path.join(os.path.dirname(__file__), \"config.yaml\")\r\nwith open(_config_path, mode='r', encoding='utf-8') as cf:\r\n try:\r\n # 读取配置\r\n print(\"正在加载配置:%s\" % _config_path)\r\n _config = ruamel.yaml.YAML().load(cf)\r\n except Exception as e:\r\n print(\"【Config】配置文件 config.yaml 格式出现严重错误!请检查:%s\" % str(e))\r\n _config = {}\r\nfrom dotmap import DotMap\r\nconfig = DotMap(_config)\r\n\r\n# 线程锁\r\nlock = Lock()\r\n\r\n# 全局实例\r\n_CONFIG = None\r\n\r\n\r\ndef singleconfig(cls):\r\n def _singleconfig(*args, **kwargs):\r\n global _CONFIG\r\n if not _CONFIG:\r\n with lock:\r\n _CONFIG = cls(*args, **kwargs)\r\n return _CONFIG\r\n\r\n return _singleconfig\r\n\r\n\r\n@singleconfig\r\nclass Config(object):\r\n _config = {}\r\n _config_path = None\r\n\r\n def __init__(self):\r\n self._config_path = os.path.join(os.path.dirname(__file__), \"config.yaml\")\r\n if not os.environ.get('TZ'):\r\n os.environ['TZ'] = 'Asia/Shanghai'\r\n self.init_config()\r\n\r\n def init_config(self):\r\n try:\r\n if not self._config_path:\r\n print(\"【Config】NASTOOL_CONFIG 环境变量未设置,程序无法工作,正在退出...\")\r\n quit()\r\n if not os.path.exists(self._config_path):\r\n cfg_tp_path = os.path.join(self.get_inner_config_path(), \"../../config.yaml\")\r\n cfg_tp_path = cfg_tp_path.replace(\"\\\\\", \"/\")\r\n shutil.copy(cfg_tp_path, self._config_path)\r\n print(\"【Config】config.yaml 配置文件不存在,已将配置文件模板复制到配置目录...\")\r\n with open(self._config_path, mode='r', encoding='utf-8') as cf:\r\n try:\r\n # 读取配置\r\n print(\"正在加载配置:%s\" % self._config_path)\r\n self._config = ruamel.yaml.YAML().load(cf)\r\n except Exception as e:\r\n print(\"【Config】配置文件 config.yaml 格式出现严重错误!请检查:%s\" % str(e))\r\n self._config = {}\r\n except Exception as err:\r\n print(\"【Config】加载 config.yaml 配置出错:%s\" % str(err))\r\n return False\r\n\r\n def get_proxies(self):\r\n return self.get_config('app').get(\"proxies\")\r\n\r\n def get_ua(self):\r\n return self.get_config('app').get(\"user_agent\") or DEFAULT_UA\r\n\r\n def get_config(self, node=None):\r\n if not node:\r\n return self._config\r\n return self._config.get(node, {})\r\n\r\n def get_config_path(self):\r\n return os.path.dirname(self._config_path)\r\n\r\n def get_temp_path(self):\r\n return os.path.join(self.get_config_path(), \"temp\")\r\n\r\n @staticmethod\r\n def get_root_path():\r\n return os.path.dirname(os.path.realpath(__file__))\r\n\r\n def 
get_inner_config_path(self):\r\n return os.path.join(self.get_root_path(), \"\")\r\n\r\n def get_domain(self):\r\n domain = (self.get_config('app') or {}).get('domain')\r\n if domain and not domain.startswith('http'):\r\n domain = \"http://\" + domain\r\n return domain\r\n\r\n @staticmethod\r\n def get_timezone():\r\n return os.environ.get('TZ')\r\n\r\n\r\nif __name__ == '__main__':\r\n Config()\r\n","repo_name":"veinkr/AliEmby","sub_path":"config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"22840023388","text":"# -*- coding: utf-8 -*-\r\nfrom clasificadores.clasificador import Clasificador, NodoDT, ClasificadorNoEntrenado\r\nfrom clasificadores.utils import proporcionClase, subconjuntoValorAtributo\r\nimport math\r\n\r\n'''ClasificadorDT es subclase de Clasificador, a lo que se añade\r\nun campo entrenado que indica si el entrenamiento se ha realizado,\r\nen caso negativo, se devuelve una excepción ClasificadorNoEntrenado.'''\r\n\r\nclass ClasificadorDT(Clasificador):\r\n \r\n def __init__(self, clasificacion, clases, atributos):\r\n super().__init__(clasificacion, clases, atributos)\r\n self.entrenado = False\r\n \r\n def entrena(self, entrenamiento, medida=\"entropia\", maxima_frecuencia=1, minimo_ejemplos=0):\r\n self.entrenamiento = entrenamiento\r\n self.medida = medida\r\n self.maxima_frecuencia = maxima_frecuencia\r\n self.minimo_ejemplos = minimo_ejemplos\r\n self.arbol = entrenador(self.entrenamiento,self.medida,self.maxima_frecuencia,self.minimo_ejemplos,self.atributos)\r\n self.entrenado = True\r\n \r\n def clasifica(self, ejemplo):\r\n if self.entrenado:\r\n return clasificador(ejemplo,self.arbol)\r\n else:\r\n return ClasificadorNoEntrenado(Exception)\r\n \r\n def evalua(self, prueba):\r\n if self.entrenado:\r\n return evaluador(prueba,self.arbol)\r\n else:\r\n return ClasificadorNoEntrenado(Exception)\r\n \r\n def imprime(self):\r\n if self.entrenado:\r\n return imprimir(self.arbol,self.atributos)\r\n else:\r\n return ClasificadorNoEntrenado(Exception)\r\n\r\n'''La función medidas realiza la función que se utilizará\r\npara cuantificar el grado de clasificación dependiendo de la\r\nmedida que entre como entrada.'''\r\n\r\ndef medidas(medida, conjunto):\r\n result = 0.0\r\n if medida==\"entropia\":\r\n proporcionClases = proporcionClase(conjunto,True)\r\n for p in proporcionClases:\r\n result = result + proporcionClases[p]*math.log2(proporcionClases[p])\r\n result = -result\r\n \r\n elif medida==\"error\":\r\n proporcionClases = proporcionClase(conjunto)\r\n result = 1 - proporcionClases[max(proporcionClases, key=proporcionClases.get)] / len(conjunto)\r\n \r\n elif medida==\"gini\":\r\n proporcionClases = proporcionClase(conjunto,True)\r\n for p in proporcionClases:\r\n result = result + proporcionClases[p]*proporcionClases[p]\r\n result = 1 - result\r\n \r\n return result\r\n\r\n'''Devuelve el nº de ejemplos del conjunto de entrada por valor\r\ndel atributo para conocer la proporción de ejemplos respecto al\r\ntotal dependiendo del índice (que determina el atributo). Ejemplo\r\nde salida: {'1st': 193, '2nd': 168, '3rd': 422}. 
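The config.py record protects its global Config instance with a lock-guarded decorator, but it only tests _CONFIG before acquiring the lock, so two threads racing past that test can still construct the object twice. A generic version of the same singleton decorator with the usual second check inside the lock; this is a sketch independent of the record's module globals.

from threading import Lock

def singleton(cls):
    instance = None
    lock = Lock()

    def get_instance(*args, **kwargs):
        nonlocal instance
        if instance is None:            # fast path once constructed
            with lock:
                if instance is None:    # re-check inside the lock closes the race
                    instance = cls(*args, **kwargs)
        return instance

    return get_instance

@singleton
class Config(object):
    pass

assert Config() is Config()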
Al igual que\r\nen proporcionClase, si porcentaje=True, se devuelven los valores\r\nrespecto al número total de ejemplos.'''\r\n\r\ndef proporcionEjemplo(conjunto, indice=0, valorAtributo=None, porcentaje=False):\r\n proporcion = dict()\r\n if valorAtributo == None:\r\n for x in conjunto:\r\n valor = x[indice]\r\n if valor not in proporcion.keys():\r\n proporcion[valor] = 1\r\n else:\r\n proporcion[valor] += 1\r\n else:\r\n for x in conjunto:\r\n valor = x[indice]\r\n if valor == valorAtributo and valor not in proporcion.keys():\r\n proporcion[valor] = 1\r\n elif valor == valorAtributo:\r\n proporcion[valor] += 1\r\n if porcentaje:\r\n for x in proporcion:\r\n proporcion[x] = proporcion[x]/len(conjunto)\r\n return proporcion\r\n\r\n'''indiceAtributo devuelve un diccionario con todos los atributos\r\ncomo clave y su posición en la lista como valor, para una entrada tal que: \r\n[('clase',['1st','2nd','3rd']),('edad',['niño','adulto']),('genero',['male','female'])],\r\nel resultado sería {'clase': 0, 'edad': 1, 'genero': 2}.'''\r\n\r\ndef indiceAtributo(atributos):\r\n indices = dict()\r\n for atributo in atributos:\r\n indices[atributo[0]] = atributos.index(atributo)\r\n return indices\r\n\r\n'''La función clasificador recibe un ejemplo como parámetro de entrada\r\ncon valores por cada atributo y, a partir del árbol generado en el\r\nentrenamiento, se devuelve un valor de clasificación final. Se recorre\r\nel árbol por cada valor de atributo del ejemplo hasta llegar al nodo\r\nhoja de dicho camino y se devuelve como resultado'''\r\n\r\ndef clasificador(ejemplo,arbol):\r\n if arbol.ramas != None:\r\n for valor in arbol.ramas:\r\n if ejemplo[arbol.atributo] == valor:\r\n clasificacion = clasificador(ejemplo,arbol.ramas[valor])\r\n else:\r\n clasificacion = arbol.clase\r\n return clasificacion\r\n\r\n'''La función evaluador comprueba mediante un conjunto de validación\r\nel rendimiento del árbol obtenido en el entrenamiento. Para cada ejemplo\r\ndel conjunto de validación, crea una copia, elimina el último elemento\r\n(que es el valor de clasificación) y aprovecha la función clasificador\r\npara que devuelva el valor de clasificación que tendría según el árbol.\r\nUna vez hecho esto, comprueba si dicho valor es igual al que tiene\r\nen dicho ejemplo, en caso afirmativo, suma 1 al número de aciertos. Una \r\nvez finalizado el bucle, se devuelve el número de aciertos dividido entre\r\n el número total de ejemplos del conjunto de validación.'''\r\n\r\ndef evaluador(prueba,arbol):\r\n aciertos = 0\r\n for ejemplo in prueba:\r\n copiaEjemplo = list(ejemplo)\r\n clase = copiaEjemplo.pop()\r\n if clasificador(copiaEjemplo,arbol) == clase:\r\n aciertos += 1\r\n return aciertos/len(prueba)\r\n\r\n'''La función imprimirRec devuelve una representación del árbol que\r\nse pasa como parámetro de entrada a partir de los distintos subárboles\r\nde sus ramas y la profundidad en el árbol. Se representa el valor de \r\ncada nivel de profundidad y el valor del atributo por cada rama, además\r\nde una indentación que depende de la profundidad para una representación\r\nmás clara de las ramas. 
Si el nodo que se trata no tiene ramas, es un nodo\r\nhoja y se representa el valor de clasificación.'''\r\n\r\ndef imprimirRec(arbol,atributos,profundidad):\r\n resultado = \"\"\r\n if arbol.ramas != None:\r\n for rama in arbol.ramas:\r\n resultado += \"\\n\"+\"\\t\"*profundidad+atributos[arbol.atributo][0]+\": (\"+str(rama)+\")\"+imprimirRec(arbol.ramas[rama],atributos,profundidad+1)\r\n else:\r\n resultado = \" -> [\"+arbol.clase+\"]\\n\"\r\n return resultado\r\n\r\n'''La función imprimir llama a imprimirRec, genera la cadena del árbol\r\ny la imprime.'''\r\n\r\ndef imprimir(arbol,atributos,profundidad=0):\r\n arbolRes = imprimirRec(arbol,atributos,profundidad)\r\n print(arbolRes)\r\n\r\n'''La función entrenador desarrolla el árbol de manera recursiva llamando\r\na la propia función variando el conjunto de entrada, y los atributos por\r\ncada iteración. Para los casos en los que se trata de un nodo hoja, se realiza\r\nuna comprobación de la proporción de clases (si para el conjunto de entrada el\r\nvalor de clasificación es el mismo para todos los casos), de los atributos (si\r\nno quedan más que estudiar) o si el conjunto de entrada es vacío (por lo que la\r\nvariable atributoElegido valdría None y se crearía un nodo hoja). En cualquier\r\notro caso, se trata de un nodo interior y se trata cada valor de los atributos\r\nde entrada y se observa el mejor valor dependiendo de la medida, cogiendo el mínimo.\r\nEs en ese bucle donde se realiza la comprobación de la máxima frecuencia y el mínimo\r\nde ejemplos, en la cual si se cumple alguna, no se elige el atributo candidato'''\r\n\r\ndef entrenador(conjunto, medida, maxFrecuencia, minEjemplos, atributos, indices=None, claseMax=None):\r\n if indices == None:\r\n indices = indiceAtributo(atributos)\r\n valorMin = 100.0\r\n atributosCopia = list(atributos)\r\n proporcionClases = proporcionClase(conjunto)\r\n atributoElegido = None\r\n if len(proporcionClases) == 1 or indices == {} or atributosCopia == [] or len(conjunto) == 0:\r\n if proporcionClases == {}:\r\n arbol = NodoDT(None,proporcionClases,None,max(claseMax,key=claseMax.get))\r\n else: \r\n arbol = NodoDT(None,proporcionClases,None,max(proporcionClases,key=proporcionClases.get))\r\n \r\n else:\r\n ramas = dict()\r\n for atributo in atributosCopia:\r\n nombreAtributo = atributo[0]\r\n valoresAtributo = atributo[1]\r\n medidaValor = 0.0\r\n for valor in valoresAtributo:\r\n subconjunto = subconjuntoValorAtributo(conjunto,indices[nombreAtributo],valor)\r\n medidaValor = medidaValor + medidas(medida,subconjunto)\r\n medidaValor = medidaValor*len(subconjunto)/len(conjunto)\r\n proporcionClaseSub = proporcionClase(subconjunto,True)\r\n if proporcionClaseSub != {}:\r\n if medidaValor <= valorMin and proporcionClaseSub[max(proporcionClaseSub,key=proporcionClaseSub.get)] <= maxFrecuencia and len(subconjunto)/len(conjunto) >= minEjemplos:\r\n atributoElegido = atributo\r\n valorMin = medidaValor\r\n if atributoElegido == None:\r\n arbol = NodoDT(None,proporcionClases,None,max(proporcionClases,key=proporcionClases.get))\r\n else:\r\n atributosCopia.remove(atributoElegido)\r\n for valor in atributoElegido[1]:\r\n ramas[valor] = entrenador(subconjuntoValorAtributo(conjunto,indices[atributoElegido[0]],valor),medida,maxFrecuencia,minEjemplos,atributosCopia,indices,proporcionClases)\r\n arbol = NodoDT(indices[atributoElegido[0]],proporcionClases,ramas,None)\r\n return 
arbol","repo_name":"joapingut/aia-1718-t2","sub_path":"clasificadores/clasificadorDT.py","file_name":"clasificadorDT.py","file_ext":"py","file_size_in_byte":9722,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29971587383","text":"from openerp.osv import osv, fields\nfrom openerp.tools.translate import _\nfrom openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT\nfrom openerp.addons.pc_connect_master.product_uom_ext import UOM_AGING_SELECTION_VALUES\nfrom datetime import datetime, timedelta\nfrom openerp.addons.pc_connect_master.utilities.date_utilities import get_number_of_natural_days\n\n\nclass stock_picking_config(osv.Model):\n _inherit = 'configuration.data'\n\n def check_old_stock_picking_out(self, cr, uid, context=None):\n ''' Checks for old pickings which have been for too much time opened,\n this includes also the back-orders; although the parameter to set\n the time to wait for back-orders and regular pickings are different,\n the logic is the same.\n '''\n config = self.get(cr, uid, [], context=context)\n\n issue_obj = self.pool.get('project.issue')\n stock_picking_obj = self.pool.get('stock.picking')\n\n now = datetime.now()\n\n common_domain = [('state', 'not in', ['done', 'cancel'])]\n domain_backorders = []\n domain_regular_pickings = []\n\n # Gets the target date for regular pickings.\n if config.stock_picking_out_max_open_age:\n if config.stock_picking_out_max_open_age_uom != 'days':\n limit_delta = timedelta(**{config.stock_picking_out_max_open_age_uom: config.stock_picking_out_max_open_age})\n target_date = datetime.strftime(now - limit_delta, DEFAULT_SERVER_DATETIME_FORMAT)\n else: # if config.stock_picking_out_max_open_age_uom == 'days':\n # If we selected 'days' as the unit of measure, then we take into account only weekdays.\n actual_weekdays = config.get_open_days_support(context=context)\n num_natural_days = get_number_of_natural_days(now, config.stock_picking_out_max_open_age, 'backward', actual_weekdays)\n target_date = datetime.strftime(now - timedelta(days=num_natural_days), DEFAULT_SERVER_DATETIME_FORMAT)\n domain_regular_pickings.append(('date', '<', target_date))\n\n # Gets the target date for back-orders\n if config.open_backorder_alarming_age_days:\n actual_weekdays = config.get_open_days_support(context=context)\n num_natural_days = get_number_of_natural_days(now, config.open_backorder_alarming_age_days, 'backward', actual_weekdays)\n target_date = datetime.strftime(now - timedelta(days=num_natural_days), DEFAULT_SERVER_DATETIME_FORMAT)\n domain_backorders.extend([('backorder_id', '!=', False),\n ('date', '<', target_date),\n ])\n\n # Looks for old pickings.\n old_picking_ids = None\n if domain_regular_pickings:\n domain_regular_pickings.extend(common_domain)\n old_picking_ids = set(stock_picking_obj.search(cr, uid, domain_regular_pickings, context=context))\n\n # Looks for old back-orders.\n old_backorders_ids = None\n if domain_backorders:\n domain_backorders.extend(common_domain)\n old_backorders_ids = set(stock_picking_obj.search(cr, uid, domain_backorders, context=context))\n\n pickings_to_alarm_ids = list(old_picking_ids | old_backorders_ids)\n for stock_picking in stock_picking_obj.browse(cr, uid, pickings_to_alarm_ids, context=context):\n msg = _('{0} with ID={1}, has exceeded the alarming date.'.format(stock_picking.name, stock_picking.id))\n tags = []\n if stock_picking.id in old_picking_ids:\n tags.append('delivery-order-max-open-age')\n if stock_picking.id in old_backorders_ids:\n 
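The clasificadorDT.py record that ends above (its comments are in Spanish) chooses decision-tree splits with one of three impurity measures computed from the class proportions of a subset: entropy, misclassification error, or the Gini index. The three measures in compact standalone form, over a list of proportions that sums to 1:

import math

def entropy(p):
    return -sum(x * math.log2(x) for x in p if x > 0)

def misclassification_error(p):
    return 1 - max(p)

def gini(p):
    return 1 - sum(x * x for x in p)

proportions = [0.5, 0.25, 0.25]
print(entropy(proportions), misclassification_error(proportions), gini(proportions))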
tags.append('backorder-order-max-open-age')\n\n issue_ids = issue_obj.find_resource_issues(cr, uid, 'stock.picking', stock_picking.id,\n tags=tags, create=True, reopen=True, context=context)\n for issue in issue_obj.browse(cr, uid, issue_ids, context=context):\n if issue.create_date == issue.write_date:\n # Only write message when just created\n issue.message_post(msg)\n\n return True\n\n _columns = {\n 'stock_picking_out_max_open_age': fields.integer('Open Delivery Order Alarming Age', required=True),\n 'stock_picking_out_max_open_age_uom': fields.selection(UOM_AGING_SELECTION_VALUES, string='Open Delivery Order Alarming Age UOM', required=True),\n 'stock_picking_in_partly_fullfilment_alarm_wait': fields.integer('Open Incoming Shipment Alarming Age', required=True),\n 'stock_picking_in_partly_fullfilment_alarm_wait_uom': fields.selection(UOM_AGING_SELECTION_VALUES, string='Open Incoming Shipment Alarming Age UOM', required=True),\n }\n\n _defaults = {\n 'stock_picking_out_max_open_age_uom': UOM_AGING_SELECTION_VALUES[0][0],\n 'stock_picking_in_partly_fullfilment_alarm_wait_uom': UOM_AGING_SELECTION_VALUES[0][0],\n }\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","repo_name":"brain-tec/swisspost-odoo-yellowcube","sub_path":"pc_connect_warehouse/stock_picking_alarming_config.py","file_name":"stock_picking_alarming_config.py","file_ext":"py","file_size_in_byte":5108,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"21524637683","text":"\ndef parse_file():\n f = open('../data/day05.txt', 'r')\n lines = f.read().strip().split('\\n')\n f.close()\n\n mx, my = 0, 0\n parse_point = lambda c: map(int, c.split(','))\n\n def update_max(point):\n nonlocal mx, my\n x, y = point\n mx = max(mx, x)\n my = max(my, y)\n\n for i, line in enumerate(lines):\n components = line.split()\n\n start = tuple(parse_point(components[0]))\n end = tuple(parse_point(components[2]))\n\n update_max(start)\n update_max(end)\n\n lines[i] = start, end\n\n return lines, mx, my\n\n# Complexity: O(n^2)\ndef calc_num_overlapping(lines, mx, my, diagonals):\n grid = [[0 for _ in range(my + 1)] for _ in range(mx + 1)]\n\n sign = lambda x: -1 if x < 0 else 1\n\n for start, end in lines:\n x1, y1 = start\n x2, y2 = end\n \n if x1 == x2:\n # Vertical line\n s = sign(y2 - y1)\n for i in range(y1, y2 + s, s):\n grid[i][x1] += 1\n elif y1 == y2:\n # Horizontal line\n s = sign(x2 - x1)\n for i in range(x1, x2 + s, s):\n grid[y1][i] += 1\n elif diagonals:\n sx = sign(x2 - x1)\n sy = sign(y2 - y1)\n magnitude = abs(x2 - x1) + 1\n\n for i in range(magnitude):\n grid[y1 + i * sy][x1 + i * sx] += 1\n \n num_overlapping = 0\n for row in grid:\n for val in row:\n if val > 1:\n num_overlapping += 1\n \n return num_overlapping\n\nlines, mx, my = parse_file()\nprint(calc_num_overlapping(lines, mx, my, False))\nprint(calc_num_overlapping(lines, mx, my, True))","repo_name":"VenomPaco/advent-of-code-2021","sub_path":"python/day05.py","file_name":"day05.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32032953307","text":"print(\"Enter LIST of Numbers. Any character to end.\")\ntry:\n my_list1=[]\n while True:\n my_list1.append(int(input()))\nexcept:\n print(\"List:\",my_list1)\nprint(\"\\nEnter SUBLIST of Numbers. 
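The day05.py record inside this line rasterizes hydrothermal vent segments with one per-axis sign step, so horizontal, vertical and perfect 45-degree diagonals all share the same loop. The stepping idea in isolation (the endpoints are illustrative, and this is not a general Bresenham):

def points_on_line(x1, y1, x2, y2):
    sign = lambda v: (v > 0) - (v < 0)
    sx, sy = sign(x2 - x1), sign(y2 - y1)
    steps = max(abs(x2 - x1), abs(y2 - y1)) + 1
    return [(x1 + i * sx, y1 + i * sy) for i in range(steps)]

assert points_on_line(1, 1, 3, 3) == [(1, 1), (2, 2), (3, 3)]
assert points_on_line(9, 7, 7, 7) == [(9, 7), (8, 7), (7, 7)]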
Any character to end.\")\ntry:\n my_list2=[]\n while True:\n my_list2.append(int(input()))\nexcept:\n print(\"SubList:\",my_list2)\nflag=1\nfor i in my_list2:\n if i not in my_list1:\n flag=0\nif flag==0:\n print(\"NOT a SUBLIST\")\nelse:\n print(my_list2,\"is a subset of\",my_list1)","repo_name":"nixxby/Repo_Assignment1","sub_path":"Assignment_1_List/A3.py","file_name":"A3.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41401440430","text":"# -*- encoding: utf-8 -*-\n'''\n@File : listime.py\n@Time : 2020/03/29 17:01:03\n@Author : xdbcb8 \n@Version : 1.0\n@Contact : xdbcb8@qq.com\n@WebSite : www.xdbcb8.com\n'''\n\n# here put the import lib\n\n#1 定义一个10个元素的列表,通过列表自带的函数,实现元素在尾部插入和头部插入并记录程序运行的时间;用deque来实现,同样记录程序所耗费的时间;输出这2个时间的差值;\n#提示:列表原生的函数实现头部插入数据:list.insert(0, v);list.append(2)\n\nimport datetime\nfrom collections import deque\n\nlistx=['sd',14,'stbrt',907,235,(1,2,3),[99,88],{'k':'v'},'erv53',691]\nstart1 = datetime.datetime.now()\nprint(start1)\nlistx.insert(0,555)\nlistx.append('555')\nend1 = datetime.datetime.now()\nprint(end1)\ntime1=(end1 - start1).seconds\nprint(time1)\n\nprint('*************************************************')\n\nstart2 = datetime.datetime.now()\nprint(start2)\nq=deque(listx)\nq.appendleft(555)\nq.appendleft('555')\nend2 = datetime.datetime.now()\nprint(end2)\ntime2=(end2 - start2).seconds\nprint(time2)\n\nprint('差值为:{}'.format(time2-time2))","repo_name":"JiangYajun1027/Python","sub_path":"homework4/listime.py","file_name":"listime.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"248362532","text":"#!/usr/bin/python\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib\n\nindex = 0\nlinkList = []\nlinkFile = open(\"../example/ex-members-links.txt\", 'r')\nfor line in linkFile:\n\tlinkList.append(line)\n\nnameFile = open('../example/ex-members-names.txt', 'r');\nfor line in nameFile:\n\tname = line;\n\tname = name.replace('\\n', '');\n\tnameU = name.replace(' ', '_');\n\tdossier = open('../example/dossiers/'+nameU+'.html', 'w');\n\tdossierContents = '''\n\n\n'''+name+'''\n\n\n\n\n

Personal biography, '''+name+'''

\n\n

My name is '''+name+''' and I am a proud pedophile. I am sexually aroused by seeing all children, both children and images, between four and nine. My orientation for sexual activity with children is not different in any way from people who are gay and attracted to people of the same sex as them. Being a pedophile entitles me to the same rights as people who are gay.

\n\n\\n\n\\n\n''';\n\tdossier.write(dossierContents);\n\tdossier.close();\n\tindex = index + 1\n","repo_name":"DondreKhan/Perimeter","sub_path":"R-36_Satan.py","file_name":"R-36_Satan.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71813920981","text":"from pyspark import SparkContext, SparkConf\nfrom pyspark.sql import SparkSession, Window\nfrom pyspark.sql.types import DoubleType, DateType, TimestampType\nfrom pyspark.sql.functions import sum as _sum, desc, to_date, from_utc_timestamp, from_unixtime, to_timestamp, trunc, add_months\nimport argparse\n\n# conf = SparkConf().setAppName('PointsAggregatorStandaloneApp')\n# sc = SparkContext(conf=conf)\n\nenvironment = \"\"\nfiles_directory = \"\"\nfiles_out_directory = \"\"\nfiles_out_filename = \"\"\n\nspark = SparkSession \\\n .builder \\\n .appName(\"PointsAggregatorStandaloneApp\").getOrCreate()\n\n\ndef parseInputArguments():\n global files_directory, environment, files_out_directory, files_out_filename\n parser = argparse.ArgumentParser(description='group points')\n parser.add_argument('--directory', dest='directory',\n default=\"./data/*.orc\",\n help='full path to orc files')\n parser.add_argument('--out-directory', dest='out_directory',\n default=\"./out/*.orc\",\n help='full path to out orc files')\n parser.add_argument('--out-filename', dest='out_filename',\n default=\"my_file.orc\",\n help='full path to out orc files')\n parser.add_argument('--environment', dest='environment',\n default=\"local\",\n help='name of environment(local or cluster)')\n args = parser.parse_args()\n files_directory = args.directory\n environment = args.environment\n files_out_directory = args.out_directory\n files_out_filename = args.out_filename\n print(\"Current file directory: {}\".format(files_directory))\n print(\"Current environment: {}\".format(environment))\n\n\ndef getGroupedPointOwners():\n if environment == \"local\":\n pass\n elif environment == \"cluster\":\n pass\n else:\n raise AssertionError(\n \"Bad environment variable (environment = {}). 
Should be local or cluster\".format(environment))\n\n points_df = spark.read.format(\"orc\").load(files_directory)\n\n # df modification\n # points_df = points_df.withColumn(\"qty\", points_df[\"qty\"].cast(DoubleType()))\n # points_df = points_df.withColumn(\"period_full_date\",\n # from_unixtime(points_df[\"period\"] / 1000, 'yyyy-MM-dd hh:mm:ss'))\n points_df = points_df.withColumn(\"period\", from_unixtime(points_df[\"period\"] / 1000, 'yyyy-MM-dd hh:mm:ss'))\n #\n # print(points_df.printSchema)\n # points_df.show(10)\n #\n # points_stats = points_df \\\n # .groupBy([\"period_year_month\", \"organisationid\", \"customerid\", \"typeid\"]) \\\n # .agg(_sum(\"qty\").alias(\"total_qty\")).orderBy(desc(\"period_year_month\"))\n\n points_df = points_df.withColumn(\"qty\", points_df[\"qty\"].cast(DoubleType()))\n points_df = points_df.withColumn('month', trunc(points_df['period'], 'MM'))\n\n points_df = points_df.groupby(['organisationid', 'customerid', 'typeid', 'month']).sum('qty')\n\n points_df = points_df.withColumn(\"cumulativeSum\",\n _sum('sum(qty)').over(\n Window.partitionBy(['organisationid', 'customerid', 'typeid']).orderBy(\n 'month')))\n\n points_df = points_df.withColumn('aggdate', add_months(points_df['month'], 1))\n points_df = points_df.withColumn('aggdate_ts', to_timestamp(points_df['aggdate']))\n points_df = points_df.withColumn('aggdate_date', points_df['month'].cast(DateType()))\n points_df = points_df.withColumn(\"qty\", points_df[\"cumulativeSum\"])\n\n points_df = points_df.drop('cumulativeSum')\n points_df = points_df.drop('sum(qty)')\n points_df = points_df.drop('month')\n\n points_df.show(100)\n\n return points_df\n\n\nif __name__ == '__main__':\n # pass\n parseInputArguments()\n points_stats = getGroupedPointOwners()\n points_stats.write.mode('overwrite').format(\"orc\").save(files_out_directory + '/' + files_out_filename)\n","repo_name":"neg0diay/orc-agg-app","sub_path":"pointsaggregator.py","file_name":"pointsaggregator.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6298955156","text":"from first_homeworks.oop_geometry_figures.src.rectangle import Rectangle\n\n\nclass Square(Rectangle):\n\n def __init__(self, a):\n self.name = \"square\"\n if a > 0:\n self.a = self.b = a\n else:\n raise ValueError(\"Square sides must be greater than 0.\")\n","repo_name":"triders/otus-python-qa","sub_path":"first_homeworks/oop_geometry_figures/src/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"28042779374","text":"from PIL import Image\nimport csv\nimport json\nfrom collections import OrderedDict\n\nopen_space_img = 'data/open_space.png'\ndiversity_img = 'data/natural_diversity.png'\ngroup_file = 'data/groups.csv'\nfout = open('intersections_computed.json', 'w')\n\ngroups = []\n\ntop_bound = 42.1\nbottom_bound = 41\nleft_bound = -73.7\nright_bound = -71.8\nres = 4\n\narea_pixel_count = dict()\narea_percentage_report = dict()\n\ndef try_convert_to_float(string:str):\n try:\n return float(string)\n except:\n return string\n\ndef get_groups(file):\n groups = []\n dev = 0.05\n with open(file, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n for row in reader:\n row = [try_convert_to_float(e) for e in row if isinstance(try_convert_to_float(e), float)]\n if len(row) > 1:\n row.pop(0)\n groups.append(row)\n 
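The pointsaggregator.py record turns per-month sums into running totals with a Spark window function: partition by the entity, order by month, and rely on the ordered window's default frame (unbounded preceding to current row). The essential pattern on a toy frame; the column names mirror the record, and the session setup is ordinary boilerplate.

from pyspark.sql import SparkSession, Window
from pyspark.sql.functions import sum as _sum

spark = SparkSession.builder.appName('CumSumSketch').getOrCreate()
df = spark.createDataFrame(
    [('c1', '2020-01-01', 2.0), ('c1', '2020-02-01', 3.0), ('c2', '2020-01-01', 5.0)],
    ['customerid', 'month', 'qty'],
)

# An ordered window defaults to unbounded-preceding..current-row,
# so summing over it yields a per-customer running total.
w = Window.partitionBy('customerid').orderBy('month')
df.withColumn('cumulativeSum', _sum('qty').over(w)).show()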
group_coordinates = []\n for group in groups:\n p1 = [group[0] + dev, group[1] - dev]\n p2 = [group[0] + dev, group[1] + dev]\n p3 = [group[0] - dev, group[1] + dev]\n p4 = [group[0] - dev, group[1] - dev]\n p5 = [group[0] + dev, group[1] - dev]\n polygon_coordinates = [p1, p2, p3, p4, p5]\n group_coordinates.append(polygon_coordinates)\n return group_coordinates\n\ndef convert_to_coordinates(x_pos:float, y_pos:float) -> list:\n x_coord = (right_bound - left_bound) * x_pos + left_bound\n y_coord = (top_bound - bottom_bound) * y_pos + bottom_bound\n return [y_coord, x_coord]\n\ndef convert_to_pixels(x_coord, y_coord, imagesize) -> list:\n x_pos = imagesize[0] * ((x_coord - left_bound) / (right_bound - left_bound))\n y_pos = imagesize[1] * ((y_coord - bottom_bound) / (top_bound - bottom_bound))\n return [x_pos, y_pos]\n\ndef bounds(polygon:list) -> list:\n top = -10000000\n bottom = 10000000\n left = 10000000\n right = -10000000\n for coordinate in polygon:\n if coordinate[0] < bottom:\n bottom = coordinate[0]\n if coordinate[0] > top:\n top = coordinate[0]\n if coordinate[1] < left:\n left = coordinate[1]\n if coordinate[1] > right:\n right = coordinate[1]\n return [left, right, bottom, top]\n \ndef get_area_pixels(area, imagesize):\n global groups\n polygon = groups[area-1]\n bound = bounds(polygon)\n topright = convert_to_pixels(bound[1], bound[3], imagesize)\n bottomleft = convert_to_pixels(bound[0], bound[2], imagesize)\n width = topright[0] - bottomleft[0]\n height = topright[1] - bottomleft[1]\n areasize = width * height\n return areasize\n\n\ndef in_area(coordinate:list) -> int:\n i = 1\n for area in get_groups(group_file):\n bound = bounds(area)\n if coordinate[1] >= bound[0]-0.0001 and coordinate[1] < bound[1] and coordinate[0] >= bound[2]-0.0001 and coordinate[0] < bound[3]:\n return i\n i+=1\n return 0\n\ndef get_area_occupancy(file):\n img = Image.open(file)\n img_width, img_height = img.size\n print('img size: ' + str(img_width) + ',' + str(img_height))\n\n for x in range(0, img_width):\n if x % res != 1:\n continue\n for y in range(0, img_height):\n if y % res != 1:\n continue\n x_loc_float = float(x)/img_width\n y_loc_float = float(y)/img_height\n coords = convert_to_coordinates(x_loc_float, y_loc_float)\n area = in_area(coords)\n color = img.getpixel((x,y))\n if (color[0] < 20):\n area_pixel_count.update({area:area_pixel_count.get(area, 0)+1})\n print(f'area: {area}, dark pixels found: {area_pixel_count.get(area)}')\n print(f'progress: {x}/{img_width}')\n\n for area, count in area_pixel_count.items():\n area_pixels = get_area_pixels(area, img.size)\n area_pixels = area_pixels/(res*res)\n global area_percentage_report\n area_percentage_report.update({area:count/area_pixels})\n\n\ngroups = get_groups(group_file)\nget_area_occupancy(diversity_img)\nfout.write(json.dumps(area_percentage_report, indent=4))\nfout.close()\n\nnewimg = Image.new(mode=\"RGB\",size=(600,400),color=(255,255,255))\nfor area, percent in area_percentage_report.items():\n bound = bounds(groups[area-1])\n print(f'bounds: {bound}')\n topright = convert_to_pixels(bound[1], bound[3],(600,400))\n bottomleft = convert_to_pixels(bound[0], bound[2],(600,400))\n print(f'bounds: {topright},{bottomleft}')\n for x in range(int(bottomleft[0]), int(topright[0])):\n for y in range(int(bottomleft[1]), int(topright[1])):\n value = int(255-percent*255)\n newimg.putpixel((x,y),(value,value,value))\nnewimg.show()","repo_name":"ITWSDataScience/Group7_2022","sub_path":"q3 
data/image_reading.py","file_name":"image_reading.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32219528925","text":"import numpy as np\nfrom scipy.ndimage import shift, rotate\n\n\ndef expfield_2d(kvec, zarr, xarr, dx, dz):\n field = np.exp(1j * 2 * np.pi * (kvec[0] * xarr * dx + kvec[1] * zarr * dz))\n return field\n\n\ndef structillum_2d(\n shape,\n dx=0.01,\n dz=0.01,\n NA=1.42,\n nimm=1.515,\n wvl=0.488,\n linespacing=0.2035,\n extraz=0,\n side_intensity=0.5,\n ampcenter=1.0,\n ampratio=1.0,\n nangles=100,\n spotratio=0.035,\n):\n \"\"\" Simulate a plane of structured illumination intensity either for 1 or 2 objectives\n '2d' means I'm only creating one sheet of illumination since every sheet will be the\n same side_intensity (0~1) -- the amplitude of illum from one objective;\n for the other it's 1 minus this value\n ampcenter -- the amplitude of the center illum beam;\n if 0 and OneObj, then it's for 2D SIM\n ampratio -- the amplitude of the side beams relative to center beam, which is 1.0\n nangles -- the number of triplets (or sextets) we'd divide the illumination beams\n into because the beams assume different incident angles (multi-mode fiber)\n \"\"\"\n\n # theta_arr = np.arange(-nangles/2, nangles/2+1, dtype=np.float32) * anglespan/nangles\n\n nz, nx = shape\n\n # I think NA is half angle, therefore 2*\n anglespan = spotratio * 2 * np.arcsin(NA / nimm)\n\n NA_span = np.sin(anglespan)\n NA_arr = (\n np.arange(-nangles / 2, nangles / 2 + 1, dtype=np.float32) * NA_span / nangles\n )\n\n kmag = nimm / wvl\n\n # The contribution to the illum is dependent on theta, since the middle of the circle\n # has more rays than the edge\n # kmag*np.sin(anglespan/2)) is the radius of each circular illumination spot\n # weight_arr is essentially the \"chord\" length as a function of theta_arr\n # weight_arr = np.sqrt(\n # (kmag*np.sin(anglespan/2)) ** 2 - (kmag*np.sin(theta_arr))**2 )\n # / (kmag*np.sin(anglespan/2))\n weight_arr = np.sqrt((kmag * NA_span / 2) ** 2 - (kmag * NA_arr) ** 2) / (\n kmag * NA_span / 2\n )\n\n # plus_sidetheta_arr = np.arcsin( (kmag * np.sin(theta_arr) + 1/linespacing/2)/kmag )\n # minus_sidetheta_arr = -plus_sidetheta_arr[::-1]\n\n plus_sideNA_arr = (1 / linespacing / 2 + kmag * NA_arr) / kmag\n minus_sideNA_arr = -plus_sideNA_arr[::-1]\n\n # intensity = np.zeros((nz+extraz,nx), np.float32)\n intensity = np.zeros((3, nz + extraz, nx), np.float32)\n\n amplitude = np.zeros((6, nz + extraz, nx), np.complex64)\n zarr, xarr = np.indices((nz + extraz, nx)).astype(np.float32)\n zarr -= (nz + extraz) / 2\n xarr -= nx / 2\n\n amp_plus = np.sqrt(1.0 - side_intensity)\n # amp_minus = np.sqrt(side_intensity)\n\n kvec_arr = kmag * np.stack([NA_arr, np.sqrt(1 - NA_arr ** 2)]).transpose()\n plus_side_kvec_arr = (\n kmag\n * np.stack([plus_sideNA_arr, np.sqrt(1 - plus_sideNA_arr ** 2)]).transpose()\n )\n minus_side_kvec_arr = (\n kmag\n * np.stack([minus_sideNA_arr, np.sqrt(1 - minus_sideNA_arr ** 2)]).transpose()\n )\n\n for i in range(nangles + 1):\n # construct intensity field over all triplets (or sextets in I5S)\n # print \"i=\",i\n # amplitude[:]=0j\n # kvec = kmag * np.stack([np.sin(theta_arr[i]), np.cos(theta_arr[i])])\n # amplitude += expfield_2d(kvec, zarr, xarr, dx, dz)\n amplitude[0] = (\n amp_plus * expfield_2d(kvec_arr[i], zarr, xarr, dx, dz) * ampcenter\n )\n\n # kvec = kmag * np.stack([np.sin(plus_sidetheta_arr[i]),\n # np.cos(plus_sidetheta_arr[i])])\n # 
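The image_reading.py record above maps between normalized image positions and latitude/longitude by linear interpolation inside a fixed bounding box (one aside: its in_area helper re-parses the groups CSV for every sampled pixel, where iterating the already-loaded module-level groups list once would do). The coordinate round trip on its own, with the bounds copied from the record:

TOP, BOTTOM, LEFT, RIGHT = 42.1, 41.0, -73.7, -71.8

def to_coordinates(x_frac, y_frac):
    # Fractions in [0, 1] across the image map linearly into the bounding box.
    return (BOTTOM + (TOP - BOTTOM) * y_frac, LEFT + (RIGHT - LEFT) * x_frac)

def to_fractions(lat, lon):
    return ((lon - LEFT) / (RIGHT - LEFT), (lat - BOTTOM) / (TOP - BOTTOM))

lat, lon = to_coordinates(0.25, 0.5)
assert all(abs(a - b) < 1e-9 for a, b in zip(to_fractions(lat, lon), (0.25, 0.5)))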
amplitude += expfield_2d(kvec, zarr, xarr, dx, dz) * ampratio\n amplitude[2] = (\n amp_plus * expfield_2d(plus_side_kvec_arr[i], zarr, xarr, dx, dz) * ampratio\n )\n # kvec = kmag * np.array([np.sin(minus_sidetheta_arr[i]),\n # np.cos(minus_sidetheta_arr[i])])\n # amplitude += expfield_2d(kvec, zarr, xarr, dx, dz) * ampratio\n amplitude[4] = (\n amp_plus\n * expfield_2d(minus_side_kvec_arr[i], zarr, xarr, dx, dz)\n * ampratio\n )\n # intensity += np.absolute(np.sum(amplitude, 0) * weight_arr[i]) ** 2\n intensity[0] += (\n (\n amplitude[0] * amplitude[0].conj()\n + amplitude[2] * amplitude[2].conj()\n + amplitude[4] * amplitude[4].conj()\n )\n * weight_arr[i]\n ).real\n intensity[1] += (\n 2\n * np.real(\n amplitude[0] * amplitude[2].conj() + amplitude[0] * amplitude[4].conj()\n )\n * weight_arr[i]\n )\n intensity[2] += 2 * np.real(amplitude[2] * amplitude[4].conj()) * weight_arr[i]\n\n del amplitude\n\n if extraz > 0:\n # blend = F.zeroArrF(extraz, nx)\n aslope = np.arange(extraz, dtype=np.float32) / extraz\n blend = np.transpose(\n np.transpose(intensity[:extraz, :]) * aslope\n + np.transpose(intensity[-extraz:, :]) * (1 - aslope)\n )\n intensity[:extraz, :] = blend\n intensity[-extraz:, :] = blend\n return intensity[extraz // 2 : -extraz // 2, :]\n else:\n return intensity\n\ndef _single_period(*args, **kwargs):\n resolution = 100\n _dz = kwargs.get(\"dz\", 0.01)\n kwargs[\"linespacing\"] = kwargs.get(\"linespacing\", 0.2035)\n kwargs[\"dx\"] = kwargs[\"linespacing\"] / resolution\n kwargs[\"dz\"] = kwargs[\"dx\"]\n args = list(args)\n args[0] = (int(args[0][0] * _dz / kwargs[\"dz\"]), 2 * resolution)\n return structillum_2d(*args, **kwargs), kwargs[\"dx\"]\n\n\n\ndef crop_center(img, cropx, cropy):\n z, y, x = img.shape\n startx = x // 2 - (cropx // 2)\n starty = y // 2 - (cropy // 2)\n return img[:, starty : starty + cropy, startx : startx + cropx]\n\n\ndef structillum_3d(\n shape,\n angles=None,\n nphases=5,\n linespacing=0.2035,\n dx=0.01,\n dz=0.01,\n defocus=0,\n *args,\n **kwargs,\n):\n\n if isinstance(angles, (int, float)):\n # if a single number is provided, assume it is the first of three\n angles = [angles, angles + np.deg2rad(60), angles + np.deg2rad(120)]\n assert isinstance(\n angles, (list, tuple)\n ), \"Angles argument should be a list of angles in radians\"\n nangles = len(angles)\n phaseshift = 2 * linespacing / nphases\n kwargs[\"linespacing\"] = linespacing\n kwargs[\"dz\"] = dz\n kwargs[\"dx\"] = dx\n nz, ny, nx = shape\n\n # adding a single pixel to z and removing to make focal plane centered\n _shape = (shape[0] + 1, int(np.ceil(shape[1] * 1.55)))\n ill_2d = structillum_2d(_shape, *args, **kwargs).sum(0)[:-1]\n\n out = np.zeros((nangles, nphases, *shape), \"single\") # APZYX shape\n for p in range(nphases):\n shiftedIllum = shift(ill_2d, (defocus / dz, - p * phaseshift / dx))\n ill_3d = np.repeat(\n shiftedIllum[:, :, np.newaxis], np.ceil(shape[2] * np.sqrt(2)), axis=2\n )\n\n for a, angle in enumerate(angles):\n print(f\"p: {p}, a: {a}\")\n if angle == 0:\n rotatedillum = ill_3d\n else:\n rotatedillum = rotate(ill_3d, np.rad2deg(angle))\n out[a, p] = crop_center(rotatedillum, nx, ny)\n return out\n\n\nif __name__ == \"__main__\":\n i = structillum_2d((500, 2200))\n\n import tifffile as tf\n import matplotlib.pyplot as plt\n\n tf.imshow(i, photometric=\"minisblack\")\n plt.show()\n tf.imshow(i.sum(0))\n 
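The illum.py record builds structured-illumination intensity by summing interfering complex plane waves of the form exp(i 2 pi k.r), generated by expfield_2d. One hedged observation: in the extraz > 0 branch, intensity has shape (3, nz + extraz, nx) but is indexed as intensity[:extraz, :], which slices the three-component axis rather than z, so that blending path looks suspect. The plane-wave helper itself, verified on a toy grid:

import numpy as np

def expfield_2d(kvec, zarr, xarr, dx, dz):
    # Complex plane wave exp(i * 2*pi * (kx*x + kz*z)) sampled on a grid.
    return np.exp(1j * 2 * np.pi * (kvec[0] * xarr * dx + kvec[1] * zarr * dz))

nz, nx = 4, 8
zarr, xarr = np.indices((nz, nx)).astype(np.float32)
field = expfield_2d((1.0, 2.0), zarr, xarr, dx=0.01, dz=0.01)

# A single plane wave has unit intensity everywhere; structure only appears
# once several such fields are summed and the result is squared.
assert np.allclose(np.abs(field) ** 2, 1.0)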
plt.show()\n","repo_name":"tlambert03/simsim","sub_path":"simsim/old/illum.py","file_name":"illum.py","file_ext":"py","file_size_in_byte":7466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"74672990420","text":"from tkinter import *\n\n\nclass View:\n def cmd_calcul(self):\n print(\"C'est ici que l'on calcule\")\n print(\"le poids est\", self.var_poids.get())\n print(\"la taille est\", self.var_taille.get())\n self.model.poids = self.var_poids.get()\n self.model.taille = self.var_taille.get()\n print(\"l'IMC est :\", self.model.calcul())\n self.label_imc.config(text=str(self.model.calcul()))\n\n def cmd_raz(self):\n print(\"Je remets les champs à 0\")\n self.label_imc.config(text=\"\")\n self.var_poids.set(0)\n self.var_taille.set(0)\n\n def create_widgets(self):\n #\n # Les labels\n #\n label_poids = Label(self.root, text=\"Poids en kg\")\n label_poids.pack()\n self.var_poids = DoubleVar()\n entry_poids = Entry(self.root, textvariable=self.var_poids)\n entry_poids.pack()\n label_taille = Label(self.root, text=\"Taille en m\")\n label_taille.pack()\n self.var_taille = DoubleVar()\n entry_taille = Entry(self.root, textvariable=self.var_taille)\n entry_taille.pack()\n label_imc1 = Label(self.root, text=\"IMC\")\n label_imc1.pack()\n self.label_imc = Label(self.root, text=\" \")\n self.label_imc.pack()\n\n #\n # Les boutons\n #\n #\n #\n bouton_calculer = Button(self.root, text=\"Calculer\", command=self.cmd_calcul)\n bouton_calculer.pack()\n bouton_raz = Button(self.root, text=\"RAZ\", command=self.cmd_raz)\n bouton_raz.pack()\n button_quitter = Button(self.root, text=\"Quitter\", command=self.root.destroy)\n button_quitter.pack()\n\n\n def __init__(self, model):\n self.root = Tk()\n self.model = model\n self.create_widgets()\n self.run()\n\n def run(self):\n self.root.mainloop()\n","repo_name":"jolegrand10/ec2","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5667709168","text":"import os\nfrom flask import Flask, g\nfrom peewee import SqliteDatabase\n\napp = Flask(__name__)\n\n# config\napp.config.update(dict(\n SECRET_KEY='my_session',\n TYTUL='Expenses',\n DATABASE=os.path.join(app.root_path, 'test.db'),\n))\n\nprint(\"my_path \" + app.root_path)\n\n# db instance\nbase = SqliteDatabase(app.config['DATABASE'])\n\n\n@app.before_request\ndef before_request():\n g.db = base\n g.db.connection()\n\n\n@app.after_request\ndef after_request(response):\n g.db.close()\n return response\n","repo_name":"sebatech90/work","sub_path":"WebAppCost/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23546248005","text":"from collections import deque\n\nf = open('./18.input')\ninput = f.read().rstrip()\n\ndef solve(input):\n program = input.split('\\n')\n\n pc = [0,0]\n regs = [{'p': 0},{'p': 1}]\n rcv_q = [deque([]),deque([])]\n pid = 0\n\n cnt_snd_1 = 0\n\n while True:\n instr_parts = program[pc[pid]].split()\n op = instr_parts[0]\n reg_name = instr_parts[1]\n operand_val = None\n\n if len(instr_parts) > 2:\n operand = instr_parts[2]\n try:\n int(operand)\n operand_val = int(operand)\n except ValueError:\n operand_val = regs[pid][operand]\n\n if op == 'set':\n regs[pid][reg_name] = operand_val\n pc[pid] += 1\n elif op == 'add':\n if reg_name in regs[pid]:\n 
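The app.py record in this line opens a peewee connection before every Flask request and closes it afterwards, but it calls g.db.connection(); in peewee 3 the explicit open is connect(), so the hook as written may not do what it intends. A safer version of the same per-request lifecycle (a sketch, assuming peewee >= 3):

from flask import Flask, g
from peewee import SqliteDatabase

app = Flask(__name__)
base = SqliteDatabase('test.db')

@app.before_request
def before_request():
    g.db = base
    g.db.connect(reuse_if_open=True)   # explicit open; no-op if already connected

@app.after_request
def after_request(response):
    if not g.db.is_closed():
        g.db.close()
    return response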
regs[pid][reg_name] += operand_val\n else:\n regs[pid][reg_name] = operand_val\n pc[pid] += 1\n elif op == 'mul':\n if reg_name in regs[pid]:\n regs[pid][reg_name] *= operand_val\n else:\n regs[pid][reg_name] = 0\n pc[pid] += 1\n elif op == 'mod':\n if reg_name in regs[pid]:\n regs[pid][reg_name] %= operand_val\n else:\n regs[pid][reg_name] = 0\n pc[pid] += 1\n elif op == 'jgz':\n sneaky_operand = reg_name\n try:\n int(sneaky_operand)\n sneaky_operand_val = int(sneaky_operand)\n except ValueError:\n sneaky_operand_val = regs[pid][sneaky_operand]\n\n if sneaky_operand_val > 0:\n pc[pid] += operand_val\n else:\n pc[pid] += 1\n elif op == 'snd':\n if reg_name in regs[pid]:\n rcv_q[pid^1].append(regs[pid][reg_name])\n else:\n rcv_q[pid^1].append(0)\n\n pc[pid] += 1\n\n if pid == 1:\n cnt_snd_1 += 1\n elif op == 'rcv':\n if len(rcv_q[pid]) > 0:\n regs[pid][reg_name] = rcv_q[pid].popleft()\n pc[pid] += 1\n else:\n pid ^= 1\n\n # Deadlock\n if len(rcv_q[pid]) == 0 and program[pc[pid]].split()[0] == 'rcv':\n return cnt_snd_1\n\nprint(solve(input))\n","repo_name":"morisan/aoc-2017-py","sub_path":"18b.py","file_name":"18b.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"36384985860","text":"import collections\nimport math\nimport unittest\nfrom typing import List\n\nimport utils\n\n\n# O(n) time. O(n) space. Math, GCD, hash table.\nclass Solution:\n def hasGroupsSizeX(self, deck: List[int]) -> bool:\n return math.gcd(*collections.Counter(deck).values()) > 1\n\n\nclass Test(unittest.TestCase):\n def test(self):\n cases = utils.load_test_json(__file__).test_cases\n\n for case in cases:\n args = str(case.args)\n actual = Solution().hasGroupsSizeX(**case.args.__dict__)\n self.assertEqual(case.expected, actual, msg=args)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"chrisxue815/leetcode_python","sub_path":"problems/test_0914.py","file_name":"test_0914.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"42027459917","text":"from Tkinter import *;\nfrom dataobject import *;\nfrom dataoperationframe import *;\nimport easygui;\nimport pylab;\nfrom tmp_pickrange import *;\n\nclass Fr_PeakInfoGauss(DataOperationFrame,Tmp_PickRange):\n\tdef __init__(self,master=None,framename=None):\n\t\t#print 'type of arg datawindow', type(datawindow)\n\t\tDataOperationFrame.__dict__['__init__'](self,master,framename=framename);\t\n\t\tself.__ginit();\n\t\tTmp_PickRange.ginit(self);\n\t\t\n\tdef __ginit(self):\n\t\tself.gettmpdata('savename').set('Gausspeak');\n\t\t#for i in range(self['numofsourcedatabase']):\n\t\tself.ginitsourcedataentry('spectra2analyze');\n\t\tself.gettmpdata(\"groupstr\").set(\"'min(x)','max(x)'\");\n\t\t\n\t\tself['method']=IntVar();\n\t\tself[\"method\"].set(0);\n\t\t\n\t\tparameterframe=self['parameterframe'];\n\t\tr=Radiobutton(parameterframe, text=\"Plain\", variable=self['method'],value=0);\n\t\tr.pack(side=LEFT);\n\t\tr=Radiobutton(parameterframe, text=\"Base line\", variable=self['method'],value=1);\n\t\tr.pack(side=LEFT);\n\t\tr=Radiobutton(parameterframe, text=\"Base line and slope\", variable=self['method'],value=2);\n\t\tr.pack(side=LEFT);\n\t\t\n\tdef analyze(self,igroup):\n\t\tdatabase=self.gettmpdata('database');\n\t\tspectra=database[0]['resultdatatablegroups'][igroup];\n\t\tspectranew=spectra.getemptyinstance();\n\t\t\t#print 
xmin,xmax\n\t\txmin=float(self['xminstr'].get());\n\t\txmax=float(self['xmaxstr'].get());\n\t\t#resolution=float(self['resolution'].get());\n\t\t\n\t\tmethod=self['method'].get();\n\t\tfor k in spectra.keys():\n\t\t\tspect=spectra[k];\n\t\t\tspectnew=spect.copyxy();\n\t\t\tspectnew.pick(xmin,xmax);\n\t\t\tparas=spectnew.gaussfit(method=method);\n\t\t\t\n\t\t\tamp=paras[0];\n\t\t\tcenter=paras[1];\n\t\t\tsigma=paras[2];\n\t\t\tslope=paras[3];\n\t\t\tintercept=paras[4];\n\t\t\t\n\t\t\tspectnew['amp']=amp;\n\t\t\tspectnew['center']=center;\n\t\t\tspectnew['sigma']=sigma;\n\t\t\tspectnew['slope']=slope;\n\t\t\tspectnew['intercept']=intercept;\n\t\t\t\n\t\t\t#print \"spectranew type after:\", type(spectranew);\n\t\t\tspectranew.insert(spectnew,k);\n\t\t\t\n\t\tspectm0=spectranew.uicolumn2xy(ycolumn='amp');\n\t\tspectm1=spectranew.uicolumn2xy(ycolumn='center');\n\t\tspectm2=spectranew.uicolumn2xy(ycolumn='sigma');\n\t\tspectm3=spectranew.uicolumn2xy(ycolumn='slope');\n\t\tspectm4=spectranew.uicolumn2xy(ycolumn='intercept');\n\t\t\n\t\tspectranew1=spectranew.getemptyinstance();\n\t\tspectranew1.insert(spectm0,'amp');\n\t\tspectranew1.insert(spectm1,'center');\n\t\tspectranew1.insert(spectm2,'sigma');\n\t\tspectranew1.insert(spectm3,'slope');\n\t\tspectranew1.insert(spectm4,'intercept');\n\t\t\n\t\tdatabase[0]['resultdatatablegroups'][igroup]=spectranew;\n\t\tspectranew.plot();\n\t\timport xpyfigure;\n\t\txpyfigure.XpyFigure();\n\t\tsubplot(3,2,1);spectm0.plot();\n\t\tsubplot(3,2,2);spectm1.plot();\n\t\tsubplot(3,2,3);spectm2.plot();\n\t\tsubplot(3,2,4);spectm3.plot();\n\t\tsubplot(3,2,5);spectm4.plot();\n\t\t\n\t\tresultdata=database[0].copy();\n\t\tresultdata['resultdatatablegroups']=database[0]['resultdatatablegroups'][:];\n\t\tresultdata['resultdatatablegroups'][igroup]=spectranew1;\n\t\tresultdata['savestr']=\"moments\"\n\t\tdatabase.append(resultdata);\n\t\t#pass;\n","repo_name":"charleseagle/Data-Analysis-Software-Python","sub_path":"lib/emwspectexp/gui/analysisframe/peakanalysis/fr_peakinfogauss.py","file_name":"fr_peakinfogauss.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18076318707","text":"\"\"\"\nplace_04.py\nlayout manager\nplace\nrelwidth, relheight\nrange: 0 - 1\n\"\"\"\nfrom tkinter import *\nroot = Tk()\nroot.title('Python GUI - place')\nroot.geometry('640x480+300+300')\nroot.config(bg='#ddddff')\np1 = Label(root, text = 'Labele 1')\np2 = Label(root, text ='Labele 2')\np1.place(x=10, y=50)\n# p2 does not show\np2.place(x=50, y=100, relx=0.2, rely=0.2,\n relwidth=0.2, relheight=0.2,\n anchor=CENTER)\nroot.mainloop()","repo_name":"smarty-kev/stem1401py","sub_path":"py210110_python3/day70_py210314/place_04_relwidthrelheight.py","file_name":"place_04_relwidthrelheight.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"39608318618","text":"\"\"\"for x in range(3):\n for y in range(3):\n print(f\"{x} {y}\")\"\"\"\n\nprint(\"ARIF KHAN\")\nname = \"John Smith\"\nage = 20\nis_new_patient = True\n\n\"\"\"name = input(\"What is your name ? \")\ncolor = input(\"What is your favorite color ? 
\")\nprint(name+\" likes \"+color)\"\"\"\n\n\"\"\"message = '{} [{}] is a coder'.format(name, age)\nprint(message)\"\"\"\n\n\"\"\"is_credit_good = False\nprice = 1000000\nif is_credit_good:\n downpayment = 0.1*price\nelse:\n downpayment = 0.2*price\nprint('Downpayment is ${}'.format(downpayment))\"\"\"\n\n\"\"\"secret_number = 9\ni = 0\nwhile i < 3:\n guess_number = int(input(\"Guess the number: \"))\n if guess_number == 9:\n print(\"You Won!\")\n break\n i+=1\nelse:\n print(\"Try Again\")\"\"\"\n\n\"\"\"command = \"\"\nstart = False\nstop = False\nwhile command != \"quit\":\n command = input(\"> \").lower()\n if command == \"start\":\n if start:\n print(\"Dude Car has already been started...\")\n else:\n print(\"Car Starting...\")\n start = True\n elif command == \"stop\":\n if not start:\n print(\"Come on Man! Car has already been stopped...\")\n else:\n print(\"Car Stopped.\")\n start = False\n elif command == \"help\":\n print(\"start to start the car stop to stop the car quit to exit\")\n elif command == \"quit\":\n break\n else:\n print(\"Sorry, I don't understand. Please type help for commands.\")\"\"\"\n\n\"\"\"numbers = [5, 2, 5, 2, 2]\nfor num in numbers:\n print(\"#\"*num)\nfor num in numbers:\n character = ''\n for count in range(num):\n character += 'x'\n print(character)\n\nnumbers = [1, 6, 9, 8, 10]\nmax = numbers[0]\nfor num in numbers:\n if num > max:\n max = num\nprint(max) Watch merciful servant . Islam on demand . Islamic guidance . And sheikh hamza yusufs lectures .\nThe book of assistance \n\nmatrix = [\n [1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]\n]\n\nfor row in matrix:\n for col in row:\n print(col)\"\"\"\n\ngyan = {\n \"hi\": \"Don\",\n \"age\": 56\n}\n\nprint(gyan)\n\n","repo_name":"Ichigo-lab/PythonBegins","sub_path":"Basics/HelloWorld.py","file_name":"HelloWorld.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28530411903","text":"import json\nimport requests\n\n\n\n# this is where you get your playlist Json from spotify\n# https://developer.spotify.com/documentation/web-api/reference/playlists/get-playlists-tracks/\n# run the curl command in same directory and save to file 'playlist.json'\n\n\nwith open('playlist.json') as data:\n data = json.load(data)\n\ntrackAndArtist = []\n# We need to get max size of playlist\nmaxSize = 48\nfor i in range(0, maxSize):\n trackName = data[\"items\"][i][\"track\"][\"name\"]\n trackArtist = data[\"items\"][i][\"track\"][\"album\"][\"artists\"][0][\"name\"]\n\n trackAndArtist.append(trackName + \" - \" + trackArtist)\n\ntrackAndArtist.sort()\nfor i in range(len(trackAndArtist)):\n print(trackAndArtist[i])\nprint(str(len(trackAndArtist)) + \" songs total\")\n\n\n\n# we output a list of strings of tracknames + artists here to be used in search.py\nwith open('youtubePlaylist.json', 'w') as outfile:\n json.dump(trackAndArtist, outfile)\n","repo_name":"MinhHo102/spotifyToYoutube","sub_path":"spotifyJson.py","file_name":"spotifyJson.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74330360022","text":"#_*_coding:utf-8_*_\r\n\r\n__author__ = 'zhaobin022'\r\nfrom monitor.settings import STATUS_DATA_OPTIMIZATION\r\nimport json\r\nimport time\r\nimport copy\r\n\r\nclass DataHandler(object):\r\n def __init__(self,client_id,service,data,REDIS_OBJ):\r\n self.client_id = client_id\r\n self.service_name = service.name\r\n 
self.service_check_interval = service.interval\r\n self.data = data\r\n self.redis_obj = REDIS_OBJ\r\n\r\n def get_redis_slice(self,redis_key , interval, service_check_interval):\r\n count = interval/service_check_interval\r\n count += 5\r\n ret_list = []\r\n redis_slice = self.redis_obj.lrange(redis_key,-count,-1)\r\n current_time = time.time()\r\n redis_slice.reverse()\r\n for s in redis_slice:\r\n data_point , timestamp = json.loads(s)\r\n if current_time - timestamp < interval:\r\n ret_list.append(s)\r\n else:\r\n break\r\n\r\n return ret_list\r\n\r\n def optimized_data(self,current_redis_key, redis_slice):\r\n one_data_point = redis_slice[0]\r\n one_data_point = json.loads(one_data_point)\r\n data = one_data_point[0]\r\n timestamp = one_data_point[1]\r\n temp_dic = {}\r\n if 'data' in data.keys():\r\n\r\n sub_data = data['data']\r\n for k,v_dic in sub_data.items():\r\n # k like lo or eth0\r\n temp_dic[k] = {}\r\n for sub_key , sub_val in v_dic.items():\r\n temp_dic[k][sub_key] = []\r\n ret_dic = copy.deepcopy(temp_dic)\r\n for p in redis_slice:\r\n p = json.loads(p)\r\n data , update_time = p\r\n for k,sub_dic in data['data'].items():\r\n # k like lo or eth0\r\n for sub_key,sub_val in sub_dic.items():\r\n temp_dic[k][sub_key].append(round(float(sub_val),2))\r\n\r\n for k,sub_dic in temp_dic.items():\r\n for sub_key,sub_val in sub_dic.items():\r\n avg_ret = self.get_avg(sub_val)\r\n max_ret = self.get_max(sub_val)\r\n min_ret = self.get_min(sub_val)\r\n mid_ret = self.get_mid(sub_val)\r\n ret_dic[k][sub_key] = [avg_ret,max_ret,min_ret,mid_ret]\r\n self.redis_obj.rpush(current_redis_key,json.dumps([{'data':ret_dic},time.time()] ))\r\n\r\n else:\r\n for key in data.keys():\r\n if key == 'status':continue\r\n temp_dic[key] = []\r\n\r\n ret = copy.deepcopy(temp_dic)\r\n for p in redis_slice:\r\n p = json.loads(p)\r\n point_data, update_time = p\r\n for k,v in point_data.items():\r\n if k == 'status':continue\r\n temp_dic[k].append(round(float(v),2))\r\n for k,v_list in temp_dic.items():\r\n avg_ret = self.get_avg(v_list)\r\n max_ret = self.get_max(v_list)\r\n min_ret = self.get_min(v_list)\r\n mid_ret = self.get_mid(v_list)\r\n ret[k] = [avg_ret,max_ret,min_ret,mid_ret]\r\n self.redis_obj.rpush(current_redis_key,json.dumps([ret,time.time()] ))\r\n\r\n def get_max(self,data_list):\r\n if len(data_list) > 0:\r\n return max(data_list)\r\n else:\r\n return 0\r\n\r\n def get_min(self,data_list):\r\n if len(data_list) > 0:\r\n return min(data_list)\r\n else:\r\n return 0\r\n\r\n def get_mid(self,data_list):\r\n if len(data_list) > 0:\r\n data_list.sort()\r\n return data_list[len(data_list)/2]\r\n\r\n def get_avg(self,data_list):\r\n if len(data_list) > 0:\r\n ret = sum(data_list)/len(data_list)\r\n ret = round(float(ret),2)\r\n return ret\r\n else:\r\n return 0\r\n\r\n def process(self):\r\n for time_range,time_range_list in STATUS_DATA_OPTIMIZATION.items():\r\n interval = time_range_list[0]\r\n count = time_range_list[1]\r\n current_redis_key = 'StatusData_%s_%s_%s' % (self.client_id,self.service_name,time_range)\r\n\r\n last_point_from_redis = self.redis_obj.lrange(current_redis_key,-1,-1)\r\n\r\n if not last_point_from_redis:\r\n self.redis_obj.rpush(current_redis_key,json.dumps([None,time.time()] ))\r\n continue\r\n last_point_from_redis = json.loads(last_point_from_redis[0])\r\n last_update_time = last_point_from_redis[1]\r\n if interval == 0:\r\n if self.data.has_key(\"data\"):\r\n if self.data[\"data\"]:\r\n self.redis_obj.rpush(current_redis_key,json.dumps([self.data,time.time()] ))\r\n 
else:\r\n self.redis_obj.rpush(current_redis_key,json.dumps([self.data,time.time()] ))\r\n else:\r\n if time.time() - last_update_time > interval:\r\n lastest_data_key_in_redis = \"StatusData_%s_%s_latest\" %(self.client_id,self.service_name)\r\n redis_slice = self.get_redis_slice(lastest_data_key_in_redis,interval,self.service_check_interval)\r\n if len(redis_slice)> 0:\r\n self.optimized_data(current_redis_key, redis_slice)\r\n while self.redis_obj.llen(current_redis_key) > count:\r\n self.redis_obj.lpop(current_redis_key)\r\n\r\n\r\n","repo_name":"zhaobin022/monitor","sub_path":"background/data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":5457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42439180560","text":"import sys\nimport os\nfrom sqlalchemy import create_engine\nimport pandas as pd\nfrom keras.preprocessing.text import Tokenizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import Pipeline\nimport multiprocessing\nimport pickle\n\n\ndef load_data(database_filepath):\n '''Uses sqlalchemy engine to load the data from sqlite, splits into X and\n Y, drops the child_alone column (due to lack of sample data), then returns\n X, Y, and the column names.'''\n\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql('messages_table', engine)\n X = df.iloc[:, 0:3]\n Y = df.iloc[:, 4:-1]\n Y.drop(columns=['child_alone'], inplace=True)\n category_names = Y.columns\n return X, Y, category_names\n\n\ndef tokenize(text, vocab_size):\n '''Loads the Keras tokenizer trained in train_classifier.py, tokenizes the\n text, and then returns the tokenized text.'''\n tokenizer = Tokenizer(num_words=vocab_size)\n tokenizer.fit_on_texts(text)\n encoded_docs = tokenizer.texts_to_matrix(text, mode='count')\n return tokenizer, encoded_docs\n\n\ndef build_model():\n '''Gets the number of available CPU's, threads the classifier accordingly\n builds the classifier pipeline, returns the pipeline'''\n num_cpus = multiprocessing.cpu_count()\n pipeline = Pipeline([('classifier', MultiOutputClassifier(\n RandomForestClassifier(n_estimators=40, n_jobs=num_cpus - 1,\n verbose=1)))])\n return pipeline\n\n\ndef evaluate_model(model, X_test, Y_test):\n '''Predicts the outcomes of Y_test and evaluates against X_test. 
Prints\n the outputs of the sklearn classification report.'''\n Y_pred = model.predict(X_test)\n for i in range(0, len(Y_test.iloc[0])):\n print(i)\n truth = Y_test.iloc[:,i:i+1]\n prediction = Y_pred[:,i:i+1]\n print(Y_test.columns[i], \"\\n\",\n classification_report(truth, prediction))\n\n\ndef run_grid_search(model, X_train, Y_train):\n '''Runs GridSearchCV to compare multiple model parameters against each\n other to identify the optimal configuration for accuracy'''\n parameters = [{'classifier__estimator__n_estimators': [50, 100],\n 'classifier__estimator__criterion': ['gini', 'entropy']}]\n cv = GridSearchCV(model, parameters, cv=5)\n cv.fit(X_train, Y_train)  # fixed: was lowercase y_train, an undefined name\n means = cv.cv_results_['mean_test_score']\n stds = cv.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, cv.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\n\n\ndef save_model(tok, model, model_filepath):\n '''Saves models to disk'''\n \n tokenizer_filename = \"tokenizer.pickle\"\n classifier_filename = \"classifier.pickle\"\n\n tokenizer_file = open(model_filepath + tokenizer_filename, 'wb')\n pickle.dump(tok, tokenizer_file)\n\n classifier_file = open(model_filepath + classifier_filename, 'wb')\n pickle.dump(model, classifier_file)\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n\n # The below can be uncommented for dynamic testing\n # database_filepath = \"data\\messages.db\"\n # model_filepath = ''\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n \n print('Tokenizing messages...')\n tok, X = tokenize(X['message'], 5000)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n print('CPU Count {}'.format(multiprocessing.cpu_count()))\n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test)\n\n print('Running GridSearchCV...')\n run_grid_search(model, X_train, Y_train)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(tok, model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. 
\\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sharpie-007/figure8pipeline","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11657093523","text":"from utils import read_data_into_layers\n\n\ndef flood_fill_iteration(layers):\n\n def try_fill(x, y, z):\n # secondary effects FTW\n try:\n layers[z][y][x] = 1\n except IndexError:\n pass\n\n flood_coordinates = [] # we store them\n for z in layers:\n for y in range(0, len(layers[z])):\n for x in range(0, len(layers[z][y])):\n if layers[z][y][x] == 1:\n flood_coordinates.append((x, y, z))\n for flood_coordinate in flood_coordinates: # and then, whe use them.\n x, y, z = flood_coordinate\n try_fill(x + 1, y, z)\n try_fill(x - 1, y, z)\n try_fill(x, y + 1, z)\n try_fill(x, y - 1, z)\n try_fill(x, y, z - 1)\n\n\n return layers\n\n\nif __name__ == \"__main__\":\n layers = read_data_into_layers()\n\n flood_fill_iteration(layers)\n\n print(\"output:\")\n for height in layers:\n print()\n for row in layers[height]:\n print(\" \".join((str(coord) for coord in row)))\n","repo_name":"miguelemosreverte/Efficient-Programming","sub_path":"Python/naive/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12427635955","text":"urls = {\n 'index': \"http://xk.suda.edu.cn/\", # 首页\n 'home': \"http://xk.suda.edu.cn/default_szdx.aspx\", # 用户首页\n 'kb': \"http://xk.suda.edu.cn/xskbcx.aspx\", # 课表\n 'cj': \"http://xk.suda.edu.cn/xscjcx_dq.aspx\", # 成绩\n 'jh': \"http://xk.suda.edu.cn/pyjh.aspx\", # 培养计划(每年推荐选课)\n 'xk': \"http://xk.suda.edu.cn/xsxkqk.aspx\", # 选课情况\n 'ks': \"http://xk.suda.edu.cn/xskscx.aspx\" # 考试时间\n}\n\nurls_tao = {\n 'index': \"http://xk.liontao.xin/\", # 首页\n 'home': \"http://xk.liontao.xin/default_szdx.aspx\", # 用户首页\n 'kb': \"http://xk.liontao.xin/xskbcx.aspx\", # 课表\n 'cj': \"http://xk.liontao.xin/xscjcx_dq.aspx\", # 成绩\n 'jh': \"http://xk.liontao.xin/pyjh.aspx\", # 培养计划(每年推荐选课)\n 'xk': \"http://xk.liontao.xin/xsxkqk.aspx\", # 选课情况\n 'ks': \"http://xk.liontao.xin/xskscx.aspx\" # 考试时间\n}\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Host': \"xk.liontao.xin\"\n}\n\npost_data = {\n \"__VIEWSTATE\":\"\", # CSRF\n \"__EVENTTARGET\": \"\", # 修改字段名 固定\n \"__EVENTARGUMENT\": \"\", # 未知字段 固定\n \"ddlXN\": \"\", # 学年\n \"ddlXQ\": \"\", # 学期\n \"btnCx\": \" 查 询 \" # 按钮 固定\n}\n\nparams = {\n 'kb': {'xh': \"\", \"xm\": \"\", 'gnmkdm': \"N121603\"},\n 'cj': {'xh': \"\", \"xm\": \"\", 'gnmkdm': \"N121604\"},\n 'jh': {'xh': \"\", \"xm\": \"\", 'gnmkdm': \"N121607\"},\n 'xk': {'xh': \"\", \"xm\": \"\", 'gnmkdm': \"N121610\"},\n 'ks': {'xh': \"\", \"xm\": \"\", 'gnmkdm': \"N121615\"}\n}\n\n\ndef wttn():\n \"\"\"\n What's the time now?\n :return: int(%yymmdd)\n \"\"\"\n import datetime\n\n now = datetime.datetime.now()\n return int(str(now.year) + str(now.month) + str(now.day))\n\n\ndef headless_chrome():\n \"\"\"\n Chrome Headless Mode With Selenium\n :return: Selenium Chrome Driver\n \"\"\"\n from selenium import webdriver\n from selenium.webdriver.chrome.options import Options\n\n chrome_opt = Options()\n chrome_opt.add_argument('--headless')\n chrome_opt.add_argument('--disable-gpu')\n 
return webdriver.Chrome(chrome_options=chrome_opt)\n\n\ndef get_code(browser, filename):\n \"\"\"\n Crop the captcha image out of a screenshot of the browser's current page\n :return: Pillow Image\n \"\"\"\n from PIL import Image\n browser.save_screenshot(filename)\n code_ele = browser.find_element_by_id('icode')\n left = code_ele.location['x']\n top = code_ele.location['y']\n right = code_ele.location['x'] + code_ele.size['width']\n bottom = code_ele.location['y'] + code_ele.size['height']\n img = Image.open(filename).crop((left, top, right, bottom))\n img.save(filename, format='PNG')\n return img\n\n\ndef save_cookies(xh, cookies):\n import json\n import sys\n with open(sys.path[0] + '/cookies/' + xh + '.json', \"w\") as json_f:\n json.dump(cookies, json_f)\n\n\ndef read_cookies(xh):\n from json import load\n from sys import path\n from os.path import exists\n from os import stat\n from datetime import datetime\n import time\n cookie_path = path[0] + '/cookies/' + xh + '.json'\n if not exists(cookie_path):\n return False\n loc_t = datetime.now()\n modi_t = time.localtime(stat(cookie_path).st_mtime)\n modi_t = datetime(modi_t.tm_year, modi_t.tm_mon, modi_t.tm_mday, modi_t.tm_hour, modi_t.tm_min, modi_t.tm_sec)\n # total_seconds() rather than .seconds, which wraps around at one day\n if (loc_t-modi_t).total_seconds() > 1200:\n return False\n with open(cookie_path) as json_f:\n return load(json_f)\n\n\ndef get_referer(user, page_flag):\n from urllib import parse\n\n data = params[page_flag].copy()\n data['xh'] = user['xh']\n data['xm'] = user['xm']\n return urls_tao[page_flag] + '?' + parse.urlencode(data, encoding='gbk')\n\n\ndef save_to_csv(filename, header, table):\n import csv\n with open(filename, \"w\", encoding='utf-8-sig') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n writer.writerows(table)\n\n\ndef login_prepare(capt_path):\n \"\"\"\n Fetch the captcha screenshot, the current cookies and the CSRF token\n :param capt_path: where to save the captcha image\n :return: (csrf, cookies)\n \"\"\"\n from bs4 import BeautifulSoup\n browser = headless_chrome()\n browser.get(urls_tao['index'])\n get_code(browser, capt_path)\n chrome_cookies = browser.get_cookies()\n html = browser.page_source\n browser.close()\n cookies = {}\n for cookie in chrome_cookies:\n cookies[cookie['name']] = cookie['value']\n csrf = BeautifulSoup(html, 'lxml').find(\"input\", type=\"hidden\")['value']\n return csrf, cookies\n\n\ndef init_session(form):\n import json\n import requests\n session = requests.Session()\n requests.utils.cookiejar_from_dict(json.loads(form['xk_cookies']), session.cookies)\n headers = {\n 'Host': 'xk.liontao.xin',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Referer': 'http://xk.liontao.xin/',\n 'Content-Type': 'application/x-www-form-urlencoded',\n }\n data = {\n '__VIEWSTATE': form['xk_csrf'],\n 'Button1': '',\n 'TextBox1': form['xh'],\n 'TextBox2': form['pwd'],\n 'TextBox3': form['auth']\n }\n res = session.post(urls_tao['home'], headers=headers, data=data)\n return res.text, session\n\n\ndef login_prepare_splash(capt_path):\n from urllib.parse import quote\n import requests\n import base64\n with open('xk_crawler/login_prepare.lua') as lua_f:\n lua_source = lua_f.read()\n url = 'http://wzhzzmzzy.xyz:8888/execute?lua_source=' + quote(lua_source)\n res = requests.get(url).json()\n with open(capt_path, 'wb') as capt_f:\n capt_f.write(base64.b64decode(res['icode']))\n cookies = dict(((i['name'], i['value']) for i in res['cookies']))\n return res['csrf'], cookies\n\n\nif __name__ == '__main__':\n 
login_prepare_splash('app/static/test.png')\n","repo_name":"wzhzzmzzy/Amber.com","sub_path":"xk_crawler/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6005,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"5326062933","text":"import requests\n\n#UA 伪装\nif __name__ == '__main__':\n url = \"https://www.sogou.com/web\"\n kw = input('enter a word:')\n param = {\n 'query': kw\n }\n headers ={\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n }\n response = requests.get(url, param,headers = headers)\n print(response.text)\n fileName = kw+'.html'\n with open(fileName, 'w', encoding='utf-8') as fp:\n fp.write(response.text)\n print(\"end...\")\n","repo_name":"li-xiansheng/python-crawler-practice","sub_path":"爬虫/requests模块/requests_sogo_params.py","file_name":"requests_sogo_params.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"16155397779","text":"from django import template\nfrom star_ratings.models import Rating\n\nregister = template.Library()\n\n\n@register.filter\ndef color(a):\n if a < 2:\n return 'grey'\n\n elif a == 2:\n return 'yellow'\n\n elif a == 3:\n return 'green'\n\n\n@register.filter\ndef rating_product(pk):\n try:\n star = Rating.objects.get(object_id=pk)\n return star.average\n except Exception as e:\n return 0\n","repo_name":"cardapioz/cardapioz","sub_path":"core/templatetags/cardapioz_extras.py","file_name":"cardapioz_extras.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"4107094908","text":"# Useless REST Server! 
\n# Get last 10 requests (only GET/POST) to the server.\n# Format: GET/POST | PATH | BODY | Time\n# Set server port by setting URS_PORT environment variable\n# Based on https://blog.anvileight.com/posts/simple-python-http-server/\n\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\n\nfrom io import BytesIO\nimport time\nimport os\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n log_list = []\n\n def store_log(self, method, path, body=b''):\n self.log_list.append(\n \"{} | {} | {} | {}\".format(\n method,\n path,\n body.decode('utf-8'),\n time.time()\n ))\n # trim the shared class-level list in place; assigning a slice back\n # would only create a throwaway attribute on this per-request handler\n del self.log_list[:-10]\n \n def get_logs(self):\n rstr = \"\\n\".join(self.log_list)\n return rstr.encode('utf-8')\n\n def do_GET(self):\n self.send_response(200)\n self.end_headers()\n self.store_log('GET', self.path)\n self.wfile.write(self.get_logs())\n\n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n self.send_response(200)\n self.end_headers()\n self.store_log('POST', self.path, body)\n self.wfile.write(self.get_logs())\n\n\n\nport = int(os.getenv('URS_PORT', 8000))\n\n\n\nprint('<<>>')\nprint('Start server port={}'.format(port))\nhttpd = HTTPServer(('', port), SimpleHTTPRequestHandler)\nhttpd.serve_forever()\n","repo_name":"AbdManian/useless-rest-server","sub_path":"urs.py","file_name":"urs.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40032843746","text":"from typing import Any, List, Type\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models.query import QuerySet\nfrom django.http import HttpRequest, HttpResponse\nfrom django.views import View\n\nfrom .csv import BaseQuerySetWriter, BulkQuerySetWriter, write_csv\nfrom .models import CsvDownload\nfrom .settings import MAX_ROWS\nfrom .types import OptionalSequence\n\n\ndef download_csv(\n user: settings.AUTH_USER_MODEL,\n filename: str,\n queryset: QuerySet,\n *columns: str,\n header: bool = True,\n max_rows: int = MAX_ROWS,\n column_headers: OptionalSequence = None,\n writer_klass: Type[BaseQuerySetWriter] = BulkQuerySetWriter,\n **writer_kwargs: Any,\n) -> HttpResponse:\n \"\"\"Download queryset as a CSV.\"\"\"\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n row_count = write_csv(\n response,\n queryset,\n *columns,\n header=header,\n max_rows=max_rows,\n column_headers=column_headers,\n writer_klass=writer_klass,\n **writer_kwargs,\n )\n response[\"X-Row-Count\"] = row_count\n CsvDownload.objects.create(\n user=user,\n row_count=row_count,\n filename=filename,\n columns=\", \".join(columns),\n )\n return response\n\n\nclass CsvDownloadView(View):\n \"\"\"CBV for downloading CSVs.\"\"\"\n\n writer_klass = BulkQuerySetWriter\n\n def get_writer_klass(self) -> Type[BaseQuerySetWriter]:\n # Override to provide a different writer\n return self.writer_klass\n\n def get_writer_kwargs(self) -> dict:\n # custom kwargs for initialising the writer\n return {}\n\n def has_permission(self, request: HttpRequest) -> bool:\n \"\"\"Return True if the user has permission to download this file.\"\"\"\n return True\n\n def get_max_rows(self, request: HttpRequest) -> int:\n \"\"\"Override to set custom MAX_ROWS on a per-request basis.\"\"\"\n return MAX_ROWS\n\n def add_header(self, request: HttpRequest) -> bool:\n \"\"\"Return True to include 
header row in CSV.\"\"\"\n return True\n\n def get_user(self, request: HttpRequest) -> settings.AUTH_USER_MODEL:\n \"\"\"\n Return the user against whom to record the download.\n\n This is provided for cases where the request.user may not be the\n user you wish to record the download against. Impersonation is the\n canonical use case for overriding this.\n\n \"\"\"\n return request.user\n\n def get_filename(self, request: HttpRequest) -> str:\n \"\"\"Return download filename.\"\"\"\n raise NotImplementedError\n\n def get_columns(self, request: HttpRequest) -> List[str]:\n \"\"\"Return columns to extract from the queryset.\"\"\"\n raise NotImplementedError\n\n def get_column_headers(self, request: HttpRequest) -> List[str]:\n \"\"\"Return column headers to apply to the CSV.\"\"\"\n return self.get_columns(request)\n\n def get_queryset(self, request: HttpRequest) -> QuerySet:\n \"\"\"Return the data to be downloaded.\"\"\"\n raise NotImplementedError\n\n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Download data as CSV.\"\"\"\n if not self.has_permission(request):\n raise PermissionDenied\n\n return download_csv(\n self.get_user(request),\n self.get_filename(request),\n self.get_queryset(request),\n *self.get_columns(request),\n header=self.add_header(request),\n max_rows=self.get_max_rows(request),\n column_headers=self.get_column_headers(request),\n writer_klass=self.get_writer_klass(),\n **self.get_writer_kwargs(),\n )\n","repo_name":"yunojuno/django-csv-downloads","sub_path":"django_csv/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4931189767","text":"import abc\nimport json\nfrom monasca_common.kafka_lib.client import KafkaClient\nfrom monasca_common.kafka_lib.producer import SimpleProducer\nfrom monasca_common.simport import simport\nfrom oslo_config import cfg\n\n\nclass MessageAdapter(object):\n\n @abc.abstractmethod\n def do_send_metric(self, metric):\n raise NotImplementedError(\n \"Class %s doesn't implement do_send_metric(self, metric)\"\n % self.__class__.__name__)\n\n\nclass KafkaMessageAdapter(MessageAdapter):\n\n adapter_impl = None\n\n def __init__(self):\n client_for_writing = KafkaClient(cfg.CONF.messaging.brokers)\n self.producer = SimpleProducer(client_for_writing)\n self.topic = cfg.CONF.messaging.topic\n\n @staticmethod\n def init():\n # object to keep track of offsets\n KafkaMessageAdapter.adapter_impl = simport.load(\n cfg.CONF.messaging.adapter)()\n\n def do_send_metric(self, metric):\n self.producer.send_messages(\n self.topic,\n json.dumps(metric, separators=(',', ':')))\n return\n\n @staticmethod\n def send_metric(metric):\n if not KafkaMessageAdapter.adapter_impl:\n KafkaMessageAdapter.init()\n KafkaMessageAdapter.adapter_impl.do_send_metric(metric)\n\n\nclass KafkaMessageAdapterPreHourly(MessageAdapter):\n\n adapter_impl = None\n\n def __init__(self):\n client_for_writing = KafkaClient(cfg.CONF.messaging.brokers)\n self.producer = SimpleProducer(client_for_writing)\n self.topic = cfg.CONF.messaging.topic_pre_hourly\n\n @staticmethod\n def init():\n # object to keep track of offsets\n KafkaMessageAdapterPreHourly.adapter_impl = simport.load(\n cfg.CONF.messaging.adapter_pre_hourly)()\n\n def do_send_metric(self, metric):\n self.producer.send_messages(\n self.topic,\n json.dumps(metric, separators=(',', ':')))\n return\n\n @staticmethod\n def send_metric(metric):\n if not 
KafkaMessageAdapterPreHourly.adapter_impl:\n KafkaMessageAdapterPreHourly.init()\n KafkaMessageAdapterPreHourly.adapter_impl.do_send_metric(metric)\n","repo_name":"geshuro/monasca-transform","sub_path":"monasca_transform/messaging/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"29743760437","text":"# 전구와 스위치\n\nimport sys\nfrom copy import deepcopy\n\n# 첫번째 전구를 켜는 경우와 그렇지 않은 경우를 두가지로 나누어서 계산해본다\ninput = sys.stdin.readline\nn = int(input())\ncurr = list(map(int,input().rstrip()))\ntarget = list(map(int,input().rstrip()))\nans1, ans2 = 0,1\nif n > 2 :\n curr_2 = [1-k for k in curr[0:2]] + curr[2:]\nelif n == 2 : \n curr_2 = [1-k for k in curr[0:2]]\nfor i in range(1,n-1) : \n if curr[i-1] != target[i-1] :\n curr[i-1:i+2] = [1-k for k in curr[i-1:i+2]]\n ans1 += 1\n if curr_2[i-1] != target[i-1] :\n curr_2[i-1:i+2] = [1-k for k in curr_2[i-1:i+2]]\n ans2 += 1\nif curr[n-2] != target[n-2] : \n curr[n-2:] = [1-k for k in curr[n-2:]]\n ans1 += 1\nif curr_2[n-2] != target[n-2] : \n curr_2[n-2:] = [1-k for k in curr_2[n-2:]]\n ans2 += 1\nif curr != target : \n ans1 = sys.maxsize\nif curr_2 != target : \n ans2 = sys.maxsize\nfinal_ans = min(ans1,ans2)\nif final_ans == sys.maxsize : \n print(-1)\nelse : \n print(final_ans)","repo_name":"thomas783/coding_test","sub_path":"baekjoon/greedy/2138.py","file_name":"2138.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28265398309","text":"from itertools import chain\r\nfrom algoritmia.utils import infinity, min\r\nfrom algoritmia.problems.generalizedcoinchange.dynamicprogramming3 import IterativeDynamicCoinChanger\r\n\r\nclass SpaceSavingDynamicCoinChanger(IterativeDynamicCoinChanger):#[class\r\n def weight(self, Q: \"int\") -> \"Real\":\r\n current = [0] + [infinity] * Q\r\n previous = [None] * (Q+1)\r\n for n in range(1, self.n+1):\r\n previous, current = current, previous\r\n for q in range(Q+1):\r\n current[q] = min((previous[q-i*self.v[n-1]] + i*self.w[n-1] \\\r\n for i in range(q//self.v[n-1]+1)), ifempty=infinity)\r\n return current[Q]#]class\r\n","repo_name":"DavidLlorens/algoritmia1","sub_path":"src/algoritmia/problems/generalizedcoinchange/dynamicprogramming4a.py","file_name":"dynamicprogramming4a.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"70021231061","text":"import sys\nfrom itertools import product\n\n# Read data\ninput_file = sys.argv[1]\nwith open(input_file) as f:\n data = f.read()\n\n# Parse instructions\ninstructions = []\nfor line in data.split('\\n'):\n if len(line) == 0:\n pass\n else:\n instr, arg = line.split(\" = \")\n if instr.startswith(\"mask\"):\n instructions.append((\"mask\", arg))\n elif instr.startswith(\"mem\"):\n address = int(instr[len(\"mem[\"):-1])\n value = int(arg)\n instructions.append((\"mem\", (address, value)))\n else:\n raise ValueError(\"Invalid instruction: {}\".format(instr))\n\n# Part 1: apply bit masks to the values written to memory\nmask_dontcares = 0\nmask_set = 0\nmem = {}\nfor instr, arg in instructions:\n if instr == \"mask\":\n mask_dontcares = 0\n mask_set = 0\n for i, c in enumerate(arg[::-1]):\n if c == 'X':\n mask_dontcares |= 1 << i\n else:\n mask_set |= int(c) << i\n elif instr == \"mem\":\n address, value = arg\n value &= mask_dontcares\n value |= 
mask_set\n mem[address] = value\n else:\n pass\n\nsum_of_data = sum(mem.values())\nprint(f\"Sum: {sum_of_data}\")\n\n# Part 2: apply the bit masks to the memory address\nmask_dontcares = [(0, 0)]\nmask_set = 0\nmem = {}\nfor instr, arg in instructions:\n if instr == \"mask\":\n masked_idx = []\n mask_set = 0\n for i, c in enumerate(arg[::-1]):\n if c == 'X':\n masked_idx.append(i)\n else:\n mask_set |= int(c) << i\n\n mask_dontcares = []\n for mask in product([0, 1], repeat=len(masked_idx)):\n m_reset = 0\n m_permute = 0\n for j, k in zip(masked_idx, mask):\n m_reset |= 1 << j\n m_permute |= k << j\n mask_dontcares.append((m_reset, m_permute))\n elif instr == \"mem\":\n address, value = arg\n address |= mask_set\n for m_reset, m_permute in mask_dontcares:\n address &= ~m_reset\n address |= m_permute\n mem[address] = value\n else:\n pass\n\nsum_of_data = sum(mem.values())\nprint(f\"Sum: {sum_of_data}\")\n","repo_name":"lcvisser/adventofcode","sub_path":"2020/14/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"69816914583","text":"from itertools import count, cycle\n\ndef func_count():\n number_start = int(input('C какого числа начинать список? '))\n number_final = int(input('До какого числа продолжить список? '))\n for el in count(number_start):\n if el > number_final:\n break\n else:\n print(el)\n\ndef func_cycle():\n foo = input('Введите элементы списка:')\n bar = int(input('Введите число, сколько раз будем перебрать элементы списка: '))\n с = 0\n for el in cycle(foo):\n if с > bar:\n break\n print(el)\n с += 1\n\n\nprint('Итератор 1: Генерирует целые числа, начиная с указанного\\nИтератор 2: Повторяет элементы списка.' )\nchoice = input('Введите 1, для запуска первого итератора или\\nВведите 2, для запуска второго итератора: ')\n\nif choice == '1':\n func_count()\n\nelif choice == '2':\n func_cycle()\n\nelse:\n print('Ошибка! 
Нужно ввести 1 или 2.')\n\n\n# done\n","repo_name":"Maxim-chernets/geekbrains_HW","sub_path":"lesson4/task_6.py","file_name":"task_6.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23852687430","text":"\nclass Node:\n def __init__(self):\n pass\nn1 = Node()\nn1.val = 1\nn1.num = 0\nn1.neighbors = set([3,1,2])\n\nn2 = Node()\nn2.val = 4\nn2.num = 1\nn2.neighbors = set([3,0,2])\n\nn3 = Node()\nn3.val = 6\nn3.num = 2\nn3.neighbors = set([1,0])\n\nn4 = Node()\nn4.val = 2\nn4.num = 3\nn4.neighbors = set([1,0])\n\nG = {0:n1, 1:n2, 2:n3, 3:n4}\nimport math\ndef pathsp(graph,n1,n2):\n paths = []\n def dfs(n, curpath):\n nonlocal paths\n nonlocal n2\n nonlocal graph\n if n.num == n2.num:\n curpath.append(n2.num)\n paths.append(curpath)\n return\n curpath.append(n.num)\n temp = n.val\n n.val = float('nan')\n for neigh in n.neighbors:\n if not math.isnan(graph[neigh].val): dfs(graph[neigh], curpath.copy())\n n.val = temp\n dfs(n1,[])\n for path in paths: print(path)\n\npathsp(G,n1,n4)","repo_name":"jptboy/interviewpractice","sub_path":"allpaths.py","file_name":"allpaths.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6823126038","text":"import copy\n\nrow = int(input())\ncol = int(input())\npos = []\nfor i in range(row):\n tmp = [j for j in input().split(' ')]\n pos.append(copy.deepcopy(tmp))\nprint(pos)\n# for tmp in pos:\n# print(tmp)\nhor = []\nver = []\nfor r in range(row):\n for c in range(col):\n if(pos[r][c]=='1'):\n hor.append(r)\n ver.append(c)\n\nhor.sort()\nver.sort()\nmid = len(hor)//2\nmeetingPoint = [int(hor[mid]),int(ver[mid])]\n\ndistance = 0\nfor ro in range(row):\n for co in range(col): \n if (pos[ro][co] == '1'):\n distance += abs(meetingPoint[0] - ro) + abs(meetingPoint[1] - co); \nprint(distance)\n\n\n \n\n ","repo_name":"Sanchi02/Dojo","sub_path":"PracticeComp/2DMeeting.py","file_name":"2DMeeting.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11639511852","text":"from . 
scene import *\nfrom core.controller.menu_controller import *\nfrom core.resource_manager import *\nfrom core.score_database import *\n\nclass MainMenu(Scene):\n\n\tdef __init__(self):\n\t\tsuper(MainMenu, self).__init__()\n\t\tself.__menu_controller = MenuController()\n\t\tself.add_controller(self.__menu_controller)\n\t\tself.set_camera(Camera())\n\t\tself.__font = self.__font = ResourceManager.get_instance().get_font(\"fonts/Roboto-Regular\", 50)\n\t\tself.__music = ResourceManager.get_instance().get_music(\"music/main_menu_theme\")\n\t\tself.__music.play(-1, 0)\n\n\tdef __del__(self):\n\t\tself.__music.stop()\n\n\tdef draw(self, screen):\n\t\timage = ResourceManager.get_instance().get_image(\"graphics/menu_prompt\")\n\t\tscreen.blit(image,(0,0))\n\t\tsuper(MainMenu, self).draw(screen)\n\t\ttext = self.__font.render(\"Top Score:\", False, (255, 255, 255))\n\t\ttext_rect = text.get_rect()\n\t\tscore_value = ScoreDatabase.get_instance().get_scores()[0]\n\t\tscore = self.__font.render(str(score_value), False, (255, 255, 255))\n\t\tscore_rect = score.get_rect()\n\t\tx_offset = (text_rect.width - score_rect.width)/2\n\t\tscreen.blit(text, (80, 40))\n\t\tscreen.blit(score, (80+x_offset, 110))\n","repo_name":"SirDavidLudwig/StarBlasters","sub_path":"src/gui/scene/main_menu.py","file_name":"main_menu.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43040010801","text":"# -*- coding: utf-8 -*-\nimport json\nimport uuid\nimport datetime\nimport logging\nfrom ..utilities.parsing import try_parse, normalize_mac_address\n\n\nFIXEDIO = 'fixedIO'\nFIRMWARE_UPLOAD = 'ftpFwUpd'\nFIRMWARE_STATUS = 'ftpFwUpdEventCode'\nWIFI = 'wifi'\nBLE = 'ble'\nSENSORDATA = 'sensorData'\nSENSORSECURITYEVENT = 'sensorSecurityEvent'\nSENSORSECURITYEVENTCODE = 'sensorSecurityEventCode'\nSENSORVALUES = 'sensorValues'\nSENSORNAME = 'sensorName'\nSENSORSTATUSCODE = 'sensorStatusCode'\n\n\nclass UnAuthenticatedSensor(Exception):\n def __init__(self, id):\n super(Exception, self).__init__()\n self.id = id\n\n\nclass FirmwareUploadException(Exception):\n def __init__(self, key, point, gateway_name):\n super(Exception, self).__init__()\n self.key = key\n self.point = point\n self.gateway = gateway_name\n\n\nclass DataPointFactory(object):\n \"\"\"\n Converts to a new point\n \"\"\"\n def __init__(self, db, table):\n self.db = db\n self.table = table\n self.firmware_upload_table = table\n\n def set_firmware_upload_table(self, table):\n self.firmware_upload_table = table\n\n def from_mqtt_message(self, message):\n \"\"\"\n From mosquitto message payload\n \"\"\"\n msg = message.payload.decode('utf-8')\n messages_parsed = self.from_json_string(msg)\n\n for i in range(len(messages_parsed)):\n messages_parsed[i].topic = message.topic\n\n return messages_parsed\n\n def from_json_string(self, msg):\n \"\"\"\n From JSON message payload\n \"\"\"\n decoded_msg = json.loads(msg)\n messages = []\n for key, message in decoded_msg.items():\n key = normalize_mac_address(key.replace('NMS_', ''))\n gateway_name = key \n logging.debug(f'key: {key} message: {message}')\n parsed_msg = None\n if FIXEDIO in message:\n parsed_msg = message[FIXEDIO]\n key = 'gateway_%s' % key\n elif WIFI in message:\n parsed_msg, key = self.parse_json_from_smart_sensor(WIFI, message, key)\n elif BLE in message:\n parsed_msg, key = self.parse_json_from_smart_sensor(BLE, message, key)\n elif FIRMWARE_UPLOAD in message:\n parsed_msg, key = 
self.parse_json_from_firmware(message, key, gateway_name)\n else:\n raise Exception('Message format is not recognized')\n\n if FIRMWARE_UPLOAD in message:\n point = FirmwareUpdateStatus(self.db, self.firmware_upload_table, parsed_msg) \n point.gateway = gateway_name\n point.sensor_id = key\n else:\n point = DataPoint(self.db, self.table, parsed_msg)\n point.gateway = gateway_name\n point.sensor_id = key\n messages.append(point)\n return messages\n\n def parse_json_from_firmware(self, message, key, gateway_name):\n obj = message[FIRMWARE_UPLOAD]\n if SENSORNAME in obj:\n key = 'device_%s_%s' % (key, obj[SENSORNAME])\n else:\n key = 'gateway_%s' % key\n\n if obj[FIRMWARE_STATUS] == 1 or obj[FIRMWARE_STATUS] == 2:\n return obj, key\n else:\n logging.debug(f'Firmware error for \"{gateway_name}\".\"{key}\"')\n raise FirmwareUploadException(key, obj, gateway_name)\n\n def parse_json_from_smart_sensor(self, connection_type, message, key):\n if SENSORSECURITYEVENT in message[connection_type]:\n key = 'device_%s_%s' % (key, message[connection_type][SENSORSECURITYEVENT][SENSORNAME])\n if message[connection_type][SENSORSECURITYEVENT][SENSORSECURITYEVENTCODE] == 1:\n raise UnAuthenticatedSensor(key)\n else:\n key = 'device_%s_%s' % (key, message[connection_type][SENSORDATA][SENSORNAME])\n\n if message[connection_type][SENSORDATA][SENSORSTATUSCODE] == 2:\n raise UnAuthenticatedSensor(key)\n\n obj = {}\n for values in message[connection_type][SENSORDATA][SENSORVALUES]:\n obj[values['name']] = try_parse(values['value'])\n return obj, key\n\nclass DataPoint:\n def __init__(self, db, table, message):\n self.id = uuid.uuid4()\n self.db = db\n self.table = table\n self.message = message\n self.timestamp = datetime.datetime.utcnow().isoformat()\n self.gateway = None\n self.topic = None\n self.sensor_id = None\n\n def _event_type(self):\n if self.topic is None:\n return ''\n return self.topic.replace('/', '.')\n\n\nclass FirmwareUpdateStatus(DataPoint):\n def __init__(self, db, table, message):\n super().__init__(db, table, message)\n","repo_name":"charIoT-h2020/chariot-base","sub_path":"chariot_base/model/point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"33525669105","text":"\"\"\"Base Game class.\"\"\"\n\nimport pygame\n\nfrom . 
import scene\n\n\nclass Game:\n \"\"\"Base class for implementing specific game instances.\"\"\"\n\n FPS = 60\n\n def __init__(self, screen_width: int, screen_height: int, name: str,\n icon: pygame.Surface = None):\n pygame.init()\n self.__screen_width = screen_width\n self.__screen_height = screen_height\n self.__name = name\n\n self.screen = pygame.display.set_mode((screen_width, screen_height))\n self.screen_rect = self.screen.get_rect()\n pygame.display.set_caption(name)\n if icon is not None:\n pygame.display.set_icon(icon)\n self.scene_manager = scene.SceneManager()\n self.clock = pygame.time.Clock()\n\n def add_scene(self, scene_id, scene):\n \"\"\"Adds scene to game.\"\"\"\n\n self.scene_manager.add(scene_id, scene)\n\n def set_initial_view(self, scene_id):\n \"\"\"Set the initial scene of the game\"\"\"\n\n self.scene_manager.initial_view(scene_id)\n\n def start(self) -> None:\n \"\"\"Main loop of the game.\"\"\"\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n self.scene_manager.update_on_event(event)\n\n self.scene_manager.show()\n self.scene_manager.update()\n\n pygame.display.update()\n self.clock.tick(self.FPS)\n pygame.quit()\n\n @property\n def name(self) -> str:\n \"\"\"Get game's name.\"\"\"\n\n return self.__name\n\n @property\n def width(self) -> int:\n \"\"\"Get the game screen width.\"\"\"\n\n return self.__screen_width\n\n @property\n def height(self) -> int:\n \"\"\"Get the game screen height.\"\"\"\n\n return self.__screen_height\n","repo_name":"smolBlackCat/python-game-engine","sub_path":"src/basic_engine/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74726142101","text":"start: int=None\nend: int=None\nosszeg: int=0\nszorzat:int=1\n\nprint(\"Kérem a kezdő és a végző értékeket!\")\nstart=int(input())\nend=int(input())\n\nif(start % 2 == 0):\n for i in range(start, end + 1, 2):\n osszeg=osszeg+i\n for i in range(start + 1, end + 1, 2):\n szorzat=szorzat*i\nelse:\n for i in range(start + 1, end + 1, 2):\n osszeg=osszeg+i\n for i in range(start, end + 1, 2):\n szorzat=szorzat*i\nprint(osszeg)\nprint(szorzat)","repo_name":"jamaikaivasutas/python-1","sub_path":"07 - For Ciklus/Feladat 11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"8987021044","text":"import json\nfrom .models import *\n\n\ndef cookieCart(request):\n try:\n cart = json.loads(request.COOKIES['cart'])\n print(\"Cart: \", cart)\n except:\n cart = {}\n order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}\n cart_items = order['get_cart_items']\n orderitems = []\n\n for i in cart:\n try:\n cart_items += cart[i]['quantity']\n\n product = Product.objects.get(id=i)\n total = product.price * cart[i]['quantity']\n\n order['get_cart_total'] += total\n order['get_cart_items'] += cart[i]['quantity']\n\n item = {\n 'product': {\n 'id': product.id,\n 'name': product.name,\n 'price': product.price,\n 'imageURL': product.imageURL,\n },\n 'quantity': cart[i]['quantity'],\n 'get_total': total,\n }\n orderitems.append(item)\n\n if not product.digital:\n order['shipping'] = True\n\n except:\n pass\n\n return {\n 'order': order,\n 'orderitems': orderitems,\n 'cart_items': cart_items,\n }\n\n\ndef cartData(request):\n if request.user.is_authenticated:\n order, created 
= Order.objects.get_or_create(customer=request.user.customer, complete=False)\n cart_items = order.get_cart_items\n orderitems = order.orderitem_set.all()\n else:\n cookieData = cookieCart(request)\n order = cookieData['order']\n cart_items = cookieData['cart_items']\n orderitems = cookieData['orderitems']\n return {\n 'order': order,\n 'orderitems': orderitems,\n 'cart_items': cart_items,\n }\n\ndef guestOrder(data, request):\n print('User is not logged in')\n\n name = data['form']['name']\n email = data['form']['email']\n\n cookieData = cookieCart(request)\n items = cookieData['orderitems']\n\n customer, created = Customer.objects.get_or_create(\n email=email\n )\n customer.name = name\n customer.save()\n\n order = Order.objects.create(\n customer=customer,\n complete=False\n )\n\n for item in items:\n product = Product.objects.get(id=item['product']['id'])\n\n order_item = OrderItem.objects.create(\n product=product,\n order=order,\n quantity=item['quantity'],\n )\n return customer, order\n","repo_name":"wyatt-h/ecommerce_cp","sub_path":"store/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25687089888","text":"import heapq\nimport sys\n\ninput = sys.stdin.readline\n\nt = int(input())\nfor _ in range(t):\n k = int(input())\n heap = list(map(int, input().split()))\n heapq.heapify(heap)\n ans = 0\n while len(heap) > 1:\n temp1 = heapq.heappop(heap)\n temp2 = heapq.heappop(heap)\n ans += temp1 + temp2\n heapq.heappush(heap, temp1 + temp2)\n print(ans)","repo_name":"KINHYEONJI/mad-algorithm","sub_path":"SEPTEMBER/SHLEE/BOJ13975.py","file_name":"BOJ13975.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"70147681301","text":"\"\"\"\nAn example script to demonstrate the use of the s3_download function to download file,\nfrom an S3 bucket.\n\"\"\"\n\nimport tamr_toolbox as tbox\nimport boto3\n\ns3_client = boto3.client(\"s3\")\n\n# download file from AWS S3\n# download a file on GCS \"s3://my-bucket/path-to-file\" to \"my_local_directory/my_file.txt\"\ntbox.filesystem.cloud.s3_download(\n cloud_client=s3_client,\n source_filepath=\"path-to-file\",\n destination_filepath=\"my_local_directory/my_file.txt\",\n bucket_name=\"my-bucket\",\n)\n","repo_name":"Datatamer/tamr-toolbox","sub_path":"examples/snippets/filesystem/cloud/s3_download.py","file_name":"s3_download.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"12"} +{"seq_id":"29626906931","text":"# processes = [\n# {\"name\": \"P1\", \"arrival\": 1, \"burst\": 3, \"remain\": 3, \"start\": \"Unset\",\n# \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1},\n# {\"name\": \"P2\", \"arrival\": 3, \"burst\": 7, \"remain\": 7, \"start\": \"Unset\",\n# \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1},\n# {\"name\": \"P3\", \"arrival\": 5, \"burst\": 1, \"remain\": 1, \"start\": \"Unset\",\n# \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1},\n# {\"name\": \"P4\", \"arrival\": 11, \"burst\": 5, \"remain\": 5, \"start\": \"Unset\",\n# \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1},\n# {\"name\": \"P5\", \"arrival\": 13, \"burst\": 2, \"remain\": 2, \"start\": \"Unset\",\n# \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1}]\n#\n# control = ['--', 'P1', 'P1', 
'P2', 'P2', 'P1', 'P3', 'P2', 'P2', 'P2',\n# 'P2', 'P4', 'P4', 'P2', 'P5', 'P5', 'P4', 'P4', 'P4', '--']\n\nprocesses = [\n {\"name\": \"P5\", \"arrival\": 1, \"burst\": 7, \"remain\": 7, \"start\": \"Unset\",\n \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 0},\n {\"name\": \"P1\", \"arrival\": 6, \"burst\": 6, \"remain\": 6, \"start\": \"Unset\",\n \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 1},\n {\"name\": \"P2\", \"arrival\": 9, \"burst\": 5, \"remain\": 5, \"start\": \"Unset\",\n \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 2},\n {\"name\": \"P3\", \"arrival\": 11, \"burst\": 2, \"remain\": 2, \"start\": \"Unset\",\n \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 5},\n {\"name\": \"P4\", \"arrival\": 12, \"burst\": 4, \"remain\": 4, \"start\": \"Unset\",\n \"last\": 0, \"end\": \"Unset\", \"segments\": 1, \"empty\": 7}]\n\ncontrol = ['P1', 'P1', 'P2', 'P2', 'P3', 'P1', 'P1', 'P4', 'P4', 'P5',\n 'P5', 'P1', 'P1', 'P4', 'P5', 'P5', '--', '--', '--', '--']\n\nactive = []\ntemp = []\nrunning_queue = []\nwait_queue = []\nfinal_queue = []\n\ndone = 0\ncurrent_time = 0\nquantum = 2\n\n\ndef load_active():\n if wait_queue[0][\"start\"] == \"Unset\":\n wait_queue[0][\"start\"] = current_time\n wait_queue[0][\"last\"] = current_time\n active.append(wait_queue[0])\n wait_queue.pop(0)\n\n\ndef unload_active():\n active[0][\"end\"] = current_time\n running_queue.append(active[0])\n active.pop(0)\n\n\nwhile not done:\n for p in processes:\n if p[\"arrival\"] == current_time:\n wait_queue.append(p)\n\n if active and active[0][\"remain\"] > 0:\n active[0][\"remain\"] -= 1\n if active[0][\"remain\"] == 0:\n unload_active()\n if active and active[0][\"last\"] == current_time - quantum:\n active[0][\"segments\"] += 1\n active[0][\"empty\"] = current_time\n temp = active[0]\n active.pop(0)\n\n if active and active[0][\"remain\"] == 0:\n unload_active()\n\n if not active and not wait_queue:\n if temp:\n wait_queue.insert(0, temp)\n temp = []\n\n if not active and wait_queue:\n if temp:\n if temp[\"empty\"] < wait_queue[0][\"arrival\"]:\n wait_queue.insert(0, temp)\n else:\n wait_queue.append(temp)\n temp = []\n if wait_queue[0][\"arrival\"] <= current_time:\n load_active()\n\n if active:\n final_queue.append(active[0][\"name\"])\n else:\n final_queue.append(\"--\")\n\n print(\"Time:\", current_time, end=\" \\t> \")\n if active:\n print(active[0][\"name\"])\n else:\n print(\"--\")\n\n current_time += 1\n\n if current_time == 40:\n done = 1\n\nresponse = []\nturnaround = []\nwait = []\n\nprint()\n\nfor p in running_queue:\n tt = p[\"end\"] - p[\"arrival\"]\n turnaround.append(tt)\n wt = tt - p[\"burst\"]\n wait.append(wt)\n rt = p[\"start\"] - p[\"arrival\"]\n response.append(rt)\n print(p[\"name\"], \"tt:\", tt, \"wt:\", wt, \"rt:\", rt)\n\nprint()\n\nif turnaround:\n print(\"Average turnaround time:\", sum(turnaround) / len(turnaround))\nif wait:\n print(\"Average waiting time:\", sum(wait) / len(wait))\nif response:\n print(\"Average response time:\", sum(response) / len(response))\n\nprint(\"\\nControl:\\t\", control)\nif final_queue == control:\n print(\"Success:\\t\", final_queue)\nelse:\n print(\"Failure:\\t\", final_queue)\n","repo_name":"smekras/scheduler-calculator","sub_path":"cpu/robin.py","file_name":"robin.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40544960565","text":"import random\r\nscore_list = []\r\n\r\ndef 
start_game():\r\n random_number = random.randrange(1,10)\r\n name = input(\"<==== Hello there !! ====>\\nWelcome to the number guessing game! \\nWhat is your name? \")\r\n try :\r\n score_list.sort()\r\n print(f'The current high score is {score_list[0]} attempt(s)..')\r\n except :\r\n print(f'Good luck, {name}!')\r\n attempt_count = 1\r\n guessed_number = input('Guess a number between 1 and 10: ')\r\n\r\n try :\r\n guessed_number = int(guessed_number)\r\n except ValueError as err :\r\n print(f'Incorrect input. \\n{err}')\r\n guessed_number = input('Try again.. Guess an integer between 1 and 10: ')\r\n attempt_count += 1\r\n guessed_number = int(guessed_number)\r\n\r\n if guessed_number > 10 and guessed_number != random_number :\r\n print('Number out of range, please try again')\r\n elif guessed_number > random_number and guessed_number < 11 and guessed_number != random_number :\r\n print('Too high!')\r\n elif guessed_number < random_number and guessed_number != random_number :\r\n print('Too low!')\r\n\r\n while guessed_number != random_number :\r\n try :\r\n second_guess = int(input(f'Not quite, {name}.. Try again :'))\r\n if second_guess > 10 :\r\n print('Number out of range')\r\n attempt_count += 1\r\n continue\r\n elif second_guess > random_number and second_guess < 11 :\r\n print('Too high!')\r\n attempt_count += 1\r\n continue\r\n elif second_guess < random_number :\r\n print('Too low!')\r\n attempt_count += 1\r\n continue\r\n elif second_guess == random_number :\r\n attempt_count += 1\r\n print(f'Great job! It took you {attempt_count} tries to get it right!')\r\n score_list.append(attempt_count)\r\n break\r\n\r\n except ValueError as err :\r\n print(f'That is not a correct input.. We are looking for an integer. \\n{err}')\r\n attempt_count += 1\r\n\r\n if guessed_number == random_number :\r\n print(f'Wow, {name}, you did it first try! Congratulations!')\r\n score_list.append(attempt_count)\r\n\r\n play_again = input('Would you like to play again? (y/n) ')\r\n if play_again.lower() == 'y' :\r\n start_game()\r\n\r\nstart_game()\r\n\r\nprint('<=== Thanks for playing! 
===>')\r\n","repo_name":"tmilio/Techdegree-Project","sub_path":"Random_Number_Game.py","file_name":"Random_Number_Game.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12236194035","text":"# Hopfield classification\r\nimport math\r\nimport numpy as np\r\n\r\nclass Neuron:\r\n def __init__(self, weights, bias, tfunction):\r\n self.w = weights\r\n self.b = bias\r\n self.tf = tfunction\r\n \r\n def purelin(self, x):\r\n return x\r\n\r\n def satlins(self, x):\r\n x = np.where(x<-1, -1, x)\r\n return np.where(x>1, 1, x)\r\n\r\n def calculate(self, inputs):\r\n res = np.dot(self.w, inputs) + self.b\r\n\r\n if (self.tf == \"SATLINS\"):\r\n res = self.satlins(res)\r\n else:\r\n raise Exception('TF not set.')\r\n\r\n return res\r\n\r\nw = np.matrix([[0.2, 0, 0], [0, 1.2, 0], [0, 0, 0.2]])\r\nb = np.matrix([0.9, 0, -0.9]).T\r\nhn = Neuron(w, b, \"SATLINS\")\r\n\r\n# test\r\ninput_vector = np.matrix(([-1, -1, -1])).T\r\n\r\na_old = hn.calculate(input_vector)\r\na_new = hn.calculate(a_old)\r\nc = 2\r\nmax_loops = 1000\r\n\r\nwhile ((not (a_old == a_new).all()) and (c < max_loops)):\r\n a_old = a_new\r\n a_new = hn.calculate(a_old)\r\n c += 1\r\n\r\nprint(a_new)\r\nprint(c)\r\n\r\n","repo_name":"PasiNN/sketch","sub_path":"nnd3hopc.py","file_name":"nnd3hopc.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25759534276","text":"#oef13\n\ndef check_email_format(email):\n # 1 is there an @ in it?\n\n position_at = email.find(\"@\")\n if(position_at < 3):\n return False\n\n #2 ends with \"student.howest.be\"\n if( email[position_at +1 :] != \"student.howest.be\"):\n return False\n\n #3 a dot as in naam.voornaam\n position_punt = email[0:position_at].find(\".\")\n if(position_punt == -1): return False\n voornaam= email[0:position_punt]\n naam = email[position_punt: position_at]\n if(voornaam == \"\"): return False\n if(naam == \"\"): return False\n\n #4\n return True\n\nif check_email_format(\"lotte.rombaut@student.howest.be\"):\n print(\"this is a valid email address\")\nelse:\n print(\"invalid\")","repo_name":"Howest-1NMCT1/Howest-1NMCT1-1NMCT1-LaboBasicProgramming-lotterombaut","sub_path":"W03_Lussen/Oef15.py","file_name":"Oef15.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42027400737","text":"from Tkinter import *;\nfrom dataobject import *;\nfrom dataoperationframe import *;\nfrom xpyfigure import *;\nfrom xpyfun import *;\nfrom tmp_pickrange import *;\n\nimport easygui;\nimport pylab;\nimport numpy;\n\nclass Fr_SpectCalSpectAggregation(DataOperationFrame,Tmp_PickRange):\n\tdef __init__(self,master=None,framename=None):\n\t\t#print 'type of arg datawindow', type(datawindow)\n\t\tDataOperationFrame.__dict__['__init__'](self,master,framename=framename);\n\t\tself.__ginit();\n\t\tTmp_PickRange.ginit(self);\n\t\t\n\tdef __ginit(self):\n\t\tself.gettmpdata('savename').set('aggspect');\n\t\t#for i in range(self['numofsourcedatabase']):\n\t\tself.ginitsourcedataentry('spectra');\n\t\tself.gettmpdata(\"groupstr\").set(\"'min(x)','max(x)'\");\n\t\t\n\t\tself['operatorstr']=StringVar();\n\t\tself['operatorstr'].set(\"spect['y'].mean()\");\n\t\t\n\t\tparameterframe=self['parameterframe'];\n\t\tl=Label(parameterframe, text=\"Operation (e.g. 
spect['y'].mean())\");\n\t\tl.pack(side=TOP);\n\t\te=Entry(parameterframe, textvariable=self['operatorstr']);\n\t\te.pack();\n\t\t\n\tdef analyze(self,igroup):\n\t\tdatabase=self.gettmpdata('database');\n\t\tspectra=database[0]['resultdatatablegroups'][igroup];\n\t\t\n\t\tspectranew=spectra.getemptyinstance();\n\t\t\n\t\t#spectra.plot();\n\t\t#print \"spectranew type:\",type(spectranew);\n\t\toperatorstr=self['operatorstr'].get();\n\t\txmin=float(self['xminstr'].get());\n\t\txmax=float(self['xmaxstr'].get());\n\n\t\tfor k in spectra.keys():\n\t\t\tspect0=spectra[k];\n\t\t\tspect=spect0.copyxy();\n\t\t\tspect.pick(xmin,xmax);\n\t\t\tcmd=operatorstr;\n\t\t\tys=eval(cmd);\n\t\t\tspect['agg']=ys;\n\t\t\tspectranew.insert(spect,k);\n\t\t\t\n\t\tspectagg=spectranew.uicolumn2xy(ycolumn='agg');\n\t\tspectagg.log({\"operation\":\"spectaggregation\",\"operator\":operatorstr});\n\t\tspectranew1=spectranew.getemptyinstance();\n\t\tspectranew1.insert(spectagg,'agg');\n\t\t\n\t\t\t\t\n\t\tdatabase[0]['resultdatatablegroups'][igroup]=spectranew1;\n\t\t\t\n\t\tXpyFigure();\n\t\tspectranew1.plot('o');\n\t\t#pass;\n\t\t\n\n","repo_name":"charleseagle/Data-Analysis-Software-Python","sub_path":"lib/emwspectexp/gui/analysisframe/calculation/fr_spectcalspectaggregation.py","file_name":"fr_spectcalspectaggregation.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"75146168339","text":"from .common import add_ebus_args\nfrom .common import add_msgdef_args\nfrom .common import create_ebus\nfrom .common import disable_stdout_buffering\nfrom .common import load_msgdefs\n\n\ndef parse_args(subparsers):\n \"\"\"Parse Arguments.\"\"\"\n parser = subparsers.add_parser(\"write\", help=\"Write value to the bus\")\n add_ebus_args(parser)\n add_msgdef_args(parser)\n parser.add_argument(\"field\", help=\"Field (i.e. 'ui/OutsideTemp/temp')\")\n parser.add_argument(\"value\", help=\"Value to apply (i.e. '5'). 
'NONE' is reserved for no value.\")\n parser.set_defaults(main=_main)\n\n\nasync def _main(args):\n disable_stdout_buffering()\n e = create_ebus(args)\n await load_msgdefs(e, args)\n for msgdef in e.msgdefs.resolve([args.field]):\n value = args.value if args.value != \"NONE\" else None\n await e.write(msgdef, value)\n","repo_name":"andr2000/ebus","sub_path":"ebus/cli/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35909544410","text":"import re\nimport cv2\nimport numpy as np\nimport pytesseract\nfrom pytesseract import Output\n\nIMG_DIR = '../images/'\n\n# Grayscale\ndef get_grayscale(image):\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Noise Removal\ndef remove_noise(image):\n return cv2.medianBlur(image,5)\n\n# Thresholding\ndef thresholding(image):\n return cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n\n# Opening - Erosion + Dilation\ndef opening(image):\n kernel = np.ones((5,5),np.uint8)\n return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n# Deskew\ndef deskew(image):\n coords = np.column_stack(np.where(image > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n return rotated\n\n# Example Image\nimage = cv2.imread(IMG_DIR + 'example.jpg')\nb,g,r = cv2.split(image)\nrgb_img = cv2.merge([r,g,b])\n\n# Fix Rotation\n#osd = pytesseract.image_to_osd(image)\n#angle = re.search('(?<=Rotate: )\\d+', osd).group(0)\n\n#if angle == \"90\":\n #image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n#if angle == \"180\":\n #image = cv2.rotate(image, cv2.ROTATE_180)\n#if angle == \"270\":\n #image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n\n# Preprocess image\ngray = get_grayscale(image)\nthresh = thresholding(gray)\nopening = opening(gray)\n\nimages = {'gray': gray,\n 'thresh': thresh,\n 'opening': opening\n }\n\n#Output using Pytesseract\ncustom_config = r'-l hin'\n# custom_config = r'-l hin -c tessedit_char_whitelist=0123456789 --psm 6'\nprint('Original Image')\nprint('-----------------------------------------')\nprint(pytesseract.image_to_string(image, config=custom_config))\nprint('\\n-----------------------------------------')\nprint('Threshholded Image')\nprint('-----------------------------------------')\nprint(pytesseract.image_to_string(thresh, config=custom_config))\nprint('\\n-----------------------------------------')\nprint('Opened Image')\nprint('-----------------------------------------')\nprint(pytesseract.image_to_string(opening, config=custom_config))\n","repo_name":"2KAbhishek/Manyata","sub_path":"cli/manyata.py","file_name":"manyata.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"75181592661","text":"from django.utils.encoding import force_unicode\nfrom django.conf import settings\n\nPRIORITY_MAPPING = {\n \"high\": \"1\",\n \"medium\": \"2\",\n \"low\": \"3\",\n \"deferred\": \"4\",\n}\n\n# replacement for django.core.mail.send_mail\n\ndef send_mail(subject, message, from_address, to_addresses, priority=\"medium\"):\n from mailer.models import Message\n # need to do this in case subject used lazy version of ugettext\n subject = 
force_unicode(subject)\n priority = PRIORITY_MAPPING[priority]\n for to_address in to_addresses:\n Message(to_address=to_address,\n from_address=from_address,\n subject=subject,\n message_body=message,\n priority=priority).save()\n\ndef mail_admins(subject, message, fail_silently=False, priority=\"medium\"):\n from mailer.models import Message\n priority = PRIORITY_MAPPING[priority]\n for name, to_address in settings.ADMINS:\n Message(to_address=to_address,\n from_address=settings.SERVER_EMAIL,\n subject=settings.EMAIL_SUBJECT_PREFIX + force_unicode(subject),\n message_body=message,\n priority=priority).save()\n\nif getattr(settings, 'MAILER_FOR_CRASH_EMAILS', False):\n from django.core.handlers import base\n base.mail_admins = mail_admins\n","repo_name":"ericholscher/allfeeds","sub_path":"pinax/apps/external_apps/mailer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"11013820371","text":"class ObjetoVacio:\n pass\n\nnada = ObjetoVacio()\nprint(type (nada))\n\nclass Llanta:\n cuenta = 0\n def __init__(mi, radio=50, ancho=30, presión=1.5):\n Llanta.cuenta += 1\n mi.radio = radio\n mi.ancho = ancho\n mi.presión =presión\n\nllanta1 = Llanta(50, 30, 1.5)\nllanta2 = Llanta(presión = 1.2)\nllanta3 = Llanta()\nllanta4 = Llanta(40, 30, 1.6)\n\nclass Coche:\n def __init__(mi, ll1, ll2, ll3, ll4):\n mi.llanta1 = ll1\n mi.llanta2 = ll2\n mi.llanta3 = ll3\n mi.llanta4 = ll4\n\n\nmicoche = Coche(llanta1, llanta2, llanta3, llanta4)\n\nprint(\"Total de llantas: \", Llanta.cuenta)\nprint(\"Presión de la llanta 4 = \", llanta4.presión)\nprint(\"Radio de la llanta 4 = \", llanta4.radio)\nprint(\"Radio de la llanta 3 = \", llanta3.radio)\nprint(\"Presión de la llanta 1 de mi coche = \", micoche.llanta1.presión)\n\nclass Estudiante:\n def __init__(mi):\n mi.__nombre = ''\n def ponerme_nombre(mi, nombre):\n print('se llamo a ponerme nombre')\n mi.__nombre = nombre\n def obtener_nombre(mi):\n print('se llamó a obener_nombre')\n return mi.__nombre\n nombre=property(obtener_nombre, ponerme_nombre)\n\nestudiante = Estudiante()\n\nestudiante.nombre = \"Ivan\"\n\nprint(estudiante.nombre)\n\nclass Cuadrilatero:\n def __init__(mi, a, b, c, d):\n mi.lado1 = a\n mi.lado2 = b\n mi.lado3 = c\n mi.lado4 = d\n\n def perimetro(mi):\n p = mi.lado1 + mi.lado2 +mi.lado3 + mi.lado4\n print(\"perimetro = \", p)\n return p\n\nclass Rectangulo(Cuadrilatero):\n def __init__(self, a, b):\n super().__init__(a, b, a, b)\n\nclass Cuadrado(Rectangulo):\n def __init__(self, a):\n super().__init__(a,a)\n\n def area(self):\n area = self.lado1**2\n return area\n\ncuadrado1 = Cuadrado(5)\n\nperimetro1 = cuadrado1.perimetro()\n\narea1 = cuadrado1.area()\n\nprint(\"Perimetro = \", perimetro1)\nprint(\"Área = \", area1)\n\nclass A:\n __a: float = 0.0\n __b: float = 0.0\n __c: float = 0.0\n\n def __init__(self, a: float, b: float, c: float):\n self.a = a\n self.b = b\n self.c = c\n\nclass B:\n __d: float = 0.0\n __e: float = 0.0\n\n def __init__(self, d: float, e: float):\n self.d = d\n self.e = e\n\n def sumar_todo(self, aa: float, bb: float):\n x: float = self.d + self. 
e + aa + bb\n return x\n\nobjetoA = A(1.0, 2.0, 3.0)\nobjetoB = B(4.0, 5.0)\nprint(objetoB.sumar_todo(objetoA.a, objetoA.b))\n\nclass C:\n __d: float = 0.0\n __e: float = 0.0\n __Aa: A = None\n\n def __init__(self, d: float, e: float):\n self.d = d\n self.e = e\n self.Aa = A(1.0, 2.0, 3.0)\n\n def sumar_todo(self):\n x: float = self.d + self.e + self.Aa.a + self.Aa.b\n return x\n\nobjetoC = C(4.0, 5.0)\nprint(objetoC.sumar_todo())\n\nclass D:\n __d: float = 0.0\n __e: float = 0.0\n __Aa: A = None\n\n def __init__(self,d: float, e: float, Aa:A):\n self.d = d\n self.e = e\n self.Aa = Aa\n\n def sumar_todo(self):\n x: float = self.d +self.e + self.Aa.a +self.Aa.b\n return x\n\nobjetoD = D(4.0, 5.0, objetoA)\nprint(objetoD.sumar_todo())\n","repo_name":"GustavoZaraT/ParadigmsDeProgramacion","sub_path":"Objetos.py","file_name":"Objetos.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70631928341","text":"import sys\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageTk\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtTest import *\nfrom PyQt5.QtWidgets import *\n\n\"\"\"\n 사용자 모듈\n\"\"\"\nfrom camera import *\nfrom common import *\nfrom modelEL import *\nfrom ui_autoel import *\nfrom ui_config import *\n\nfrom e1214_modbus import *\n\n\n\n\nclass Main_win(QMainWindow):\n \"\"\"\n cont 를 콘트롤 하기 위한 시그널 정의\n \"\"\"\n # detect_wheelchair_signal = pyqtSignal()\n # detect_stroller_signal = pyqtSignal()\n # detect_silvercar_signal = pyqtSignal()\n # detect_scuter_signal = pyqtSignal()\n # detect_open_signal = pyqtSignal()\n # detect_closer_signal = pyqtSignal()\n # detect_call_signal = pyqtSignal()\n\n\n def __init__(self):\n super().__init__()\n\n self.model = None\n\n self.img_size_w = 400\n self.img_size_h = 300\n\n self.init_ui()\n \n # self.init_timer()\n # self.init_model()\n\n self.data = read_config(path_config)\n self.init_cam()\n self.init_signal()\n\n\n self.init_menu()\n # self.init_img_contextmenu()\n\n self.init_timer(int(self.data['comm']['read_cam_time']))\n\n self.show_autoel()\n self.auto_exe()\n \n ### 프로그램 시작 시 자동실행\n def auto_exe(self):\n #초기값을 불러온다\n\n \n\n # print(self.data)\n\n print(f\"auto_run : {self.data['comm']['auto_start']}\")\n if bool(self.data['comm']['auto_start']):\n self.show_autoel()\n # self.init_timer(int(self.data['comm']['read_cam_time']))\n \n self.timer_infer.start()\n\n\n ###check\n \n \n\n ### 메뉴\n def init_menu(self):\n menubar = self.menuBar()\n menubar.setNativeMenuBar(False)\n \n exitAction = QAction(QIcon('exit.png'), 'Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n # exitAction.triggered.connect(qApp.quit)\n exitAction.triggered.connect(self.closeEvent)\n\n \n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(exitAction)\n\n ### 카메라 메뉴\n camMenu = menubar.addMenu(\"카메라\")\n\n camstartAction = QAction('영상 재시작', self)\n camstartAction.triggered.connect(self.restart_cam)\n\n camstopAction = QAction('영상 중지', self)\n camstopAction.triggered.connect(self.stop_cam)\n\n showrectAction = QAction('영역 설정', self)\n showrectAction.triggered.connect(self.show_rectPanel)\n\n saverectAction = QAction('영역 저장', self)\n saverectAction.triggered.connect(self.save_rect)\n\n camMenu.addAction(camstartAction)\n camMenu.addAction(camstopAction)\n camMenu.addAction(showrectAction)\n camMenu.addAction(saverectAction)\n\n\n ### UP CAM1 메뉴\n up_cam1Menu = menubar.addMenu(\"UP 
CAM1\")\n\n up_cam1_startAction = QAction('시작', self)\n up_cam1_startAction.triggered.connect(self.restart_cam)\n\n up_cam1_stopAction = QAction('중지', self)\n up_cam1_stopAction.triggered.connect(self.stop_cam)\n\n up_cam1_poiAction = QAction('영역 설정', self)\n up_cam1_poiAction.triggered.connect(self.show_rectPanel)\n\n \n up_cam1Menu.addAction(up_cam1_startAction)\n up_cam1Menu.addAction(up_cam1_stopAction)\n up_cam1Menu.addAction(up_cam1_poiAction)\n # up_cam1Menu.addAction(saverectAction)\n\n\n\n ### I/O 제어기 메뉴\n configMenu = menubar.addMenu(\"설정\")\n\n showconfigAction = QAction('설정', self)\n # exitAction.setShortcut('Ctrl+Q')\n # showconfigAction.setStatusTip('설정')\n showconfigAction.triggered.connect(self.show_configPanel)\n\n configMenu.addAction(showconfigAction)\n\n fileDialogAction = QAction('불러오기', self)\n fileDialogAction.triggered.connect(self.file_open_dialog)\n configMenu.addAction(fileDialogAction)\n\n ### I/O 제어기\n ioMenu = menubar.addMenu(\"I/O 제어기\")\n\n showioAction = QAction('상태보기', self)\n showioAction.triggered.connect(self.show_io)\n ioMenu.addAction(showioAction)\n\n \n\n ## 카메라 플레이하기 위한 준비\n def init_cam(self):\n \n\n if self.model == None:\n self.model = Model()\n\n self.up_cam1 = None\n self.up_cam2 = None\n self.dn_cam1 = None\n self.dn_cam2 = None\n\n self.list_cam = [self.up_cam1, self.up_cam2, self.dn_cam1, self.dn_cam2]\n self.list_cam_stat = [0,0,0,0]\n self.list_url = [self.data['up']['cam1']['cam']['url'], self.data['up']['cam2']['cam']['url'], self.data['dn']['cam1']['cam']['url'], self.data['dn']['cam2']['cam']['url']]\n self.list_img = [self.ui_autoel.up_floor.img_cam1, self.ui_autoel.up_floor.img_cam2, self.ui_autoel.dn_floor.img_cam1, self.ui_autoel.dn_floor.img_cam2]\n self.list_name = [\"up cam1\", \"up cam2\", \"down cam1\", \"down cam2\"]\n self.list_detect = [self.data['up']['cam1'].get('detect'), self.data['up']['cam2'].get('detect'), self.data['dn']['cam1'].get('detect'), self.data['dn']['cam2'].get('detect')]\n self.list_cont = [self.ui_autoel.up_floor.edit_cont, self.ui_autoel.up_floor.edit_cont, self.ui_autoel.dn_floor.edit_cont, self.ui_autoel.dn_floor.edit_cont]\n self.list_poi = [self.data['up']['cam1'].get('poi'), self.data['up']['cam2'].get('poi'), self.data['dn']['cam1'].get('poi'), self.data['dn']['cam2'].get('poi')]\n self.list_io = [self.data['up'].get('io'), self.data['up'].get('io'), self.data['dn'].get('io'), self.data['dn'].get('io')]\n\n\n for i in range(4):\n self.list_img[i]._flag_show_text = True\n self.list_img[i].text_str = \"연결 안됨\"\n self.list_img[i].tag = i\n self.list_cam[i] = VideoThread()\n self.list_cam[i].set_vi(self.list_url[i], self.list_name[i])\n self.list_cam[i].tag = i\n self.list_cam[i].set_img_size(self.img_size_w, self.img_size_h)\n self.list_cam[i]._run_flag = True\n\n self.list_cont[i].init_io(self.list_io[i])\n # if bool(self.list_poi[i]['use']):\n # print(f\"{self.list_poi[i]['use']}\")\n # rect = QRect(int(self.list_poi[i]['x']), int(self.list_poi[i]['y']), int(self.list_poi[i]['e_x']), int(self.list_poi[i]['e_y']))\n # self.list_img[i].receive_rect(rect)\n # self.list_img[i].show_ract(True)\n # self.list_img[i].draw_ract(False)\n # self.list_cam[i].start()\n\n\n ## 화면 디자인, 여러화면(QStackedWidget) 생성\n def init_ui(self):\n self.setWindowTitle(\"자동호출 시스템 Ver 3.1\")\n\n self.statusBar = self.statusBar()\n self.statusBar.showMessage(\"ready\")\n\n # self.setWindowIcon(QIcon('./assets/editor.png'))\n self.setGeometry(0, 0, 1300, 600)\n\n #UI 생성\n self.ui_config = UI_config()\n \n self.ui_autoel = 
UI_autoel()\n\n self.stacked_widget = QStackedWidget()\n self.stacked_widget.addWidget(self.ui_config)\n self.stacked_widget.addWidget(self.ui_autoel)\n self.setCentralWidget(self.stacked_widget)\n\n def init_signal(self):\n for i in range(4):\n self.list_img[i].signal_rect.connect(self.receive_rect)\n self.list_cam[i].change_state_signal.connect(self.receive_state)\n\n @pyqtSlot(int, int)\n def receive_state(self, tag, stat):\n # print(f'receive state : {tag}, {stat} ')\n self.list_cam_stat[tag] = stat\n print(f'self.list_cam_stat : {self.list_cam_stat}')\n\n @pyqtSlot(int, QRect)\n def receive_rect(self, tag, rect):\n print(f'rect receive : {tag} {rect} {rect.left()} {rect.top()} {rect.width()} {rect.height()}')\n # print(f'{self.list_poi[tag]}')\n\n self.list_poi[tag]['x'] = str(rect.left())\n self.list_poi[tag]['y'] = str(rect.top())\n self.list_poi[tag]['e_x'] = str(rect.width())\n self.list_poi[tag]['e_y'] = str(rect.height())\n\n print(f'{self.list_poi[tag]}')\n print(f'{self.data}')\n\n write_config(path_config, self.data)\n \n\n\n ## 반복수행 타이머\n def init_timer(self, repeat_time):\n self.timer_infer = QTimer()\n self.timer_infer.setInterval(repeat_time)\n # self.tm.timeout.connect(self.time_process)\n self.timer_infer.timeout.connect(self.infer_process)\n\n\n ## 종료시 정말종료할까요 물어보기\n def closeEvent(self, event):\n quit_msg = \"Want to exit?\"\n replay = QMessageBox.question(self, \"Message\", quit_msg, QMessageBox.Yes, QMessageBox.No)\n if replay == QMessageBox.Yes:\n # event.accept()\n qApp.quit()\n else:\n event.ignore()\n\n ## file open dialog 생성\n def file_open_dialog(self):\n fname = QFileDialog.getOpenFileName(self, 'Open file', './')\n print(type(fname), fname)\n\n\n ## config 파일을 읽고, 데이터 초기화\n def read_config(self):\n self.ui_autoel.up_floor.set_config(self.data['up'], \"상부\")\n self.ui_autoel.dn_floor.set_config(self.data['dn'], \"하부\")\n\n self.ui_autoel.up_floor.edit_cont.init_io(self.data['up']['io'])\n self.ui_autoel.dn_floor.edit_cont.init_io(self.data['dn']['io'])\n \n\n\n ## 모든 카메라 재시작\n def restart_cam(self):\n self.show_autoel()\n self.stop_cam()\n QTest.qWait(100)\n\n self.auto_exe()\n # self.init_cam()\n # self.timer_infer.start()\n \n ## 모든 카메라 중지\n def stop_cam(self):\n if self.timer_infer.isActive():\n self.timer_infer.stop()\n\n for i in range(4):\n self.list_cam[i]._run_flag = False\n self.list_img[i].load_default_img()\n self.list_img[i].show_ract(False)\n self.list_img[i].draw_ract(False)\n # self.list_cam[i].start()\n\n \n ## 영상분석 프로세스\n def infer_process(self):\n # print(f'timer process : {now_time_str()}')\n # obj_cam, obj_img_label, obj_cont, obj_detect_list, name\n time_str = now_time_str()\n\n # list_cam_use [self.]\n \n\n for ii in range(4):\n if self.list_cam_stat[ii] == RUN_OK:\n img = self.list_cam[ii].get_img() ##cv_img 를 가져온다\n\n\n if self.list_poi[ii]['use']:\n ## 영역만 detect\n ## poi 영역 계산\n x= int(self.list_poi[ii]['x'])\n y= int(self.list_poi[ii]['y'])\n end_x= int(self.list_poi[ii]['e_x']) + x\n end_y= int(self.list_poi[ii]['e_y']) + y\n \n ## 관심영역 테두리 표시\n cv2.rectangle(img, (x, y), (end_x, end_y), (0,0,255), 2)\n # print(f'ROI AREA : {x} {y} {end_x} {end_y}')\n \n ## 관심영역 카피\n roi_img = img[y:end_y, x:end_x]\n # print(f'img : {type(img)} {img.shape}, roi : {type(roi_img)} {roi_img.shape}')\n\n roi_img2, label_list = self.infer(roi_img, self.list_detect[ii])\n img[y:end_y, x:end_x] = roi_img2\n\n \n else: ## 전체 detect\n img, label_list = self.infer(img, self.list_detect[ii])\n \n\n self.list_cont[ii].receive_data( self.list_name[ii], 
label_list)\n # self.list_cont[ii].append(f'{self.list_name[ii]}, {label_list}')\n \n self.list_img[ii].text_str = f\"수신 시각 : {time_str}\"\n self.list_img[ii].changePixmap(img)\n \n ## 필요한 처리 \n ## put_text(self, frame, text, w, h, color):\n # if img is None:\n # print(f\"get_img : image is None\")\n # else:\n # # img = self.put_text(img, time_str, 10, 10, (255, 255, 0))\n # self.list_img[ii].changePixmap(img)\n # elif self.list_cam_stat[ii] == RUN_FAIL: ## VT 멈추면 재시작\n # print(f\"VT restart\")\n # self.list_cam[ii]._flag_show_text = True\n # self.list_cam[ii]._run_flag = True\n # self.list_cam[ii].start()\n\n\n ### 영상분석\n ### input : cv_img, detect_list\n ### return : img, label_list을 \n def infer(self, cv_img, detect_list):\n\n # print(\"infer ...\")\n labels, cord = self.model.score_frame(cv_img)\n # print(f'score_frame => {labels} : {type(labels)}, {cord} : {type(cord)}')\n label_list = []\n label_dict = {}\n n = len(labels)\n frame = cv_img\n x_shape, y_shape = frame.shape[1], frame.shape[0]\n for i in range(n):\n row = cord[i]\n name = self.model.class_to_label(labels[i])\n # print(name)\n if name in detect_list:\n if row[4] > float(detect_list[name]):\n # print(f'{name} {row[4]} {type(row[4])} {detect_list[name]}, {type(float(detect_list[name]))}, {float(detect_list[name]) - row[4] }')\n\n # print(f'row[4] > float(detect_list[name]) {row[4]} {float(detect_list[name])}')\n x1, y1, x2, y2 = int(row[0] * x_shape), int(row[1] * y_shape), int(row[2] * x_shape), int(row[3] * y_shape)\n str = name + \": %0.2f\" % row[4]\n # print(f'plot_boxes str : {str}')\n label_dict[name] = \"%0.2f\" % row[4]\n cv2.rectangle(frame, (x1, y1), (x2, y2), self.model.colors(int(labels[i])), 2)\n \n cv2.putText(frame, str, (x1+5, y1+15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, self.model.colors(int(labels[i])), 1)\n \n # label_list.append(str)\n # time_str = now_time_str()\n # cv2.putText(frame, time_str, (self.img_size_w - 100, self.img_size_h - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255,255,0), 1)\n return frame, label_dict\n\n ### 설정 화면\n def show_configPanel(self):\n self.stop_cam()\n self.ui_config.clicked_read()\n # self.setCentralWidget(self.ui_config)\n self.stacked_widget.setCurrentWidget(self.ui_config) \n\n def show_rectPanel(self):\n # self.stop_cam()\n for i in range(4):\n # if bool(self.list_poi[i]['use']):\n # rect = QRect(int(self.list_poi[i]['x']), int(self.list_poi[i]['y']), int(self.list_poi[i]['w']), int(self.list_poi[i]['h']))\n # self.list_img[i].receive_rect(rect)\n self.list_img[i].show_ract(True)\n self.list_img[i].draw_ract(True)\n\n def show_autoel(self):\n # self.ui_autoel = UI_autoel()\n # self.setCentralWidget(self.ui_autoel)\n self.stacked_widget.setCurrentWidget(self.ui_autoel) \n\n def save_rect(self):\n ## 화면크기를 벋어나지 않도록 조정\n # self.img_size_w = 400, self.img_size_h = 300\n for i in range(4):\n print(f'rect begin: {self.list_img[i].begin.x()}, {self.list_img[i].begin.y()} end: {self.list_img[i].end.x()}, {self.list_img[i].end.y()}')\n print(f'type: {type(self.list_img[i].begin.x())}')\n \n # if int(self.list_img[i].begin.x()) < 0:\n # self.list_poi[i]['x'] = \"0\"\n # elif int(self.list_img[i].begin.x()) > self.img_size_w:\n # self.list_poi[i]['x'] = str(self.img_size_w)\n # else:\n # self.list_poi[i]['x'] = str(self.list_img[i].begin.x())\n\n # if int(self.list_img[i].begin.y()) < 0:\n # self.list_poi[i]['y'] = \"0\"\n # elif int(self.list_img[i].begin.y()) > self.img_size_h:\n # self.list_poi[i]['y'] = str(self.img_size_h)\n # else:\n # self.list_poi[i]['y'] = 
str(self.list_img[i].begin.y())\n\n # if int(self.list_img[i].end.x()) < 0:\n # self.list_poi[i]['e_x'] = \"0\"\n # elif int(self.list_img[i].end.x()) > self.img_size_w:\n # self.list_poi[i]['e_x'] = str(self.img_size_w)\n # else:\n # self.list_poi[i]['e_x'] = str(self.list_img[i].end.x() - self.list_img[i].begin.x())\n\n # if int(self.list_img[i].end.y()) < 0:\n # self.list_poi[i]['e_y'] = \"0\"\n # elif int(self.list_img[i].end.y()) > self.img_size_h:\n # self.list_poi[i]['e_y'] = str(self.img_size_h)\n # else:\n # self.list_poi[i]['e_y'] = str(self.list_img[i].end.y() - self.list_img[i].begin.y())\n\n # # self.list_poi[i]['x'] = str(self.list_img[i].begin.x())\n # # self.list_poi[i]['y'] = str(self.list_img[i].begin.y())\n # # self.list_poi[i]['e_x'] = str(self.list_img[i].end.x() - self.list_img[i].begin.x())\n # # self.list_poi[i]['e_y'] = str(self.list_img[i].end.y() - self.list_img[i].begin.y())\n\n # # print(f'{self.list_poi[i]}')\n\n # data = self.read_ui_make_data()\n # print(data)\n # write_config(path_config, self.data)\n\n def show_io(self):\n self.io_panel = None\n ip = self.data['up']['io']['value_io_ip']\n port = 502\n self.io_panel = Win_io(ip, port)\n self.io_panel.setGeometry(100,800, 300, 100)\n self.io_panel.show()\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ex = Main_win()\n ex.show()\n sys.exit(app.exec_())\n","repo_name":"San32/ael","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12289001508","text":"import pickle\nimport numpy as np\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.externals import joblib\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/result', methods=['POST'])\ndef result():\n \n #アプリ化したときにすぐに値段が出るように関数を定義\n def diamond_pred(param_ct,param_color, param_clarity, param_cut):\n #param_color = ''\n if param_color == 'D':\n param_color = 9\n elif param_color == 'E':\n param_color = 8\n elif param_color == 'F':\n param_color = 7\n elif param_color == 'G':\n param_color = 6\n elif param_color == 'H':\n param_color = 5\n elif param_color == 'I':\n param_color = 4\n elif param_color == 'J':\n param_color = 3\n elif param_color == 'K':\n param_color = 2\n elif param_color == 'L':\n param_color = 1\n \n #param_clarity = ''\n if param_clarity =='VVS1':\n param_clarity = 7\n elif param_clarity =='VVS2':\n param_clarity = 6\n elif param_clarity =='VS1':\n param_clarity = 5\n elif param_clarity =='VS2':\n param_clarity = 4\n elif param_clarity =='SI1':\n param_clarity = 3\n elif param_clarity =='SI2':\n param_clarity = 2\n elif param_clarity =='I1':\n param_clarity = 1\n \n #param_cut = ''\n if param_cut == 'EX':\n param_cut = 5\n elif param_cut == 'VG':\n param_cut = 4\n elif param_cut == 'G':\n param_cut = 3\n elif param_cut == 'FAIR':\n param_cut = 2\n elif param_cut == 'POOR':\n param_cut = 1\n \n pred = np.array([[param_ct,param_color, param_clarity, param_cut]])\n pred_poly = PolynomialFeatures(degree=3).fit(pred)\n pred_poly_2 = pred_poly.transform(pred)\n return pred_poly_2\n \n \n\n \n param_ct =request.form['ct']\n param_color =request.form['color']\n param_clarity =request.form['clarity']\n param_cut = request.form['cut']\n param =(param_ct, param_color, param_clarity, param_cut)\n\n model_Ridge = joblib.load(\"./data/model_Ridge.pkl\")\n DP = 
diamond_pred(param_ct,param_color, param_clarity, param_cut)\n pred_out = model_Ridge.predict(DP)\n result = '{0:.0f}'.format(pred_out[0]*7000)\n \n return render_template('result.html',result=result, param=param)\n\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"genpy/diamond_ai","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42949887820","text":"a = 40\r\nb = 60\r\nc = 20\r\n\r\nprint(f\"Aperol Spritz cocktail recipe, ml: liqueur\", a, \"champagne\", b, \"soda\", c)\r\n\r\nliquor = int(input(\"Enter the liqueur volume: \"))\r\nif liquor != a:\r\n print(\"Incorrect\")\r\n\r\nif liquor == a:\r\n print(\"Correct\")\r\n\r\nchampagne = int(input(\"Enter the champagne volume: \"))\r\nif champagne != b:\r\n print(\"Incorrect\")\r\n\r\nif champagne == b:\r\n print(\"Correct\")\r\n\r\nsoda = int(input(\"Enter the soda volume: \"))\r\nif soda != c:\r\n print(\"Incorrect\")\r\n\r\nif soda == c:\r\n print(\"Correct\")\r\n\r\ncomponent_1 = input(\"Enter the name of the first component: \")\r\n\r\ncomponent_2 = input(\"Enter the name of the second component: \")\r\n\r\ncomponent_3 = input(\"Enter the name of the third component: \")\r\n\r\n\r\nprint(f\"Recipe: {component_1} {liquor} {component_2} {champagne} {component_3} {soda}\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Anynastya29/my_project","sub_path":"Task01.py","file_name":"Task01.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41740681794","text":"# Create the MT19937 stream cipher and break it\nfrom utilities import MT19937, fixed_xor\nfrom binascii import b2a_hex\nfrom os import urandom\nfrom random import randint\nfrom time import time\n\n\ndef constructPlaintext(known):\n\tunknown = urandom(randint(1,10))\n\treturn unknown + known\n\ndef trivial_stream_cipher(text,key,numbits):\n\tassert key < 2**numbits\n\tr = MT19937(key)\n\toutput = bytearray()\n\tnum = r.extract_number()\n\tfor char in text:\n\t\tif num == 0:\n\t\t\tnum = r.extract_number()\n\t\tkeystream_char = num & 0xff\n\t\tnum = num>>8\n\t\toutput.append(keystream_char^char)\n\treturn bytes(output)\n\nnumbits = 16\nseedkey = int(b2a_hex(urandom(int(numbits/8))),16)\nknownendtext = b'A'*14\nplaintext = constructPlaintext(knownendtext)\nciphertext = trivial_stream_cipher(plaintext,seedkey,numbits)\n\nassert plaintext == trivial_stream_cipher(ciphertext,seedkey,numbits)\n\ndef pw_reset_token():\n\t\tr = MT19937(time())\n\t\treturn r.extract_number()\n#Brute force\ndef brute_force_2byte_key(ciphertext,knownendtext,numbits):\n\tfor key in range(2**numbits):\n\t\tpt = trivial_stream_cipher(ciphertext,key,numbits)\n\t\tif pt[-len(knownendtext):] == knownendtext:\n\t\t\treturn key\n\tassert False, \"Did not find key\"\n\n\n\nprint(\"Orig key {} and brute force key {}\".format(seedkey,brute_force_2byte_key(ciphertext,knownendtext, numbits)))\n\n","repo_name":"alexm711/python-cryptopals","sub_path":"python3_sets/challenge24.py","file_name":"challenge24.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24927229447","text":"import os\nimport json\nimport boto3\nimport twitter\n\nsession = boto3.session.Session()\nsns = session.client('sns')\nsm = session.client('secretsmanager')\neb = 
session.client('events')\n\ntwitter_secret = json.loads(sm.get_secret_value(SecretId=os.environ['twitter_secret'])['SecretString'])\n\napi = twitter.Api(consumer_key = twitter_secret['consumer_key'],\n consumer_secret = twitter_secret['consumer_secret'],\n access_token_key = twitter_secret['access_token_key'],\n access_token_secret = twitter_secret['access_token_secret'])\n\nres = api.VerifyCredentials()\nuser_id = str(res.id)\n\ndef handler(event, context):\n print (json.dumps(event))\n result = {\n \"statusCode\": 200\n }\n \n try:\n # an exception will be thrown if the key doesn't exist. easier to ask for forgiveness than permission \n if event[\"requestContext\"]['httpMethod'] == \"POST\":\n \n # Queue up each tweet for processing\n body = json.loads(event[\"body\"])\n for_user_id = body['for_user_id']\n \n if for_user_id == user_id: # @reInventSched\n for tweet in body['tweet_create_events']:\n \n # Ignore retweets \n if 'retweeted_status' in tweet:\n print(f\"Ignoring retweet {tweet['id']}\")\n continue\n \n # Don't respond to ourself\n if tweet['user']['id_str'] == user_id: # @reInventSched \n print(f\"Ignoring our own tweet {tweet['id']}\")\n continue\n \n response = eb.put_events(\n Entries=[{\n 'Source': 'reInventSched',\n 'DetailType': 'tweet',\n 'Detail': json.dumps(tweet)\n }]) \n print(response)\n\n # returns properly formatted json response\n result['statusCode'] = 200\n \n except KeyError:\n pass \n\n return result","repo_name":"AwsGeek/reinvent-sched","sub_path":"functions/twitter_webhook_func/lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"12"} +{"seq_id":"15456193969","text":"\"\"\"\nA 2D numpy array is used to represent an element (fire, water, cloud, vegetation).\nEach cell has a 'strength' value associated with it; min_z, max_z are used to normalize the strength values\nin order to compare against other neighborhoods.\n\"\"\"\n\nimport numpy as np\n\n\nclass Neighborhood:\n def __init__(self, min_z, max_z, size):\n self.relative_min, self.relative_max = min_z, max_z\n self.cells = np.random.uniform(low=min_z, high=max_z, size=size)\n self.cells_copy = self.cells.copy()\n self.shape = self.cells.shape\n","repo_name":"cagmz/3d-elements","sub_path":"neighborhood.py","file_name":"neighborhood.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25596864507","text":"from datetime import datetime\ndef async_time():\n c = str(datetime.now())\n print(c)\n b = c[0:10]\n c = c[11:-7]\n print(c)\n e = c.split(':')\n hours = int(input(\"Enter hours: \\n\"))\n if hours<10:\n hours = '0'+str(hours)\n minutes = int(input(\"Enter minutes: \\n\"))\n if minutes<10:\n minutes = '0'+str(minutes)\n seconds = int(input(\"Enter seconds: \\n\"))\n if seconds<10:\n seconds = '0'+str(seconds)\n e[0],e[1],e[2] = hours,minutes,seconds\n new_time = str(e[0])+':'+str(e[1])+':'+str(e[2])\n total = b+' '+new_time+'.'+'000000'\n while 1:\n x = str(datetime.now())\n print(f\"{x[11:-7]} - {total[11:-7]}\")\n if str(x)[11:-7]==str(total)[11:-7]:\n break\n else:\n continue\nasync_time()","repo_name":"mgkagithub/py","sub_path":"other/async_time.py","file_name":"async_time.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12643427926","text":"import os\nimport discord\nfrom 
discord.ext import commands\nfrom tinydb import TinyDB, Query\nimport requests\n\ntoken = os.environ['discord_key']\n\nintents = discord.Intents.default()\n\nclient = discord.Client(intents=intents)\nbot = commands.Bot(command_prefix='$', intents=intents)\n\ndb = TinyDB('./users.db')\ndbuser = Query()\n\n@bot.event\nasync def on_ready():\n print(f'{bot.user} has connected to discord')\n\n@bot.command()\nasync def setup(message, query: str):\n channel = message.channel\n userID = message.author.id\n\n req = f'https://nominatim.openstreetmap.org/search?q={query}&format=json'\n r = requests.get(req).json()\n\n print(r)\n # await channel.send(f\"You are {userID}, requesting for {query}\")\n await channel.send(f\"Location: {r[0]['display_name']}\")\n \n lat = r[0]['lat']\n lon = r[0]['lon']\n\n if db.search(dbuser.id == userID) == []:\n db.insert({\n 'id':userID,\n 'lat' : lat,\n 'lon' : lon,\n 'name' : r[0]['display_name']\n })\n else:\n db.update({'lat' :lat, 'lon' : lon, 'name' : r[0]['display_name']}, dbuser.id == userID)\n\n\ndef getUserLoc(userID):\n # userID = message.author.id\n userinfo = db.search(dbuser.id == int(userID))[0]\n name, lat, lon = userinfo['name'], userinfo['lat'], userinfo['lon']\n\n return name,lat,lon\n\n@bot.command()\nasync def aqi(message):\n name, lat, lon = getUserLoc(message.author.id)\n\n token = '7c3a4323f76a6485febfb00a0ec5f161984d9225'\n baseurl = 'https://api.waqi.info'\n # print(lat, lon)\n req = f'{baseurl}/feed/geo:{lat};{lon}/?token={token}'\n r = requests.get(req).json()\n # print(r)\n\n aqi = r['data']['aqi']\n o3 = r['data']['iaqi']['o3']['v']\n pm25 = r['data']['iaqi']['pm25']['v']\n\n await message.channel.send(f\"Information for {name}\")\n await message.channel.send(f\"The AQI is {aqi}\")\n await message.channel.send(f\"PM2.5: {pm25}\")\n await message.channel.send(f\"Ozone: {o3}\")\nbot.run(token) ","repo_name":"lsterzinger/aqibot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13215337973","text":"from pkg_resources import iter_entry_points\n\nfrom datetime import datetime\nimport pkg_resources\nimport time\n\nimport oaipmh\nimport oaipmh.metadata\nimport oaipmh.server\nimport oaipmh.error\n\ndef get_writer(prefix, config, db):\n for writer in iter_entry_points(group='moai.format', name=prefix):\n return writer.load()(prefix, config, db)\n else:\n raise ValueError('No such metadata format registered: %s' % prefix)\n\n\nclass OAIServer(object):\n \"\"\"An OAI-2.0 compliant oai server.\n \n Underlying code is based on pyoai's oaipmh.server\n \"\"\"\n \n def __init__(self, db, config):\n self.db = db\n self.config = config\n\n def identify(self):\n result = oaipmh.common.Identify(\n repositoryName=self.config.name,\n baseURL=self.config.url,\n protocolVersion='2.0',\n adminEmails=self.config.admins,\n earliestDatestamp=self.db.oai_earliest_datestamp(),\n deletedRecord='transient',\n granularity='YYYY-MM-DDThh:mm:ssZ',\n compression=['identity'],\n toolkit_description=False)\n\n version = ''\n pyoai_egg = pkg_resources.working_set.find(\n pkg_resources.Requirement.parse('pyoai'))\n moai_egg = pkg_resources.working_set.find(\n pkg_resources.Requirement.parse('MOAI'))\n \n if moai_egg and pyoai_egg:\n version = '%s (using pyoai%s)' % (\n moai_egg.version,\n pyoai_egg.version)\n result.add_description(\n '<toolkit>'\n '<title>MOAI</title>'\n '<version>%s</version>'\n '<URL>http://moai.infrae.com</URL>'\n '</toolkit>' % version)\n \n return result\n\n def 
listMetadataFormats(self, identifier=None):\n result = []\n for prefix in self.config.metadata_prefixes:\n writer = get_writer(prefix, self.config, self.db)\n ns = writer.get_namespace()\n schema = writer.get_schema_location()\n result.append((prefix, schema, ns))\n return result\n \n def listSets(self, cursor=0, batch_size=20):\n for set in self.db.oai_sets(cursor, batch_size):\n yield [set['id'], set['name'], set['description']]\n\n def listRecords(self, metadataPrefix, set=None, from_=None, until=None,\n cursor=0, batch_size=10):\n \n self._checkMetadataPrefix(metadataPrefix)\n for record in self._listQuery(set, from_, until, cursor, batch_size):\n header, metadata = self._createHeaderAndMetadata(record)\n yield header, metadata, None\n\n def listIdentifiers(self, metadataPrefix, set=None, from_=None, until=None,\n cursor=0, batch_size=10):\n \n self._checkMetadataPrefix(metadataPrefix)\n for record in self._listQuery(set, from_, until, cursor, batch_size):\n yield self._createHeader(record)\n\n def getRecord(self, metadataPrefix, identifier):\n self._checkMetadataPrefix(metadataPrefix)\n header = None\n metadata = None\n for record in self._listQuery(identifier=identifier):\n header, metadata = self._createHeaderAndMetadata(record)\n if header is None:\n raise oaipmh.error.IdDoesNotExistError(identifier)\n return header, metadata, None\n \n def _checkMetadataPrefix(self, metadataPrefix):\n if metadataPrefix not in self.config.metadata_prefixes:\n raise oaipmh.error.CannotDisseminateFormatError\n\n def _createHeader(self, record):\n deleted = record['deleted']\n for setspec in record['sets']:\n if setspec in self.config.sets_deleted:\n deleted = True\n break\n return oaipmh.common.Header(record['id'],\n record['modified'],\n record['sets'],\n deleted)\n\n def _createHeaderAndMetadata(self, record):\n header = self._createHeader(record)\n metadata = oaipmh.common.Metadata(record)\n metadata.record = record\n return header, metadata\n \n def _listQuery(self, set=None, from_=None, until=None, \n cursor=0, batch_size=10, identifier=None):\n \n now = datetime.utcnow()\n if until != None and until > now:\n # until should never be in the future\n until = now\n \n if self.config.delay:\n # subtract delay from until_ param, if present\n if until is None:\n until = datetime.utcnow()\n until = until.timetuple()\n ut = time.mktime(until)-self.filter_data.delay\n until = datetime.fromtimestamp(ut)\n \n needed_sets = self.config.sets_needed.copy()\n if not set is None:\n needed_sets.add(set)\n allowed_sets = self.config.sets_allowed.copy()\n disallowed_sets = self.config.sets_disallowed.copy() \n \n return self.db.oai_query(offset=cursor,\n batch_size=batch_size,\n needed_sets=needed_sets,\n disallowed_sets=disallowed_sets,\n allowed_sets=allowed_sets,\n from_date=from_,\n until_date=until,\n identifier=identifier\n )\n\ndef OAIServerFactory(db, config):\n \"\"\"Create a new OAI batching OAI Server given a config and\n a database\"\"\"\n \n metadata_registry = oaipmh.metadata.MetadataRegistry()\n for prefix in config.metadata_prefixes:\n metadata_registry.registerWriter(prefix,\n get_writer(prefix, config, db))\n \n return oaipmh.server.BatchingServer(\n OAIServer(db, config),\n metadata_registry=metadata_registry,\n resumption_batch_size=config.batch_size\n )\n","repo_name":"infrae/moai","sub_path":"moai/oai.py","file_name":"oai.py","file_ext":"py","file_size_in_byte":6330,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"12"} 
+{"seq_id":"42632284809","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nimport torch.optim as optim\n\nimport argparse\nimport os\nimport matplotlib.pyplot as plt\n\nfrom dataset import My_Dataset\nimport models\n\n# parameters\nparser = argparse.ArgumentParser(description='Sentiment Analysis') \nparser.add_argument('--model', type=str, default='CNN', help='Model Name')\nparser.add_argument('--train', default=False, action='store_true', help='Train or not')\nparser.add_argument('--continue', default=False, action='store_true', help='Continue on the last training model or not', dest='continuing')\nparser.add_argument('--report', default=False, action='store_true', help='Report all models\\' score')\nparser.add_argument('--epochs', type=int, default=40, help='Number of epochs')\nparser.add_argument('--patience', type=int, default=7, help='How long to wait after last time validation loss improved')\noptions = parser.parse_args()\nprint(options)\n\ntrain_file = './Dataset/train.txt'\nvalid_file = './Dataset/validation.txt'\ntest_file = './Dataset/test.txt'\nword2vec_file = './Dataset/wiki_word2vec_50.bin'\nmodel_path = './Model/'\nimg_path = './img/'\nnum_workers=0\nn_epochs = options.epochs\nbatch_size_trian = 256\nbatch_size_valid = 512\nbatch_size_test = 512\nrandom_seed = 2023\nrecord_size = 1024\nmax_stop_count = options.patience\nstep_size = 4\nmax_lr, base_lr = {'MLP': (0.0002, 0.00004), 'CNN': (0.008, 0.0016), 'RNN': (0.0013, 0.00026)}[options.model]\nlearning_rate = (max_lr + base_lr) / 2\n\n\ntorch.manual_seed(random_seed)\n\n\n# data loader\nprint('\\nLoading data...')\ntrain_set = My_Dataset(train_file, word2vec_file)\nvalid_set = My_Dataset(valid_file, word2vec_file)\ntest_set = My_Dataset(test_file, word2vec_file)\ntrain_loader = DataLoader(dataset=train_set, batch_size=batch_size_trian, shuffle=True, num_workers=num_workers, pin_memory=True)\nvalid_loader = DataLoader(dataset=valid_set, batch_size=batch_size_valid, shuffle=True, num_workers=num_workers, pin_memory=True)\ntest_loader = DataLoader(dataset=test_set, batch_size=batch_size_test, shuffle=True, num_workers=num_workers, pin_memory=True)\nprint('Data loaded!\\n')\n\n\n# build model\nnetwork = getattr(models, options.model)()\nif torch.cuda.is_available():\n network.cuda()\noptimizer = optim.AdamW(network.parameters(), lr=learning_rate)\nscheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size, step_size_down=step_size, cycle_momentum=False)\ncriterion = nn.CrossEntropyLoss()\n\n\n# training process\ntrain_losses = []\ntrain_counter = []\nvalid_losses = []\nvalid_acc = []\nvalid_counter = []\nloss_min = None\nstop_count = 0\n\ndef train(epoch):\n network.train()\n record_loss = 0\n record_batch = 0\n print('Train Epoch', epoch)\n print(\"learning: {:.8f}\".format(optimizer.param_groups[0]['lr']))\n for batch_idx, (data, target, input_len) in enumerate(train_loader):\n # calculate\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n optimizer.zero_grad()\n output = network(data, input_len)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n\n #record\n record_loss += loss.item()\n record_batch += 1\n if batch_idx*batch_size_trian % record_size == 0:\n record_loss /= record_batch\n train_losses.append(record_loss)\n train_counter.append((batch_idx * batch_size_trian) + ((epoch - 1) * len(train_loader.dataset)))\n record_loss = 0\n record_batch = 0\n scheduler.step()\n\n\ndef 
test_acc(data_loader):\n network.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target, input_len in data_loader:\n # calculate\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n output = network(data, input_len)\n loss = criterion(output, target).item()\n pred = output.detach().max(1)[1]\n correct += (target == pred.view_as(target)).sum().item()\n # record\n test_loss += loss\n test_loss /= len(data_loader)\n return correct, len(data_loader.dataset), test_loss\n\n\ndef test(epoch):\n global loss_min\n global stop_count\n # valid accuracy\n correct, total, loss = test_acc(valid_loader)\n print('valid loss: {:.4f}'.format(loss))\n print('valid accuracy: {}/{} ({:.2f}%)'.format(correct, total, 100.*correct/total))\n\n # record\n valid_losses.append(loss)\n valid_acc.append(correct / total)\n valid_counter.append(epoch * len(train_loader.dataset))\n \n # early stop\n if loss_min == None or loss <= loss_min:\n loss_min = loss\n stop_count = 0\n # save the model\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n torch.save(network.state_dict(), model_path + network.name + '.pth')\n else:\n stop_count += 1\n print(f'\\tEarlyStopping counter: {stop_count} out of {max_stop_count}')\n\n\ndef report(data_loader):\n network.eval()\n TP = TN = FN = FP = 0\n with torch.no_grad():\n for data, target, input_len in data_loader:\n # calculate\n if torch.cuda.is_available():\n data, target = data.cuda(), target.cuda()\n output = network(data, input_len)\n pred = output.detach().max(1)[1]\n TP += ((pred == 1) & (target == 1)).sum().item()\n TN += ((pred == 0) & (target == 0)).sum().item()\n FN += ((pred == 0) & (target == 1)).sum().item()\n FP += ((pred == 1) & (target == 0)).sum().item()\n precision = TP / (TP + FP)\n recall = TP / (TP + FN)\n acc = (TP + TN) / (TP + TN + FP + FN)\n fscore = 2 / (1 / precision + 1 / recall)\n print(network.name + ' score:')\n print(\"Accuracy:\\t{:.4f}\".format(acc))\n print(\"F-score:\\t{:.4f}\\n\".format(fscore))\n\n\ndef report_all():\n global network\n model_names = ['MLP', 'CNN', 'RNN']\n for name in model_names:\n network = getattr(models, name)()\n if torch.cuda.is_available():\n network.cuda()\n network.load_state_dict(torch.load(model_path + network.name + '.pth'))\n report(test_loader)\n\n\ndef save_record(last_epoch):\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n dic = {'train_losses': train_losses,\n 'train_counter': train_counter,\n 'valid_losses': valid_losses,\n 'valid_acc': valid_acc,\n 'valid_counter': valid_counter,\n 'epochs': last_epoch}\n torch.save(dic, model_path + network.name + '_record.csv')\n\n\ndef load_record():\n global network, optimizer, scheduler, train_losses, train_counter, valid_losses, valid_acc, valid_counter\n network.load_state_dict(torch.load(model_path + network.name + '.pth'))\n optimizer = optim.AdamW(network.parameters(), lr=learning_rate)\n scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=base_lr, max_lr=max_lr, step_size_up=step_size, step_size_down=step_size, cycle_momentum=False)\n dic = torch.load(model_path + network.name + '_record.csv')\n train_losses = dic['train_losses']\n train_counter = dic['train_counter']\n valid_losses = dic['valid_losses']\n valid_acc = dic['valid_acc']\n valid_counter = dic['valid_counter']\n return dic['epochs'] + 1\n\n\ndef plot_figure():\n if not os.path.exists(img_path):\n os.makedirs(img_path)\n fig = plt.figure()\n plt.plot(train_counter, train_losses, color='blue')\n 
plt.scatter(valid_counter, valid_losses, color='red')\n plt.plot(valid_counter, valid_acc, color='green')\n plt.legend(['Train Loss', 'Valid Loss', 'Valid Accuracy'], loc='right')\n plt.title(network.name)\n plt.xlabel('number of training examples seen')\n plt.ylabel('cross entropy loss')\n plt.savefig(img_path + network.name + ' ' + str(len(os.listdir(img_path))) +'.png')\n # plt.show()\n\n\nif __name__ == \"__main__\":\n # train the model\n epoch = 0\n if options.train:\n start_epoch = 1\n # if continue -> load\n if options.continuing:\n start_epoch = load_record()\n\n # train\n for epoch in range(start_epoch, n_epochs + 1):\n print(\"-\" * 40)\n train(epoch)\n test(epoch)\n if stop_count >= max_stop_count:\n print(\"\\nNo improvement for {} epoches. Early stop!\\n\".format(max_stop_count))\n break\n print(\"-\" * 40)\n report(test_loader)\n\n # report\n if options.report:\n report_all()\n\n if options.train:\n # save record\n save_record(epoch)\n\n # plot a figure\n plot_figure()\n","repo_name":"zhang-edward/Wave-Rush","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34565566788","text":"import folium\n\n# create base map using default OpenStreetMap tiles\nmap_osm = folium.Map(location=[45.5236, -122.6750])\nmap_osm.save('osm.html')\n\n# using Stamen Terrain, Toner, Mapbox Bright, Control room tiles\nstamen = folium.Map(location=[45.5236, -122.6750], tiles='Stamen Toner',\n zoom_start=13)\nstamen.save('stamen_toner.html')\n\n# for passing Leaflet.js compatible custom tileset\nfolium.Map(location=[45.372, -121.6972],\n zoom_start=12,\n tiles='http://{s}.tiles.yourtiles.com/{z}/{x}/{y}.png',\n attr='My Data Attribution')\n\n# plotting with Leaflet style location marker with popup text\nmap_1 = folium.Map(location=[45.372, -121.6972],\n zoom_start=12,\n tiles='Stamen Terrain')\nfolium.Marker([45.3288, -121.6625], popup='Mt. Hood Meadows').add_to(map_1)\nfolium.Marker([45.3311, -121.7113], popup='Timberline Lodge').add_to(map_1)\nmap_1\n\n# plotting colors and marker icon types (from bootstrap)\nmap_1 = folium.Map(location=[45.372, -121.6972],\n zoom_start=12,\n tiles='Stamen Terrain')\nfolium.Marker([45.3288, -121.6625],\n popup='Mt. 
Hood Meadows',\n icon=folium.Icon(icon='cloud')\n ).add_to(map_1)\nfolium.Marker([45.3311, -121.7113],\n popup='Timberline Lodge',\n icon=folium.Icon(color='green')\n ).add_to(map_1)\nfolium.Marker([45.3300, -121.6823],\n popup='Some Other Location',\n icon=folium.Icon(color='red',icon='info-sign')\n ).add_to(map_1)\nmap_1\n\nmap_1.save('markers.html')\n\n# plotting circle-style markers, with custom size and color\nmap_2 = folium.Map(location=[45.5236, -122.6750],\n tiles='Stamen Toner',\n zoom_start=13)\nfolium.Marker([45.5244, -122.6699],\n popup='The Waterfront'\n ).add_to(map_2)\nfolium.CircleMarker([45.5215, -122.6261],\n radius=500,\n popup='Laurelhurst Park',\n color='#3186cc',\n fill_color='#3186cc',\n ).add_to(map_2)\nmap_2\n\nmap_2.save('custommarkers.html')\n","repo_name":"codeLovingYogi/AmericanRoadTrip","sub_path":"mapexamples.py","file_name":"mapexamples.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3289517733","text":"from nba_api.stats.endpoints import leaguegamelog\nimport pandas as pd\n\n# Initialize an empty DataFrame to store the data\ndata = pd.DataFrame()\n\n# Loop through the last seven seasons\nfor season in [\"2016-17\", \"2017-18\", \"2018-19\", \"2019-20\", \"2020-21\", \"2021-22\", \"2022-23\"]:\n # Make the API request\n gamelog = leaguegamelog.LeagueGameLog(season=season)\n season_data = gamelog.get_data_frames()[0]\n\n # Append the season data to the main DataFrame\n data = pd.concat([data, season_data], ignore_index=True)\n\n# a bare 'return' at module level is a SyntaxError; bind the result instead\nresult = data.to_dict(orient='list')","repo_name":"pschybyschp/TC23_NBA","sub_path":"Leaguegamelog_Trellis.py","file_name":"Leaguegamelog_Trellis.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"33300378996","text":"import os\r\nimport argparse\r\nimport jsonlines\r\nimport pickle\r\nfrom preprocessing.stat_corpus import only_english, only_string_in_dict, lines_to_paragraphs\r\n\r\n\r\ndef read_in_docs(corpus_dir: str, output_dir: str, pickle_dir: str, removal=True):\r\n '''\r\n reads in all files, separates them in intro, summary and the single paragraphs, removes non-informative\r\n intros and summaries, writes them into jsonlines file, prints if it fails to read a certain file and\r\n stores it in failed_files\r\n :param corpus_dir: directory of the corpus containing the text files\r\n :param output_dir: output directory where the pickled files of the non-informative intros and summaries are\r\n :param removal: if non-informative text should be removed in the intros and the summaries\r\n :return:\r\n '''\r\n with open(os.path.join(pickle_dir, 'intro_text_often.pkl'), 'rb') as f:\r\n intro_often = pickle.load(f)\r\n with open(os.path.join(pickle_dir, 'summ_text_often.pkl'), 'rb') as f:\r\n summ_often = pickle.load(f)\r\n\r\n dict_paragraphs = {}\r\n failed_files = []\r\n for root, dirs, files in os.walk(corpus_dir):\r\n for file in files:\r\n #file = '001_001.txt'\r\n with open(os.path.join(corpus_dir, file), 'r') as f:\r\n lines = f.readlines()\r\n lines = [line.strip() for line in lines if line.strip('\\n') != ' ' and line.strip() != '']\r\n paragraphs = lines_to_paragraphs(lines)\r\n if paragraphs:\r\n paragraphs = only_english(paragraphs)\r\n paragraphs = only_string_in_dict(paragraphs)\r\n if removal:\r\n if paragraphs.get('intro') in intro_often:\r\n paragraphs.update({'intro': None})\r\n if 
paragraphs.get('Summary:') in summ_often:\r\n paragraphs.update({'Summary:': None})\r\n dict_paragraphs.update({file.split('.')[0]: paragraphs})\r\n else:\r\n print('reading in of file {} doesnt work'.format(file))\r\n failed_files.append(file)\r\n\r\n #with open(os.path.join(output_dir, 'paragraphs_jsonlines.pickle'), 'wb') as f:\r\n # pickle.dump(dict_paragraphs, f)\r\n #with open(os.path.join(output_dir, 'failed_files_jsonlines.pickle'), 'wb') as f:\r\n # pickle.dump(failed_files, f)\r\n\r\n return dict_paragraphs, failed_files\r\n\r\n\r\ndef jsonl_index_whole_doc(output_dir: str, dict_paragraphs: dict):\r\n \"\"\"\r\n creates jsonl file for bm25 index and indexes the whole document with intro and summary as one sample\r\n :param output_dir:\r\n :param dict_paragraphs:\r\n :return:\r\n \"\"\"\r\n with jsonlines.open(os.path.join(output_dir, 'corpus_whole_docs_removed.jsonl'), mode='w') as writer:\r\n for key, values in dict_paragraphs.items():\r\n i = 0\r\n # values is also a dictionary with intro, summary and paragraphs\r\n whole_text = []\r\n for key2, values2 in values.items():\r\n if values2:\r\n whole_text.append(values2)\r\n writer.write({'id': '{}_{}'.format(key.split('.txt')[0], i),\r\n 'contents': ' '.join(whole_text)})\r\n i += 1\r\n\r\n\r\ndef jsonl_index_doc_only_para(output_dir, dict_paragraphs):\r\n \"\"\"\r\n creates jsonl file for bm25 index and indexes the whole document without intro and summary as one sample\r\n :param output_dir:\r\n :param dict_paragraphs:\r\n :return:\r\n \"\"\"\r\n with jsonlines.open(os.path.join(output_dir, 'corpus_doc_only_para.jsonl'), mode='w') as writer:\r\n for key, values in dict_paragraphs.items():\r\n i = 0\r\n # values is also a dictionary with intro, summary and paragraphs\r\n whole_text = []\r\n for key2, values2 in values.items():\r\n if key2 != 'intro' and key2 != 'Summary:':\r\n if values2:\r\n whole_text.append(values2)\r\n writer.write({'id': '{}_{}'.format(key.split('.txt')[0], i),\r\n 'contents': ' '.join(whole_text)})\r\n i += 1\r\n\r\n\r\ndef jsonl_index_para_separately(output_dir, dict_paragraphs, intro_summ=False):\r\n '''\r\n creates jsonl file with one sample containing one passage, if intro_summ = False then it only considers\r\n the paragraphs, if True then it also includes the intros and summaries as samples\r\n :param output_dir:\r\n :param dict_paragraphs:\r\n :param intro_summ:\r\n :return:\r\n '''\r\n with jsonlines.open(os.path.join(output_dir, 'corpus_separately_para_{}.jsonl'.format('with_intro_summ' if intro_summ else 'only')), mode='w') as writer:\r\n for key, values in dict_paragraphs.items():\r\n i = 0\r\n for key2, values2 in values.items():\r\n if not intro_summ:\r\n if key2 != 'intro' and key2 != 'Summary:':\r\n if values2:\r\n writer.write({'id': '{}_{}'.format(key.split('.txt')[0], i),\r\n 'contents': values2})\r\n i += 1\r\n else:\r\n if values2:\r\n writer.write({'id': '{}_{}'.format(key.split('.txt')[0], i),\r\n 'contents': values2})\r\n i += 1\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #\r\n # config\r\n #\r\n #parser = argparse.ArgumentParser()\r\n\r\n #parser.add_argument('--corpus-dir', action='store', dest='corpus_dir',\r\n # help='corpus directory location', required=True)\r\n # parser.add_argument('--output-dir', action='store', dest='output_dir',\r\n # help='output directory location', required=True)\r\n # parser.add_argument('--label-file-train', action='store', dest='label_file',\r\n # help='label file train', required=True)\r\n # parser.add_argument('--label-file-test', 
action='store', dest='label_file_test',\r\n # help='label file test', required=True)\r\n\r\n #args = parser.parse_args()\r\n\r\n corpus_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/corpus'\r\n pickle_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/pickle_files'\r\n output_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/output'\r\n label_file = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/task1_train_2020_labels.json'\r\n label_file_test = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/task1_test_2020_labels.json'\r\n base_case_dir = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/base_case_all'\r\n\r\n # test functions with smaller datasets\r\n #corpus_dir_test = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/corpus_test'\r\n #output_dir_test = '/mnt/c/Users/salthamm/Documents/phd/data/coliee2020/task1/output_test'\r\n\r\n dict_paragraphs, failed_files = read_in_docs(corpus_dir, output_dir, pickle_dir, removal=True)\r\n\r\n jsonl_index_whole_doc(output_dir, dict_paragraphs)\r\n jsonl_index_doc_only_para(output_dir, dict_paragraphs)\r\n jsonl_index_para_separately(output_dir, dict_paragraphs,\r\n intro_summ=False) # without summary and intro\r\n jsonl_index_para_separately(output_dir, dict_paragraphs,\r\n intro_summ=True) # with summary and intro as separate paragraphs\r\n\r\n\r\n","repo_name":"sophiaalthammer/parm","sub_path":"preprocessing/jsonlines_for_bm25_pyserini.py","file_name":"jsonlines_for_bm25_pyserini.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"12"} +{"seq_id":"11683027986","text":"# 3p\nfrom nose.plugins.attrib import attr\n\n# project\nfrom tests.checks.common import AgentCheckTest\n\n@attr('linux')\n@attr(requires='linux')\nclass TestCheckLinuxProcExtras(AgentCheckTest):\n CHECK_NAME = 'linux_proc_extras'\n\n INODE_GAUGES = [\n 'system.inodes.total',\n 'system.inodes.used'\n ]\n\n PROC_COUNTS = [\n 'system.linux.context_switches',\n 'system.linux.processes_created',\n 'system.linux.interrupts'\n ]\n\n ENTROPY_GAUGES = [\n 'system.entropy.available'\n ]\n\n PROCESS_STATS_GAUGES = [\n 'system.processes.states',\n 'system.processes.priorities'\n ]\n\n # Really a basic check to see if all metrics are there\n def test_check(self):\n self.run_check({'instances': []})\n\n # Assert metrics\n for metric in self.PROC_COUNTS + self.INODE_GAUGES + self.ENTROPY_GAUGES + self.PROCESS_STATS_GAUGES:\n self.assertMetric(metric, tags=[])\n\n self.coverage_report()\n","repo_name":"totalkyos/DataDog","sub_path":"tests/checks/integration/test_linux_proc_extras.py","file_name":"test_linux_proc_extras.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"28806922214","text":"import tensorflow as tf\n\nclass EqualityOfOdds(tf.keras.metrics.Metric):\n def __init__(self, name, **kwargs):\n super(EqualityOfOdds, self).__init__(name=name, **kwargs)\n self.equal_odds = self.add_weight(name='EO', initializer='zeros')\n def update_state(self, y_true, y_pred, sample_weight=None):\n print(y_true)\n y_true = tf.cast(y_true, tf.bool)\n y_pred = tf.cast(y_pred, tf.bool)\n\n values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))\n values = tf.cast(values, self.dtype)\n if sample_weight is not None:\n sample_weight = tf.cast(sample_weight, self.dtype)\n sample_weight = 
tf.broadcast_to(sample_weight, tf.shape(values))\n values = tf.multiply(values, sample_weight)\n self.equal_odds.assign_add(tf.reduce_sum(values))\n\n def result(self):\n return self.equal_odds","repo_name":"Krabsenm/thesis2021","sub_path":"metrics/equality_odds.py","file_name":"equality_odds.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29523629845","text":"'''\n\tYour task is to detect if any loop is present \n\tin the given linked list.\n\t\n\tFunction Arguments: head (reference to head of the linked list)\n\tReturn Type: True or False (boolean)\n'''\ndef detectLoop(head):\n #code here\n slowPtr = head\n fastPtr = head\n while(fastPtr and slowPtr and fastPtr.next):\n fastPtr = fastPtr.next.next\n slowPtr = slowPtr.next\n if(fastPtr is slowPtr):\n return True\n return False","repo_name":"nisacharan/algods","sub_path":"LinkedLists/07_isLoop.py","file_name":"07_isLoop.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3698123360","text":"\"\"\"Fast Fourier Transform Processor\"\"\"\r\n\r\n# Authors: Jeffrey Wang\r\n# License: BSD 3 clause\r\n\r\nimport numpy as np\r\n\r\nfrom sleepens.io import DataObject, Dataset\r\nfrom sleepens.utils.data import signal as s\r\nfrom sleepens.utils import calculate_epochs, get_epoch\r\n\r\ndef fft(dataobject, epoch_size, name=None, nperseg_factor=1,\r\n\t\t\t\tnoverlap_factor=0.1, detrend='constant'):\r\n\t\"\"\"\r\n\tProcess the data in `dataobject`,\r\n\tdivide the data into epochs of `epoch_size`, and\r\n\tcompute the Fast Fourier Transform (FFT).\r\n\r\n\tParameters\r\n\t----------\r\n\tdataobject : DataObject\r\n\t\tDataObject for processing.\r\n\r\n\tepoch_size : int\r\n\t\tNumber of seconds in each epoch.\r\n\r\n\tnperseg_factor : float (0, inf)\r\n\t\tMultiplied by data resolution to\r\n\t\tcalculate the NPERSEG value.\r\n\r\n\tnoverlap_factor : float (0, inf)\r\n\t\tMultiplied by NPERSEG to calculate\r\n\t\tthe NOVERLAP value.\r\n\r\n\tdetrend : str or func or False, default='constant'\r\n\t\tMethod to detrend each segment.\r\n\r\n\tReturns\r\n\t-------\r\n\tds : Dataset\r\n\t\tDataset with frequencies as features\r\n\t\tand power spectral densities at each epoch.\r\n\t\"\"\"\r\n\tf, Pxx = [], []\r\n\tepoch_len, n_epochs = calculate_epochs(dataobject.data, dataobject.resolution, epoch_size)\r\n\tfor i in range(n_epochs):\r\n\t\tepoch = get_epoch(dataobject.data, i, epoch_len)\r\n\t\tfs = 1/dataobject.resolution\r\n\t\tnperseg = fs * nperseg_factor\r\n\t\tnoverlap = nperseg * noverlap_factor\r\n\t\tfi, Pxxi = s.welch(epoch, fs=fs, nperseg=nperseg, noverlap=noverlap,\r\n\t\t\t\t\t\t\t\tdetrend=detrend)\r\n\t\tf.append(fi)\r\n\t\tPxx.append(Pxxi)\r\n\treturn Dataset(name=name, features=f, data=Pxx)\r\n","repo_name":"paradoxysm/sleepens","sub_path":"sleepens/process/primary/_fft.py","file_name":"_fft.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"35705279350","text":"\"\"\"\nThis script trains sentence transformers with a triplet loss function.\n\nAs corpus, we use the wikipedia sections dataset that was described by Dor et al., 2018, Learning Thematic Similarity Metric Using Triplet Networks.\n\"\"\"\n\nfrom sentence_transformers import SentenceTransformer, InputExample, LoggingHandler, losses, models, util\nfrom torch.utils.data 
import DataLoader\nfrom sentence_transformers.evaluation import TripletEvaluator\nfrom datetime import datetime\nfrom zipfile import ZipFile\n\nfrom sentence_transformers.datasets import SentenceLabelDataset\nfrom sentence_transformers.datasets import NoDuplicatesDataLoader\n\nimport csv\nimport logging\nimport os\nimport sys\nimport torch\n\nsys.path.insert(0, '../../src-py/')\nimport track_1_kp_matching\nfrom KeyPointEvaluator import KeyPointEvaluator\n\nlogging.basicConfig(format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO,\n handlers=[LoggingHandler()])\nlogger = logging.getLogger(__name__)\n\n#You can specify any huggingface/transformers pre-trained model here, for example, bert-base-uncased, roberta-base, xlm-roberta-base\nmodel_name = 'distilbert-base-uncased'\n\n\ndef train_model(dataset_path, eval_data_path, subset_name, output_path, model_name, num_epochs=3, train_batch_size=16, model_suffix='', data_file_suffix='', max_seq_length=256, \n add_special_token=False, loss='Triplet', sentence_transformer=False):\n ### Configure sentence transformers for training and train on the provided dataset\n # Use Huggingface/transformers model (like BERT, RoBERTa, XLNet, XLM-R) for mapping tokens to embeddings\n output_path = output_path+model_name+ \"-\" + model_suffix + \"-\"+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n if sentence_transformer:\n word_embedding_model = SentenceTransformer(model_name)\n word_embedding_model.max_seq_length = max_seq_length\n \n if add_special_token:\n word_embedding_model.tokenizer.add_tokens([''], special_tokens=True)\n word_embedding_model.resize_token_embeddings(len(word_embedding_model.tokenizer))\n\n else:\n word_embedding_model = models.Transformer(model_name)\n word_embedding_model.max_seq_length = max_seq_length\n \n if add_special_token: # True\n word_embedding_model.tokenizer.add_tokens([''], special_tokens=True)\n word_embedding_model.auto_model.resize_token_embeddings(len(word_embedding_model.tokenizer))\n\n # Apply mean pooling to get one fixed sized sentence vector\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(),\n pooling_mode_mean_tokens=True,\n pooling_mode_cls_token=False,\n pooling_mode_max_tokens=False)\n\n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n\n\n logger.info(\"Read Triplet train dataset\")\n train_examples = []\n with open(os.path.join(dataset_path, 'training_df_{}.csv'.format(data_file_suffix)), encoding=\"utf-8\") as fIn:\n reader = csv.DictReader(fIn, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n for row in reader:\n if loss == 'ContrastiveLoss':\n train_examples.append(InputExample(texts=[row['argument'], row['keypoint']], label=int(row['label'])))\n else:\n train_examples.append(InputExample(texts=[row['anchor'], row['pos'], row['neg']], label=0))\n\n\n\n if loss == 'MultipleNegativesRankingLoss':\n # Special data loader that avoid duplicates within a batch\n train_dataloader = NoDuplicatesDataLoader(train_examples, shuffle=False, batch_size=train_batch_size)\n # Our training loss\n train_loss = losses.MultipleNegativesRankingLoss(model)\n elif loss == 'ContrastiveLoss':\n train_dataloader = DataLoader(train_examples, shuffle=False, batch_size=train_batch_size)\n train_loss = losses.ContrastiveLoss(model)\n else:\n train_dataloader = DataLoader(train_examples, shuffle=False, batch_size=train_batch_size)\n train_loss = losses.TripletLoss(model)\n \n\n evaluator = KeyPointEvaluator.from_eval_data_path(eval_data_path, 
subset_name, add_special_token, name='dev', show_progress_bar=False)\n\n\n warmup_steps = int(len(train_dataloader) * num_epochs * 0.1) #10% of train data\n\n\n # Train the model\n model.fit(train_objectives=[(train_dataloader, train_loss)],\n evaluator=evaluator,\n epochs=num_epochs,\n evaluation_steps=500,\n warmup_steps=warmup_steps,\n output_path=output_path)\n\n return model\n\n# train_model('/home/marcelbraasch/PycharmProjects/argmining-21-keypoint-analysis-sharedtask-code-2/data/siamese-data/',\n# \"/home/marcelbraasch/PycharmProjects/argmining-21-keypoint-analysis-sharedtask-code-2/data/kpm_data\",\n# 'dev',\n# \"/home/marcelbraasch/PycharmProjects/new_KPA/argmining-21-keypoint-analysis-sharedtask-code-2/code/siamese-models\",\n# 'roberta-base',\n# model_suffix='contrastive-10-epochs',\n# data_file_suffix='contrastive',\n# num_epochs=1, max_seq_length=70, add_special_token=True, train_batch_size=32, loss='ContrastiveLoss')","repo_name":"marcelbra/argmining-21-keypoint-analysis-sharedtask-code-2","sub_path":"code/src-py/sbert_training.py","file_name":"sbert_training.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"11882911041","text":"# Solution - 1: O(N) Space using a Set\n\n# Solution - 2: O(1) Space\nclass Solution:\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n p1,p2 = headA, headB\n while p1 != p2:\n if p1:\n p1 = p1.next\n else:\n p1 = headB\n if p2:\n p2 = p2.next\n else:\n p2 = headA\n \n return p1\n","repo_name":"Ram-95/Python-Competitive-Programming","sub_path":"Linked_Lists/Intersection of Two Linked Lists.py","file_name":"Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"12"} +{"seq_id":"13954808215","text":"#!/usr/bin/env python3\n\n# Day 18: Numbers With Same Consecutive Differences\n#\n# Return all non-negative integers of length N such that the absolute\n# difference between every two consecutive digits is K.\n# Note that every number in the answer must not have leading zeros except for\n# the number 0 itself. 
For example, 01 has one leading zero and is invalid, but\n# 0 is valid.\n# You may return the answer in any order.\n#\n# Note:\n# 1 <= N <= 9\n# 0 <= K <= 9\n\nclass Solution:\n def numsSameConsecDiff(self, N: int, K: int) -> [int]:\n # Edge case\n if N == 1:\n return list(range(10))\n\n numbers = []\n\n # Recursively search possible numbers\n def buildNumbers(N, number):\n if N == 0:\n numbers.append(number)\n else:\n last_digit = number % 10\n if K == 0:\n next_digits = [last_digit]\n else:\n next_digits = []\n if last_digit - K >= 0:\n next_digits.append(last_digit - K)\n if last_digit + K < 10:\n next_digits.append(last_digit + K)\n for next_digit in next_digits:\n buildNumbers(N - 1, number * 10 + next_digit)\n\n for first_digit in range(1, 10):\n buildNumbers(N - 1, first_digit)\n \n return numbers\n\n# Tests\nnumbers = Solution().numsSameConsecDiff(3, 7)\nexpected = [181,292,707,818,929]\nassert len(numbers) == len(expected) and set(numbers) == set(expected)\nnumbers = Solution().numsSameConsecDiff(2, 1)\nexpected = [10,12,21,23,32,34,43,45,54,56,65,67,76,78,87,89,98]\nassert len(numbers) == len(expected) and set(numbers) == set(expected)\nnumbers = Solution().numsSameConsecDiff(1, 9)\nexpected = [0,1,2,3,4,5,6,7,8,9]\nassert len(numbers) == len(expected) and set(numbers) == set(expected)\n","repo_name":"jkbockstael/leetcode","sub_path":"2020-08-month-long-challenge/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24347230481","text":"import random\nimport time\nimport terninger as d6\n \ndef roll_dice_human():\n roll_human = random.randrange(1,7)\n return roll_human\n \ndef roll_dice_machine():\n roll_machine = random.randrange(1,7)\n return roll_machine\n \n \nscore_human = 0\nscore_machine = 0\n \nprint(\"Dette er et terningespil.\")\nprint(\"Du spiller mod maskinen.\")\nprint(\"Spillet afsluttes, når enten spiller eller maskine når 10 point.\")\nwhile score_human < 10 and score_machine < 10:\n roll_human = roll_dice_human()\n roll_machine = roll_dice_machine()\n enter = input(\"Tryk på enter for at slå en terning\")\n if enter == '':\n roll_dice_human()\n print(\"...\")\n time.sleep(1)\n print(\"Du slog en\",roll_human,\"'er.\")\n d6.terning(roll_human)\n print(\"Nu er det maskinens tur.\")\n print(\"...\")\n time.sleep(1)\n roll_dice_machine()\n print(\"Maskinen slog en\",roll_machine,\"'er\")\n d6.terning(roll_machine)\n if roll_human > roll_machine:\n print(\"Du vandt!\")\n score_human = score_human + 1\n else:\n print(\"Du tabte.\")\n score_machine = score_machine + 1\n print(\"\"\"\n Score er:\n Dig:\"\"\", score_human,\"CPU:\", score_machine)\n print(\"Næste runde starter nu.\")\n continue\nprint(\"Spillet er nu slut. 
Score er: Dig:\", score_human, \"CPU\", score_machine)\nif score_human > score_machine:\n print(\"Du vandt spillet!\")\nelif score_human == score_machine:\n print(\"Spillet er uafgjort.\")\nelse:\n print(\"Du tabte spillet.\")","repo_name":"deckrd/repo_1","sub_path":"d6.py","file_name":"d6.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34542037181","text":"\"\"\"\n\tProblem Statement:\n\t\tGiven a binary tree, return the vertical order traversal of its nodes values.\n\t\tFor each node at position (X, Y), its left and right children respectively will be at positions (X-1, Y-1) and (X+1, Y-1).\n\t\tRunning a vertical line from X = -infinity to X = +infinity, whenever the vertical line touches some nodes, we report the values of the nodes in order \n\t\tfrom top to bottom (decreasing Y coordinates).\n\t\tIf two nodes have the same position, then the value of the node that is reported first is the value that is smaller.\n\t\tReturn an list of non-empty reports in order of X coordinate. Every report will have a list of values of nodes.\n\"\"\"\nfrom heapq import heappush, heappop\n\nclass Solution:\n def traverse(self, node, x, y):\n if not node:\n return\n \n self.maf[x].append((y, node.val))\n self.traverse(node.left, x-1, y+1)\n self.traverse(node.right, x+1, y+1)\n\t\t\n def verticalTraversal(self, root: TreeNode) -> List[List[int]]:\n self.maf = defaultdict(list) \n self.traverse(root, 0, 0)\n heap, ans = [], []\n\n for x, lst in self.maf.items():\n heappush(heap, (x, sorted(lst)))\n\n while heap:\n ans.append([v for _, v in heappop(heap)[1]])\n\n return ans\n\t\t\n","repo_name":"Madhivarman/DataStructures","sub_path":"leetcodeProblems/vertical_order_bst.py","file_name":"vertical_order_bst.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"15870757943","text":"from base.models import RentInvoice, Issue, PropertyItem, User\r\nimport datetime\r\nimport calendar\r\nimport logging\r\n\r\nlogger = logging.getLogger(\"console_message\")\r\n\r\ndef create_invoice():\r\n today = datetime.date.today()\r\n last_day_of_month = calendar.monthrange(today.year, today.month)[1]\r\n generate_invoice_for = PropertyItem.objects.exclude(tenant_id=None)\r\n \r\n if len(generate_invoice_for) > 0:\r\n for item in generate_invoice_for:\r\n try:\r\n due_date = today.replace(day=item.rent_due_day)\r\n except:\r\n due_date = today.replace(day=last_day_of_month)\r\n tenant_email= User.object.get(id=item.tenant_id).tenant_email\r\n invoice_data = {\r\n 'property_id': item.id, 'property_name': item.name, 'owner_id': item.owner_id, 'tenant_id': item.tenant_id, 'tenant_email': tenant_email,\r\n 'price': item.price, 'currency': ('LEI' if item.currency==2 else 'EUR'), 'due_day': due_date}\r\n \r\n invoice = RentInvoice.objects.create(**invoice_data)\r\n logger.info(\"Generated invoice for \"+item.name)\r\n invoice.save()\r\n\r\ndef create_issue(name, description, by_id):\r\n property_item = PropertyItem.objects.get(id=26)\r\n property_owner = User.objects.get(id=5)\r\n created_by = User.objects.get(id=by_id)\r\n issue_data = {\r\n 'name': name, 'linked_to_property': property_item, 'description': description,\r\n 'closed': False, 'property_owner': property_owner, 'created_by': created_by}\r\n issue = Issue.objects.create(**issue_data)\r\n issue.property_name = property_item.name\r\n issue.save()\r\n\r\n 
print('created new issue')\r\n\r\n return issue","repo_name":"cristian1f/Property-Rent-Management-App","sub_path":"backend/base/api/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"1068981916","text":"#\n# @lc app=leetcode id=300 lang=python3\n#\n# [300] Longest Increasing Subsequence\n#\n\n# @lc code=start\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int: # O(n*logn)\n n = len(nums)\n tails = [nums[0]] # tails[0] means the min tail of a Increasing Subsequence with length 1\n\n for i in range(1, n):\n if nums[i] > tails[-1]:\n tails.append(nums[i])\n else:\n L, R = 0, len(tails) - 1\n while L <= R:\n M = L + (R - L) // 2\n if tails[M] == nums[i]:\n break\n elif tails[M] > nums[i]:\n R = M - 1\n else:\n L = M + 1\n if L > R:\n tails[L] = nums[i]\n \n return len(tails)\n def lengthOfLIS(self, nums: List[int]) -> int:\n n = len(nums)\n dp = [1] * n\n\n for i in range(1, n):\n for j in range(i - 1, -1, -1):\n if nums[j] < nums[i]:\n dp[i] = max(dp[i], dp[j] + 1)\n \n return max(dp)\n# @lc code=end\n\n","repo_name":"SkyJinXX/LeetCode-Practice","sub_path":"300.longest-increasing-subsequence.py","file_name":"300.longest-increasing-subsequence.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12136161001","text":"import backend as be\nimport customtkinter as ctk\n\n\nclass Sidebar(ctk.CTkFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n\n self.rowconfigure(3, weight=1)\n\n self.new_note_btn = ctk.CTkButton(\n self,\n text=\"New Note\",\n width=250,\n font=self.winfo_toplevel().button_font,\n command=self.winfo_toplevel().new_note\n )\n self.new_note_btn.grid(\n column=0,\n row=0,\n padx=10,\n pady=5\n )\n\n self.save_note_btn = ctk.CTkButton(\n self,\n text=\"Save Note\",\n width=250,\n fg_color=\"#307C39\",\n hover_color=\"#245E2B\",\n font=self.winfo_toplevel().button_font,\n command=self.winfo_toplevel().save_note\n )\n self.save_note_btn.grid(\n column=0,\n row=1,\n padx=10,\n pady=5\n )\n\n self.delete_note_btn = ctk.CTkButton(\n self,\n text=\"Delete Note\",\n width=250,\n fg_color='#C73E1D',\n hover_color='#8C2D15',\n font=self.winfo_toplevel().button_font,\n command=self.winfo_toplevel().delete_note,\n state='disabled'\n )\n self.delete_note_btn.grid(\n column=0,\n row=2,\n padx=10,\n pady=5\n )\n\n self.notes_list = ctk.CTkScrollableFrame(\n self,\n fg_color=\"transparent\"\n )\n self.notes_list.grid(\n column=0,\n row=3,\n sticky=\"nsew\"\n )\n\n\nclass MainWindow(ctk.CTkFrame):\n def __init__(self, master, **kwargs):\n super().__init__(master, **kwargs)\n\n self.columnconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n\n self.title = ctk.CTkEntry(\n self,\n fg_color=\"transparent\",\n border_width=0,\n font=self.winfo_toplevel().title_font\n )\n self.title.grid(\n column=0,\n row=0,\n padx=(0, 5),\n pady=5,\n sticky=\"ew\"\n )\n\n self.body = ctk.CTkTextbox(\n self,\n fg_color=\"transparent\",\n font=self.winfo_toplevel().body_font,\n wrap=\"word\",\n activate_scrollbars=False\n )\n self.body.grid(\n column=0,\n row=1,\n sticky=\"nsew\",\n columnspan=2\n )\n\n\nclass App(ctk.CTk):\n def __init__(self):\n super().__init__()\n\n # Create local db and table if they don't exist\n be.create_notes_table()\n\n # Appearance\n ctk.set_appearance_mode('dark')\n self.title('Notes')\n 
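# Fixed initial window size; column 1 (the note editor pane) is the one that stretches.\n        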
self.geometry('1000x600')\n self.columnconfigure(1, weight=1)\n self.rowconfigure(0, weight=1)\n\n # Fonts\n self.title_font = ctk.CTkFont(\n family=\"Arial\",\n size=40,\n weight='bold'\n )\n self.body_font = ctk.CTkFont(\n family=\"Helvetica\",\n size=16\n )\n self.button_font = ctk.CTkFont(\n family=\"Helvetica\",\n size=13\n )\n\n # Sidebar\n self.sidebar = Sidebar(self, fg_color=\"transparent\")\n self.sidebar.grid(\n column=0,\n row=0,\n padx=(10, 5),\n pady=10,\n sticky=\"ns\"\n )\n\n # Main Window\n self.main_window = MainWindow(self, fg_color=\"transparent\")\n self.main_window.grid(\n column=1,\n row=0,\n padx=(5, 10),\n pady=10,\n sticky=\"nsew\"\n )\n\n # Start a New Note\n self.new_note()\n\n# Load buttons for any existing notes into the sidebar\n self.load_notes()\n\n def new_note(self):\n self.current_note_id = None\n self.main_window.title.delete(0, ctk.END)\n self.main_window.body.delete('1.0', ctk.END)\n self.main_window.title.insert(0, \"New Note\")\n self.main_window.body.focus_set()\n self.sidebar.delete_note_btn.configure(state='disabled')\n\n def save_note(self):\n title = self.main_window.title.get()\n body = self.main_window.body.get('1.0', ctk.END)\n\n if self.current_note_id is None:\n be.create_note(title, body)\n else:\n be.update_note(self.current_note_id, title, body)\n\n note_id = be.get_last_note_id()\n self.current_note_id = note_id\n\n self.load_notes()\n self.sidebar.delete_note_btn.configure(state='normal')\n\n def delete_note(self):\n if self.current_note_id is not None:\n be.delete_note(self.current_note_id)\n self.load_notes()\n self.new_note()\n\n def load_note_content(self, note_id):\n note = be.get_note(note_id)\n if note:\n note_title = note[1]\n note_body = note[2]\n self.current_note_id = note_id\n self.main_window.title.delete(0, ctk.END)\n self.main_window.body.delete('1.0', ctk.END)\n self.main_window.title.insert(0, note_title)\n self.main_window.body.insert('1.0', note_body)\n self.sidebar.delete_note_btn.configure(state='normal')\n\n def load_notes(self):\n for child in self.sidebar.notes_list.winfo_children():\n child.destroy()\n\n notes = be.get_all_notes()\n\n for i, note in enumerate(notes):\n note_id = note[0]\n note_title = note[1]\n button = ctk.CTkButton(\n self.sidebar.notes_list,\n text=note_title,\n width=250,\n fg_color=\"transparent\",\n font=self.button_font,\n command=lambda id=note_id: self.load_note_content(id)\n )\n button.grid(column=0, row=i, padx=10, pady=5)","repo_name":"dimays/ctk_demo","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":5914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4537804256","text":"from PySide2.QtCore import (\n QPoint\n)\nfrom PySide2.QtWidgets import (\n QPushButton,\n QWidget\n)\n\nclass SingleOrMassPredictionPage(QWidget):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.button1 = QPushButton(\"Single Prediction\")\n self.button1.setParent(self)\n self.button1.setFixedSize(200, 100)\n self.button1.move(QPoint(self.parent().size().width() / 3 - self.button1.size().width() / 2, 200))\n self.button1.clicked.connect(self.single_prediction_page)\n self.button1.setStyleSheet(\"font-size: 20px;\")\n\n self.button2 = QPushButton(\"Mass Prediction\")\n self.button2.setParent(self)\n self.button2.setFixedSize(200, 100)\n self.button2.move(QPoint(2 * self.parent().size().width() / 3 - self.button2.size().width() / 2, 200))\n 
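# Clicks route to the page-switching handlers defined at the bottom of the class.\n        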
self.button2.clicked.connect(self.mass_prediction_page)\n self.button2.setStyleSheet(\"font-size: 20px;\")\n\n self.button3 = QPushButton(\"Back\")\n self.button3.setParent(self)\n self.button3.setFixedSize(200, 100)\n self.button3.move(QPoint(2 * self.parent().size().width() / 4 - self.button3.size().width() / 2, 400))\n self.button3.clicked.connect(self.back_page)\n self.button3.setStyleSheet(\"font-size: 20px;\")\n\n def single_prediction_page(self):\n return self.parent().show_page(self.parent().single_prediction_page)\n\n def mass_prediction_page(self):\n return self.parent().show_page(self.parent().mass_prediction_page)\n\n def back_page(self):\n self.parent().show_page(self.parent().load_model_page)","repo_name":"onurmx/emotion-recognition","sub_path":"app/pages/singleormasspredictionpage.py","file_name":"singleormasspredictionpage.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19243270125","text":"import cv2\nimport insightface\nimport onnxruntime\nfrom core.config import get_face\nfrom core.utils import rreplace\n\nface_swapper = insightface.model_zoo.get_model('inswapper_128.onnx', providers=onnxruntime.get_available_providers())\n\n\ndef process_video(source_img, frame_paths):\n source_face = get_face(cv2.imread(source_img))\n for frame_path in frame_paths:\n frame = cv2.imread(frame_path)\n try:\n face = get_face(frame)\n result = face_swapper.get(frame, face, source_face, paste_back=True)\n cv2.imwrite(frame_path, result)\n except Exception:\n # no face detected in this frame; leave it unchanged\n pass\n\n\ndef process_img(source_img, target_path):\n frame = cv2.imread(target_path)\n face = get_face(frame)\n source_face = get_face(cv2.imread(source_img))\n result = face_swapper.get(frame, face, source_face, paste_back=True)\n target_path = rreplace(target_path, \"/\", \"/swapped-\", 1) if \"/\" in target_path else \"swapped-\"+target_path\n print(target_path)\n cv2.imwrite(target_path, result)\n","repo_name":"rayarka/roop","sub_path":"core/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"33343023563","text":"from collections import defaultdict\n\n\ndef solution(gems):\n gdict = defaultdict(int)\n gnum = len(set(gems))\n\n left = 0\n right = 0\n answer = [0, len(gems)]\n \n while right < len(gems):\n # gdict[gem name] = count\n gdict[gems[right]] += 1\n right += 1\n # once every kind of gem is inside the window\n if len(gdict) == gnum:\n # shrink the window from the left while it stays complete\n while left < right:\n if gdict[gems[left]] <= 1:\n break\n gdict[gems[left]] -= 1\n left += 1\n\n # len(gems) + 1 - 0 > right - left\n # keep the shortest window found so far\n if answer[1]+1-answer[0] > right - left:\n answer = [left+1, right]\n\n return answer","repo_name":"6loss0m/MHC_study","sub_path":"6loss0m/Programmers/Kakao2020Intern/jewelShoping.py","file_name":"jewelShoping.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"7879911192","text":"'''\nUtil functions to preprocess sequences\n'''\nimport torch\nimport numpy as np\n\n\ndef seq_to_embedding(seq, to_ix):\n '''\n This is a good entry point for passing in different kinds of embeddings.\n :param seq: sequence of words\n :param to_ix: embedding lib\n :return:\n '''\n idxs = [to_ix[w] for w in seq]\n return torch.tensor(idxs, dtype=torch.long)\n\n\ndef seqs_to_dictionary(training_data: list):\n '''\n Parameters\n ----------\n training_data: 
training data as a list of tuples.\n\n Returns\n -------\n word_to_ix: a dictionary mapping words to indices\n tag_to_ix: a dictionary mapping tags to indices\n '''\n # Prepare for padding; need to change count to 1 as well.\n word_to_ix = {'':0}\n tag_to_ix = {'':0}\n count1 = count2 = 1\n\n for sent, tags in training_data:\n for word in sent:\n if word not in word_to_ix:\n word_to_ix[word] = count1\n count1 += 1\n for tag in tags:\n if tag not in tag_to_ix:\n tag_to_ix[tag] = count2\n count2 += 1\n return word_to_ix, tag_to_ix\n\n# Note: we are stealing this from tf.keras.preprocessing.sequence, to avoid having an additional dependency for tensorflow.\n# It's perfectly fine if you want to install tf and call the function directly from there.\n\n\ndef pad_sequences(sequences,\n maxlen=None,\n dtype='int32',\n padding='pre',\n truncating='pre',\n value=0.):\n \"\"\"Pads sequences to the same length.\n This function transforms a list of\n `num_samples` sequences (lists of integers)\n into a 2D Numpy array of shape `(num_samples, num_timesteps)`.\n `num_timesteps` is either the `maxlen` argument if provided,\n or the length of the longest sequence otherwise.\n Sequences that are shorter than `num_timesteps`\n are padded with `value` at the end.\n Sequences longer than `num_timesteps` are truncated\n so that they fit the desired length.\n The position where padding or truncation happens is determined by\n the arguments `padding` and `truncating`, respectively.\n Pre-padding is the default.\n Arguments:\n sequences: List of lists, where each element is a sequence.\n maxlen: Int, maximum length of all sequences.\n dtype: Type of the output sequences.\n padding: String, 'pre' or 'post':\n pad either before or after each sequence.\n truncating: String, 'pre' or 'post':\n remove values from sequences larger than\n `maxlen`, either at the beginning or at the end of the sequences.\n value: Float, padding value.\n Returns:\n x: Numpy array with shape `(len(sequences), maxlen)`\n Raises:\n ValueError: In case of invalid values for `truncating` or `padding`,\n or in case of invalid shape for a `sequences` entry.\n \"\"\"\n if not hasattr(sequences, '__len__'):\n raise ValueError('`sequences` must be iterable.')\n lengths = []\n for x in sequences:\n if not hasattr(x, '__len__'):\n raise ValueError('`sequences` must be a list of iterables. 
'\n 'Found non-iterable: ' + str(x))\n lengths.append(len(x))\n\n num_samples = len(sequences)\n if maxlen is None:\n maxlen = np.max(lengths)\n\n # take the sample shape from the first non empty sequence\n # checking for consistency in the main loop below.\n sample_shape = tuple()\n for s in sequences:\n if len(s) > 0: # pylint: disable=g-explicit-length-test\n sample_shape = np.asarray(s).shape[1:]\n break\n\n x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)\n for idx, s in enumerate(sequences):\n if not len(s): # pylint: disable=g-explicit-length-test\n continue # empty list/array was found\n if truncating == 'pre':\n trunc = s[-maxlen:] # pylint: disable=invalid-unary-operand-type\n elif truncating == 'post':\n trunc = s[:maxlen]\n else:\n raise ValueError(\n 'Truncating type \"%s\" not understood' % truncating)\n\n # check `trunc` has expected shape\n trunc = np.asarray(trunc, dtype=dtype)\n if trunc.shape[1:] != sample_shape:\n raise ValueError('Shape of sample %s of sequence at position %s '\n 'is different from expected shape %s' %\n (trunc.shape[1:], idx, sample_shape))\n\n if padding == 'post':\n x[idx, :len(trunc)] = trunc\n elif padding == 'pre':\n x[idx, -len(trunc):] = trunc\n else:\n raise ValueError('Padding type \"%s\" not understood' % padding)\n return x\n","repo_name":"rantsandruse/pytorch_lstm_02minibatch","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"12"} +{"seq_id":"9559536435","text":"\"\"\" Test the transition system \"\"\"\n\nimport logging\nimport unittest\nimport os\nimport sys\nfrom io import StringIO\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\nfrom pysmt.typing import BOOL\nfrom pysmt.shortcuts import (\n get_env,\n Not, And, Or, Implies, Iff,\n Symbol,\n TRUE, FALSE,\n GE, LE, LT, GT, Equals,\n Plus,\n Real,\n is_valid\n)\n\nfrom pysmt.typing import REAL, BOOL\nfrom pysmt.exceptions import SolverAPINotFound\n\nfrom sabbath.test import TestCase\nfrom sabbath.formula_utils import PredicateExtractor\n\nclass TestFormula(TestCase):\n def test_predicates(self):\n env = get_env()\n x,y,z = [Symbol(c, REAL) for c in [\"x\",\"y\",\"z\"]]\n p1 = Equals(Plus(x,y), Real(0))\n\n extr = PredicateExtractor(env)\n\n expected = set([p1])\n extr.add_predicates_from(p1)\n self.assertTrue(expected == extr.get_predicates())\n\n p2 = Symbol(\"a\", BOOL)\n expected.add(p2)\n extr.add_predicates_from(p2)\n self.assertTrue(expected == extr.get_predicates())\n\n p3 = x + 5 >= 0\n p4 = x <= 0\n p5 = z - x <= z * z\n expected.add(p3)\n expected.add(p4)\n expected.add(p5)\n extr.add_predicates_from(And(p3, Or(p4, p5)))\n self.assertTrue(expected == extr.get_predicates())\n\n extr.add_predicates_from(And(p3, Or(p4, p5)))\n self.assertTrue(expected == extr.get_predicates())\n\n\n","repo_name":"cosynus-lix/sabbath","sub_path":"sabbath/test/test_formula.py","file_name":"test_formula.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"12818982705","text":"import random\n\n\ndef isHit(n, score=10):\n recode = 0\n hit = 0\n for i in range(30):\n r1 = random.randint(1, n)\n r2 = random.randint(1, n)\n if r1 == r2:\n recode += score\n hit += 1\n print('hit:{}'.format(hit))\n return recode\n\n\nprint('------------welcome to the game------------')\ngamername = input('Please enter your 
nickname\\n')\nwhile True:\n Choice = input('Please make a selection: 1.lol 2.pubg\\n')\n if Choice == '1':\n print('{},welcome to LOL'.format(gamername))\n # pick your hero\n heros = ['aicy', 'yasuo', 'jincx', 'kuki', 'seen', 'noxsic']\n for h in heros:\n print(h)\n hero = input('Please choose your hero')\n # choose the game's level\n levels = ['EASY', 'NORMAL', 'HARD']\n for l in levels:\n print(l)\n level = input(\"Choose the game's level\")\n\n # play game\n if level == 'EASY':\n score = isHit(10, 5)\n print(score)\n elif level == 'NORMAL':\n score = isHit(20)\n print(score)\n elif level == 'HARD':\n score = isHit(30, 15)\n print(score)\n else:\n print('This level was not found')\n print('complete this game')\n break\n\n elif Choice == '2':\n print('{},welcome to PUBG'.format(gamername))\n print(\"{},let's fight\".format(gamername))\n coins = 0 # coins count\n level = 1 # level\n count = 0 # kills\n while True:\n r = random.randint(1, 20)\n if r % 5 == 0:\n coins += r * level\n count += 1\n\n if 400 >= coins >= 0:\n level = 1\n elif 1600 >= coins > 400:\n level = 2\n elif 3200 >= coins > 1600:\n level = 3\n else:\n print('complete and you killed {}'.format(count))\n break\n break\n else:\n print('Input error ! Please reenter your input:\\n')","repo_name":"Amiter-zh/repy","sub_path":"demo7/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73409871700","text":"from Qt import QtGui\nfrom Qt import QtCore\n\n\ndef create_round_image(image_path, wide=50):\n \"\"\"\n Create a circle thumbnail\n\n :param image_path: path to the source image\n :returns: Round QPixmap\n \"\"\"\n image = QtGui.QImage(image_path)\n\n # get the base image at the requested size\n base_image = QtGui.QPixmap(wide, wide)\n base_image.fill(QtCore.Qt.transparent)\n\n # now attempt to load the image\n # pixmap will be a null pixmap if load fails\n thumb = QtGui.QPixmap.fromImage(image)\n\n if not thumb.isNull():\n # scale it down to fit inside a frame of maximum wide x wide\n thumb_scaled = thumb.scaled(wide,\n wide,\n QtCore.Qt.KeepAspectRatioByExpanding,\n QtCore.Qt.SmoothTransformation)\n\n # now composite the thumbnail on top of the base image\n # bottom align it to make it look nice\n thumb_img = thumb_scaled.toImage()\n brush = QtGui.QBrush(thumb_img)\n painter = QtGui.QPainter(base_image)\n painter.setPen(QtCore.Qt.NoPen)\n painter.setRenderHint(QtGui.QPainter.Antialiasing)\n painter.setBrush(brush)\n painter.drawEllipse(0, 0, wide, wide)\n painter.end()\n\n return base_image","repo_name":"TianD/merge_excel","sub_path":"libs/std_qt/create_round_image.py","file_name":"create_round_image.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"72780102741","text":"from django.contrib.auth.models import Group\nfrom django import template\n\n\nregister = template.Library()\n\n@register.filter(name='has_group')\ndef has_group(user, group_name):\n group = Group.objects.get(name=group_name)\n return group in user.groups.all()","repo_name":"arpit456jain/CodingEasy","sub_path":"blog/templatetags/has_group.py","file_name":"has_group.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"12"} +{"seq_id":"26443988554","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2021/5/24 16:53\n---------\n@summary: \n---------\n@author: 
mkdir700\n@email: mkdir700@gmail.com\n\"\"\"\nimport time\n\nfrom selenium import webdriver\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\n# driver = webdriver.Remote(command_executor='http://192.168.20.222:4444/wd/hub',desired_capabilities={'browserName': 'chrome'})\n# driver.get('https://qikan.cqvip.com/Qikan/Search/Advance?from=index/')\n# print(driver.get_cookies())\n# time.sleep(10)\n# print(driver.page_source)\n# driver.close()\n\n\nchrome_options = webdriver.ChromeOptions()\n# chrome_options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n# chrome_options.add_experimental_option('useAutomationExtension', False)\n# chrome_options.add_argument(\"--headless\")\n# chrome_options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36')\n# chrome_options.add_experimental_option(\"prefs\", {\"profile.managed_default_content_settings.images\": 2})\n# chrome_options.add_extension(\"./inject.crx\")\nchrome_options.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\ndriver = webdriver.Chrome(options=chrome_options)\n# script = 'Object.defineProperty(navigator, \"webdriver\", {get: () => false,});'\n# 运行 JavaScript 代码\n# driver.execute_script(script)\n\n# with open('./stealth.min.js', 'r', encoding='utf-8') as f:\n# js = f.read()\n#\n# driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n# \"source\": js\n# })\n\ncqvipurl = 'http://qikan.cqvip.com/Qikan/Search/Advance?from=index'\n# cqvipurl = 'https://bot.sannysoft.com/'\ndriver.get(cqvipurl)\n\n# firefox_options = webdriver.FirefoxOptions()\n# profile = webdriver.FirefoxProfile(\"C:\\\\Users\\\\mkdir700\\AppData\\Roaming\\Mozilla\\Firefox\\Profiles\\8s3z5uun.default-release\")\n# driver = webdriver.Firefox(profile)\n\n# 显示等待输入框是否加载完成\n# WebDriverWait(driver, 1000).until(\n# EC.presence_of_all_elements_located(\n# (By.CLASS_NAME, 'advance-submit')\n# )\n# )\n# time.sleep(2)\n# # 设置第一查询条件,第一查询条件为单位名称\n# node = driver.find_element_by_name(\"advSearchKeywords\")\n# node.click() # 点击第一个检索条件下拉框# 选择检索条件为机构暨单位\n# node.send_keys(\"清华大学\")\n# time.sleep(1)\n# driver.find_element_by_xpath(\n# \"//div[@id='basic_searchdomainfilter']/div[@class='advance-submit']/button\").click() # 点击检索按钮\n# #\n# print(driver.page_source)\ncookies = driver.get_cookies()\nc = \"\"\nfor cookie in cookies:\n c += \"{}={}; \".format(cookie['name'], cookie['value'])\nprint(\"=\"*60)\nprint(c)\nprint(driver.execute_script('return t_cookie;'))\n","repo_name":"mkdir700/rs_weipu_reverse","sub_path":"brower.py","file_name":"brower.py","file_ext":"py","file_size_in_byte":2819,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"41575176682","text":"from ruamel.yaml import YAML # type: ignore\n\n\nclass YamlParser:\n \"\"\"\n Parser for the input YAML text\n \"\"\"\n\n @staticmethod\n def parse_str(string: str) -> dict:\n \"\"\"\n Parse a string in the YAML format into the corresponding dictionary\n \"\"\"\n yaml = YAML(typ='safe', pure=True)\n return yaml.load(string)\n\n @staticmethod\n def parse_file(input_file: str) -> dict:\n \"\"\"\n Parse a YAML file into the corresponding dictionary\n \"\"\"\n with open(input_file, 'r') as stream:\n yaml = YAML(typ='safe', pure=True)\n data_loaded = yaml.load(stream)\n return 
data_loaded\n","repo_name":"FPSG-UIUC/teaal-compiler","sub_path":"teaal/parse/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"20383685033","text":"from __future__ import annotations\n\nfrom pathlib import Path\n\nimport pymediainfo\nfrom pyd2v import D2V\n\nfrom pynfogen.tracks.BaseTrack import BaseTrack\n\n\nclass Video(BaseTrack):\n DYNAMIC_RANGE_MAP = {\n \"SMPTE ST 2086\": \"HDR10\",\n \"HDR10\": \"HDR10\",\n \"SMPTE ST 2094 App 4\": \"HDR10+\",\n \"HDR10+\": \"HDR10+\",\n \"Dolby Vision\": \"DV\"\n }\n\n def __init__(self, track: pymediainfo.Track, path: Path):\n super().__init__(track, path)\n # quick shorthands\n self.profile = self._x.format_profile\n self.dar = self._x.other_display_aspect_ratio[0]\n if self._x.framerate_num:\n self.fps = f\"{self._x.framerate_num}/{self._x.framerate_den}\"\n else:\n self.fps = self._x.frame_rate\n\n @property\n def codec(self) -> str:\n \"\"\"\n Get video codec in common P2P simplified form.\n E.g., 'MPEG-2' instead of 'MPEG Video, Version 2'.\n \"\"\"\n return {\n \"MPEG Video\": f\"MPEG-{(self._x.format_version or '').replace('Version ', '')}\"\n }.get(self._x.format, self._x.format)\n\n @property\n def range(self) -> str:\n \"\"\"\n Get video range as typical shortname.\n Returns multiple ranges in space-separated format if a fallback range is\n available. E.g., 'DV HDR10'.\n \"\"\"\n if self._x.hdr_format:\n return \" \".join([\n self.DYNAMIC_RANGE_MAP.get(x)\n for x in self._x.hdr_format.split(\" / \")\n ])\n elif \"HLG\" in ((self._x.transfer_characteristics or \"\"), (self._x.transfer_characteristics_original or \"\")):\n return \"HLG\"\n return \"SDR\"\n\n @property\n def scan(self) -> str:\n \"\"\"\n Get video scan type in string form.\n Will accurately check using DGIndex if codec is MPEG-1/2.\n\n Examples:\n 'Interlaced'\n 'Progressive'\n When there's information on scan type percentages:\n 'Interlaced (CST)'\n 'Progressive (CST)'\n '99.78% Progressive (VST)'\n '0.01% Interlaced (VST)'\n \"\"\"\n scan_type = self._x.scan_type\n if not scan_type:\n # some videos may not state scan, presume progressive\n scan_type = \"Progressive\"\n\n if self.codec in [\"MPEG-1\", \"MPEG-2\"]:\n d2v = D2V.load(self._path)\n for ext in (\"log\", \"d2v\", \"mpg\", \"mpeg\"):\n d2v.path.with_suffix(f\".{ext}\").unlink(missing_ok=True)\n\n flags = [\n dict(**flag, vob=d[\"vob\"], cell=d[\"cell\"])\n for d in d2v.data\n for flag in d[\"flags\"]\n ]\n progressive_frames = sum(f[\"progressive_frame\"] for f in flags)\n progressive_percent = (progressive_frames / len(flags)) * 100\n is_constant = progressive_percent in (0.0, 100.0)\n\n scan_type = [\"Interlaced\", \"Progressive\"][progressive_percent >= 50.0]\n scan_type += f\" ({['VST', 'CST'][is_constant]})\"\n if not is_constant:\n scan_type = f\"{progressive_percent:.2f}% {scan_type}\"\n\n return scan_type\n","repo_name":"rlaphoenix/pynfogen","sub_path":"pynfogen/tracks/Video.py","file_name":"Video.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"12"} +{"seq_id":"23108178728","text":"import docker\nimport logging\nfrom Config import _\nfrom typing import Dict, Any\n\n\nclass DockerHandler:\n \"\"\"\n The DockerHandler creates an abstraction layer for the necessary tasks related to Docker.\n \"\"\"\n\n _instance = None\n\n @staticmethod\n def getInstance(storageConnector=None):\n if 
DockerHandler._instance == None:\n DockerHandler()\n return DockerHandler._instance\n else:\n return DockerHandler._instance\n\n # Allows this class to be pickled by APScheduler - Thanks: https://github.com/agronholm/apscheduler/issues/421\n def __getstate__(self):\n state = self.__dict__.copy()\n del state[\"_client\"] # remove the unpicklable DockerClient\n return state\n\n # will be called on unpickling\n def __setstate__(self, state):\n self.__dict__.update(state)\n try:\n self._client: docker.DockerClient = docker.DockerClient(\n base_url=\"unix://var/run/docker.sock\"\n )\n except Exception as e:\n self._client: None = None\n logging.error(_(\"DOCKERHANDLER_EXCEPTION_SOCKET_CONNECT\" % str(e)))\n\n def __init__(self):\n if DockerHandler._instance != None:\n raise Exception(\"Singleton!\")\n else:\n try:\n self._client: docker.DockerClient = docker.DockerClient(\n base_url=\"unix://var/run/docker.sock\"\n )\n except Exception as e:\n self._client: None = None\n logging.error(_(\"DOCKERHANDLER_EXCEPTION_SOCKET_CONNECT\" % str(e)))\n\n DockerHandler._instance = self\n\n def check_connection(self) -> bool:\n \"\"\"Test the connection to the Docker Socket\n\n Returns:\n bool: True, if connection is ok\n \"\"\"\n if not self._client:\n logging.error(_(\"DOCKERHANDLER_EXCEPTION_DOCKERCLIENT_NONE\"))\n return False\n\n try:\n self._client.info()\n except docker.errors.APIError:\n logging.error(_(\"DOCKERHANDLER_EXCEPTION_API_ERROR\"))\n return False\n\n logging.info(_(\"DOCKERHANDLER_SOCKET_CONNECTION_ACCEPTED\"))\n return True\n\n def get_docker_handle(self) -> docker.DockerClient:\n \"\"\"Returns the Docker Client handle\n\n Returns:\n docker.DockerClient: Current Docker Client Handle\n \"\"\"\n return self._client\n\n def get_container_obj_by_name(self, container_name: str) -> docker.models.containers.Container:\n \"\"\"Returns the corresponding Docker Containter Object by Name\n\n Args:\n container_name (str): Name of the Container or ID\n\n Returns:\n docker.models.containers.Container: Docker Container Object\n \"\"\"\n try:\n return self._client.containers.get(container_name)\n except docker.errors.NotFound:\n logging.error(_(\"DOCKERHANDLER_EXCEPTION_CONTAINER_NOT_FOUND\"))\n return None\n\n def check_container_still_active(self, container_name: str) -> bool:\n \"\"\"Checks if a Container is still alive (= Status 'runnung')\n\n Args:\n container_name (str): Container Name or ID\n\n Returns:\n bool: True, if Container is still running\n \"\"\"\n try:\n container: docker.models.containers.Container = self._client.containers.get(\n container_name\n )\n except docker.errors.NotFound:\n return False\n\n if container.status == \"running\":\n return True\n\n return False\n\n def get_containers_for_monitoring(self) -> Dict[str, Dict[str, Any]]:\n \"\"\"Discovers all Containers with the Label io.smclab.dockmon.enabled = true.\n It will return a Dict with the Containername (or ID, if no name is provided) as key, and a Dict with all\n relevant Labels for Dockmon.\n\n Returns:\n Dict[str, Dict[str, Any]]: Dict containing all Containers with corresponding labels.\n \"\"\"\n\n containers: Dict[str, Dict[str, Any]] = {}\n for container in self._client.containers.list(\n filters={\"label\": [\"io.smclab.dockmon.enabled=True\"], \"status\": \"running\"}\n ):\n container_name = container.name or container.id\n containers[container_name] = container.labels\n\n return 
containers\n","repo_name":"sciencemediacenter/dockmon","sub_path":"src/handler/DockerHandler.py","file_name":"DockerHandler.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"74587015382","text":"# 各種ライブラリインポート\nimport bpy\n\n# 指定オブジェクトのアンビエントオクルージョンを画像テクスチャにベイクする\ndef bake_ambientocclusion_texture(\n arg_object:bpy.types.Object,\n arg_texturename:str=\"BakeTexture\",\n arg_texturesize:int=2048,\n arg_bakemargin:int=0,\n arg_aofactor:float=1.0,\n arg_distance:float=10.0) -> bpy.types.Image:\n \"\"\"指定オブジェクトのアンビエントオクルージョンを画像テクスチャにベイクする\n\n Args:\n arg_object (bpy.types.Object): 指定オブジェクト\n arg_texturename (str, optional): 作成テクスチャ名. Defaults to \"BakeTexture\".\n arg_texturesize (int, optional): 作成テクスチャサイズ(px). Defaults to 2048.\n arg_bakemargin (int, optional): ベイク余白(px). Defaults to 0.\n arg_aofactor (float, optional): AO係数. Defaults to 1.0.\n arg_distance (float, optional): AO距離. Defaults to 10.0.\n\n Returns:\n bpy.types.Image: 作成テクスチャの参照\n \"\"\"\n\n # 参照の保存用変数\n name_mapping = {}\n\n # 追加する画像ノード名を定義する\n texturenode_name = \"ForBakeTextureNode\"\n\n # 新規テクスチャを作成して参照を取得する\n bake_image = make_new_image(\n arg_texturename=arg_texturename,\n arg_texturesize=arg_texturesize\n )\n\n # 指定オブジェクトのマテリアルリストを取得する\n for material_slot in arg_object.material_slots:\n # スロットのマテリアルを取得する\n target_material = material_slot.material\n\n # マテリアルが割り当てられているか\n if target_material == None:\n continue\n\n # 新規テクスチャを参照する画像ノードを追加する\n add_node = add_node_image(\n arg_material=target_material,\n arg_image=bake_image\n )\n\n # 作成ノードの参照を保存する\n name_mapping[texturenode_name + target_material.name] = add_node\n\n # 指定の画像ノードを選択状態に設定する\n select_node_target(\n arg_material=target_material,\n arg_node=name_mapping[texturenode_name + target_material.name]\n )\n\n # 指定オブジェクトの「アンビエントオクルージョン」をベイクする\n bake_ambientocclusion(\n arg_object=arg_object,\n arg_bakemargin=arg_bakemargin,\n arg_GPUuse=True,\n arg_aofactor=arg_aofactor,\n arg_distance=arg_distance\n )\n\n # 指定オブジェクトのマテリアルリストを取得する\n for material_slot in arg_object.material_slots:\n # スロットのマテリアルを取得する\n target_material = material_slot.material\n\n # マテリアルが割り当てられているか\n if target_material == None:\n continue\n\n # 追加した画像ノードを削除する\n delete_node_target(\n arg_material=target_material,\n arg_node=name_mapping[texturenode_name + target_material.name]\n )\n \n return bake_image\n\n\n# 新規画像を作成する\ndef make_new_image(arg_texturename:str=\"BakeTexture\",\n arg_texturesize:int=2048) -> bpy.types.Image:\n \"\"\"新規画像を作成する\n\n Args:\n arg_texturename (str, optional): 作成テクスチャ名. Defaults to \"BakeTexture\".\n arg_texturesize (int, optional): 作成テクスチャサイズ. 
Defaults to 2048.\n\n Returns:\n bpy.types.Image: 作成画像の参照\n \"\"\"\n\n # 新規画像を作成する\n newimage = bpy.data.images.new(\n name=arg_texturename,\n width=arg_texturesize,\n height=arg_texturesize,\n alpha=True\n )\n\n return newimage\n\n\n# 対象マテリアルに指定テクスチャを参照する画像ノードを追加する\ndef add_node_image(arg_material:bpy.types.Material,\n arg_image:bpy.types.Image) -> bpy.types.Node:\n \"\"\"対象マテリアルに指定テクスチャを参照する画像ノードを追加する\n\n Args:\n arg_material (bpy.types.Material): 対象マテリアル\n arg_image (bpy.types.Image): 指定テクスチャ\n\n Returns:\n bpy.types.Node: 作成ノードの参照\n \"\"\"\n\n # ノード操作のマニュアル\n # (https://docs.blender.org/api/current/bpy.types.Node.html)\n\n # ターゲットマテリアルのノード参照を取得\n mat_nodes = arg_material.node_tree.nodes\n\n # テクスチャノードの追加\n texture_node = mat_nodes.new(type=\"ShaderNodeTexImage\")\n\n # テクスチャノードに指定画像を設定する\n texture_node.image = arg_image\n\n return texture_node\n\n\n# 対象マテリアルの指定ノードのみを選択状態する\ndef select_node_target(arg_material:bpy.types.Material, arg_node:bpy.types.Node):\n \"\"\"対象マテリアルの指定ノードのみを選択状態する\n\n Args:\n arg_material (bpy.types.Material): 対象マテリアル\n arg_node (bpy.types.Node): 指定ノード\n \"\"\"\n\n # ノード操作のマニュアル\n # (https://docs.blender.org/api/current/bpy.types.Node.html)\n # ノードリスト操作のマニュアル\n # (https://docs.blender.org/api/current/bpy.types.Nodes.html)\n\n # ターゲットマテリアルのノード参照を取得\n mat_nodes = arg_material.node_tree.nodes\n\n # 全てのノードの選択状態を解除する\n for mat_node in mat_nodes:\n # 選択状態を解除する\n mat_node.select = False\n\n # 指定ノードを選択状態にする\n arg_node.select = True\n\n # 指定ノードをアクティブにする\n mat_nodes.active = arg_node\n\n return\n\n# 指定オブジェクトのアンビエントオクルージョンをベイクする\ndef bake_ambientocclusion(\n arg_object:bpy.types.Object, arg_bakemargin:int=0, \n arg_onesample:bool=False, arg_GPUuse:bool=False,\n arg_aofactor:float=1.0, arg_distance:float=10.0):\n \"\"\"指定オブジェクトのアンビエントオクルージョンをベイクする\n\n Args:\n arg_object (bpy.types.Object): 指定オブジェクト\n arg_bakemargin (int, optional): ベイク余白. Defaults to 0.\n arg_onesample (bool, optional): 簡易サンプリング指定. Defaults to False.\n arg_GPUuse (bool, optional): GPU利用指定. Defaults to False.\n arg_aofactor (float, optional): AO係数. Defaults to 1.0.\n arg_distance (float, optional): AO距離. 
Defaults to 10.0.\n    \"\"\"\n\n    # Deselect all objects\n    for obj in bpy.context.scene.objects:\n        # Clear the selection state\n        obj.select_set(False)\n\n    # Select the target object\n    arg_object.select_set(True)\n\n    # Make the target object active\n    bpy.context.view_layer.objects.active = arg_object\n\n    # Switch the render engine to CYCLES\n    bpy.context.scene.render.engine = 'CYCLES'\n\n    # Check whether the GPU should be used\n    if arg_GPUuse == True:\n        # If requested, configure the GPU\n        bpy.context.scene.cycles.device = 'GPU'\n        # Select CUDA\n        bpy.context.preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'\n        # Enumerate the available devices\n        for devices in bpy.context.preferences.addons['cycles'].preferences.get_devices():\n            for device in devices:\n                # Use every device whose type is CUDA\n                if device.type == 'CUDA':\n                    device.use = True\n\n    # Configure the render.bake settings up front\n    bake_setting = bpy.context.scene.render.bake\n    \n    # Disable [selected -> active] baking\n    bake_setting.use_selected_to_active = False\n\n    # Get a reference to the current world\n    context_world = bpy.context.scene.world\n\n    # Enable ambient occlusion\n    context_world.light_settings.use_ambient_occlusion = True\n\n    # Set the AO factor\n    context_world.light_settings.ao_factor = arg_aofactor\n\n    # Set the AO distance\n    context_world.light_settings.distance = arg_distance\n\n    # Remember the current sample counts\n    current_samples = bpy.context.scene.cycles.samples\n    current_preview_samples = bpy.context.scene.cycles.preview_samples\n\n    # Check whether single-sample baking is requested\n    if arg_onesample == True:\n        # Reduce the sample counts\n        bpy.context.scene.cycles.samples = 1\n        bpy.context.scene.cycles.preview_samples = 1\n\n    # Run the bake with bake type 'AO'\n    # Available bake types:\n    # ('COMBINED', 'AO', 'SHADOW', 'NORMAL', 'UV', 'ROUGHNESS',\n    #  'EMIT', 'ENVIRONMENT', 'DIFFUSE', 'GLOSSY', 'TRANSMISSION')\n    # (settings other than render.bake must be passed as arguments)\n    bpy.ops.object.bake(type='AO', margin=arg_bakemargin)\n\n    # Restore the original sample counts\n    bpy.context.scene.cycles.samples = current_samples\n    bpy.context.scene.cycles.preview_samples = current_preview_samples\n\n    return\n\n# Delete the given node from the target material\ndef delete_node_target(arg_material:bpy.types.Material, arg_node:bpy.types.Node):\n    \"\"\"Delete the given node from the target material\n\n    Args:\n        arg_material (bpy.types.Material): target material\n        arg_node (bpy.types.Node): node to delete\n    \"\"\"\n\n    # Node API reference\n    # (https://docs.blender.org/api/current/bpy.types.Node.html)\n\n    # Get the node collection of the target material\n    mat_nodes = arg_material.node_tree.nodes\n\n    # Remove the node\n    mat_nodes.remove(arg_node)\n\n    return\n","repo_name":"HoloAdventure/MRTKChannelMapMaker_BlenderAddon","sub_path":"bake_ambientocclusion_texture.py","file_name":"bake_ambientocclusion_texture.py","file_ext":"py","file_size_in_byte":9877,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"38968370397","text":"import tensorflow as tf\nimport numpy as np\n\nimport sys\nif sys.version[0] == '2':\n    import cPickle as pkl\nelse:\n    import pickle as pkl\n\ndef init_var_map(init_vars, params, init_path=None):\n    # Default to an empty map so the 'init_method in load_var_map' lookup below\n    # cannot raise a NameError when no init_path is given\n    load_var_map = {}\n    if init_path is not None:\n        load_var_map = pkl.load(open(init_path, 'rb'))\n        print('load variable map from', init_path, load_var_map.keys())\n    var_map = {}\n    for var_name, var_shape, init_method, dtype in init_vars:\n        if init_method == 'zero':\n            var_map[var_name] = tf.Variable(tf.zeros(var_shape, dtype=dtype), name=var_name, dtype=dtype)\n        elif init_method == 'one':\n            var_map[var_name] = tf.Variable(tf.ones(var_shape, dtype=dtype), name=var_name, dtype=dtype)\n        elif init_method == 'normal':\n            var_map[var_name] = tf.Variable(tf.random_normal(var_shape, mean=0.0, stddev=params.STDDEV, dtype=dtype),\n                                            name=var_name, dtype=dtype)\n        elif init_method == 'tnormal':\n            var_map[var_name] = 
tf.Variable(tf.truncated_normal(var_shape, mean=0.0, stddev=params.STDDEV, dtype=dtype),\n name=var_name, dtype=dtype)\n elif init_method == 'uniform':\n var_map[var_name] = tf.Variable(tf.random_uniform(var_shape, minval=params.MINVAL, maxval=params.MAXVAL, dtype=dtype),\n name=var_name, dtype=dtype)\n elif init_method == 'xavier':\n maxval = np.sqrt(6. / np.sum(var_shape))\n minval = -maxval\n var_map[var_name] = tf.Variable(tf.random_uniform(var_shape, minval=minval, maxval=maxval, dtype=dtype),\n name=var_name, dtype=dtype)\n elif isinstance(init_method, int) or isinstance(init_method, float):\n var_map[var_name] = tf.Variable(tf.ones(var_shape, dtype=dtype) * init_method, name=var_name, dtype=dtype)\n elif init_method in load_var_map:\n if load_var_map[init_method].shape == tuple(var_shape):\n var_map[var_name] = tf.Variable(load_var_map[init_method], name=var_name, dtype=dtype)\n else:\n print('BadParam: init method', init_method, 'shape', var_shape, load_var_map[init_method].shape)\n else:\n print('BadParam: init method', init_method)\n return var_map\n\n\ndef init_uninitialized(sess):\n sess.run(tf.variables_initializer(\n [v for v in tf.global_variables()\n if v.name.split(':')[0]\n in set(sess.run(tf.report_uninitialized_variables()))\n ])\n )\n\n\ndef xavier_init(fan_in, fan_out, constant = 1):\n low = -constant * np.sqrt(6.0 / (fan_in + fan_out))\n high = constant * np.sqrt(6.0 / (fan_in + fan_out))\n return tf.random_uniform((fan_in, fan_out), minval = low, maxval = high, dtype = tf.float32)\n\n\ndef guarantee_initialized_variables(session, list_of_variables = None):\n if list_of_variables is None:\n list_of_variables = tf.all_variables()\n uninitialized_variables = list(tf.get_variable(name) for name in\n session.run(tf.report_uninitialized_variables(list_of_variables)))\n session.run(tf.initialize_variables(uninitialized_variables))\n #return unintialized_variables\n\n\ndef new_variable_initializer(sess):\n uninitialized_vars = []\n for var in tf.global_variables():\n try:\n _ = sess.run(var)\n except tf.errors.FailedPreconditionError:\n uninitialized_vars.append(var)\n init_new_vars_op = tf.variables_initializer(uninitialized_vars)\n return init_new_vars_op\n #sess.run(init_new_vars_op)","repo_name":"sandwhite/usual-tools","sub_path":"tensorflow-lib/util/init_util.py","file_name":"init_util.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13765391779","text":"import sys\nimport tty\nimport termios\nimport asyncio\nfrom spider import Spider\nfrom ezblock import Camera\n\npower_val = 50\nkey = 'status'\n\ncam = Camera(0, rotation=180)\ncam.start()\n\ndef readchar():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef readkey(getchar_fn=None):\n getchar = getchar_fn or readchar\n c1 = getchar()\n if ord(c1) != 0x1b:\n return c1\n c2 = getchar()\n if ord(c2) != 0x5b:\n return c1\n c3 = getchar()\n return chr(0x10 + ord(c3) - 65)\n\ndef Keyborad_control():\n sp = Spider([1,2,3,4,5,6,7,8,9,10,11,12])\n \n while True:\n global power_val\n key=readkey()\n print(key)\n if key=='w':\n sp.do_action(\"forward\",speed=100)\n elif key=='a':\n sp.do_action(\"turn left\",speed=100)\n elif key=='s':\n sp.do_action(\"backward\",speed=100)\n elif key=='d':\n sp.do_action(\"turn right\",speed=100)\n elif key=='n':\n 
sp.do_action(\"sit\",speed=100)\n elif key=='r':\n sp.do_action(\"dance\",speed=100)\n else:\n if key=='l':\n sp.do_action(\"look right\",speed=100)\n elif key=='j':\n sp.do_action(\"look left\",speed=100)\n elif key=='i':\n sp.do_action(\"look up\",speed=100)\n elif key=='k':\n sp.do_action(\"look down\",speed=100) \n elif key=='f':\n sp.do_action(\"push up\",speed=100)\n elif key=='g':\n sp.do_action(\"wave\",speed=100)\n else:\n sp.do_action(\"stand\",speed=100)\n \n if key=='q':\n print(\"quit\") \n break \nif __name__ == '__main__':\n Keyborad_control()\n\n\n\n\n","repo_name":"ezblockcode/ezb-robot","sub_path":"examples/spider_example/keyborad_control.py","file_name":"keyborad_control.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6785309846","text":"from struct import pack\nfrom ipaddress import ip_address\nfrom ipaddress import IPv4Address\nfrom ipaddress import IPv6Address\nfrom exabgp.protocol.ip import IPv4\nfrom exabgp.protocol.ip import IPv6\nfrom exabgp.protocol.family import AFI\n\nfrom exabgp.bgp.message.update.nlri.qualifier import Labels\nfrom exabgp.bgp.message.update.nlri.qualifier import RouteDistinguisher\nfrom exabgp.bgp.message.update.attribute.sr.prefixsid import PrefixSid\nfrom exabgp.bgp.message.update.attribute.sr.labelindex import SrLabelIndex\nfrom exabgp.bgp.message.update.attribute.sr.srgb import SrGb\nfrom exabgp.bgp.message.update.attribute.sr.srv6.l3service import Srv6L3Service\nfrom exabgp.bgp.message.update.attribute.sr.srv6.l2service import Srv6L2Service\nfrom exabgp.bgp.message.update.attribute.sr.srv6.sidinformation import Srv6SidInformation\nfrom exabgp.bgp.message.update.attribute.sr.srv6.sidstructure import Srv6SidStructure\nfrom exabgp.bgp.message.update.nlri.mup import InterworkSegmentDiscoveryRoute\nfrom exabgp.bgp.message.update.nlri.mup import DirectSegmentDiscoveryRoute\nfrom exabgp.bgp.message.update.nlri.mup import Type1SessionTransformedRoute\nfrom exabgp.bgp.message.update.nlri.mup import Type2SessionTransformedRoute\n\n\ndef label(tokeniser):\n labels = []\n value = tokeniser()\n\n if value == '[':\n while True:\n value = tokeniser()\n if value == ']':\n break\n labels.append(int(value))\n else:\n labels.append(int(value))\n\n return Labels(labels)\n\n\ndef route_distinguisher(tokeniser):\n data = tokeniser()\n\n separator = data.find(':')\n if separator > 0:\n prefix = data[:separator]\n suffix = int(data[separator + 1 :])\n\n if '.' 
in prefix:\n data = [bytes([0, 1])]\n data.extend([bytes([int(_)]) for _ in prefix.split('.')])\n data.extend([bytes([suffix >> 8]), bytes([suffix & 0xFF])])\n rtd = b''.join(data)\n else:\n number = int(prefix)\n if number < pow(2, 16) and suffix < pow(2, 32):\n rtd = bytes([0, 0]) + pack('!H', number) + pack('!L', suffix)\n elif number < pow(2, 32) and suffix < pow(2, 16):\n rtd = bytes([0, 2]) + pack('!L', number) + pack('!H', suffix)\n else:\n raise ValueError('invalid route-distinguisher %s' % data)\n\n return RouteDistinguisher(rtd)\n\n\n# [ 300, [ ( 800000,100 ), ( 1000000,5000 ) ] ]\ndef prefix_sid(tokeniser): # noqa: C901\n sr_attrs = []\n srgbs = []\n srgb_data = []\n value = tokeniser()\n get_range = False\n consume_extra = False\n try:\n if value == '[':\n label_sid = tokeniser()\n while True:\n value = tokeniser()\n if value == '[':\n consume_extra = True\n continue\n if value == ',':\n continue\n if value == '(':\n while True:\n value = tokeniser()\n if value == ')':\n break\n if value == ',':\n get_range = True\n continue\n if get_range:\n srange = value\n get_range = False\n else:\n base = value\n if value == ')':\n srgb_data.append((base, srange))\n continue\n if value == ']':\n break\n if consume_extra:\n tokeniser()\n except Exception as e:\n raise ValueError('could not parse BGP PrefixSid attribute: {}'.format(e))\n\n if int(label_sid) < pow(2, 32):\n sr_attrs.append(SrLabelIndex(int(label_sid)))\n\n for srgb in srgb_data:\n if len(srgb) == 2 and int(srgb[0]) < pow(2, 24) and int(srgb[1]) < pow(2, 24):\n srgbs.append((int(srgb[0]), int(srgb[1])))\n else:\n raise ValueError('could not parse SRGB tupple')\n\n if srgbs:\n sr_attrs.append(SrGb(srgbs))\n\n return PrefixSid(sr_attrs)\n\n\n# ( [l2-service|l3-service] )\n# ( [l2-service|l3-service] )\n# ( [l2-service|l3-service] [, , , , , ] )\ndef prefix_sid_srv6(tokeniser):\n value = tokeniser()\n if value != \"(\":\n raise Exception(\"expect '(', but received '%s'\" % value)\n\n service_type = tokeniser()\n if service_type not in [\"l3-service\", \"l2-service\"]:\n raise Exception(\"expect 'l3-service' or 'l2-service', but received '%s'\" % value)\n\n sid = IPv6.unpack(IPv6.pton(tokeniser()))\n behavior = 0xFFFF\n subtlvs = []\n subsubtlvs = []\n value = tokeniser()\n if value != \")\":\n base = 10 if not value.startswith(\"0x\") else 16\n behavior = int(value, base)\n value = tokeniser()\n if value == \"[\":\n values = []\n for i in range(6):\n if i != 0:\n value = tokeniser()\n if value != \",\":\n raise Exception(\"expect ',', but received '%s'\" % value)\n value = tokeniser()\n base = 10 if not value.startswith(\"0x\") else 16\n values.append(int(value, base))\n\n value = tokeniser()\n if value != \"]\":\n raise Exception(\"expect ']', but received '%s'\" % value)\n\n value = tokeniser()\n\n subsubtlvs.append(\n Srv6SidStructure(\n loc_block_len=values[0],\n loc_node_len=values[1],\n func_len=values[2],\n arg_len=values[3],\n tpose_len=values[4],\n tpose_offset=values[5],\n )\n )\n\n subtlvs.append(\n Srv6SidInformation(\n sid=sid,\n behavior=behavior,\n subsubtlvs=subsubtlvs,\n )\n )\n\n if value != \")\":\n raise Exception(\"expect ')', but received '%s'\" % value)\n\n if service_type == \"l3-service\":\n return PrefixSid([Srv6L3Service(subtlvs=subtlvs)])\n elif service_type == \"l2-service\":\n return PrefixSid([Srv6L2Service(subtlvs=subtlvs)])\n\ndef parse_ip_prefix(tokeninser):\n addrstr, length = tokeninser.split(\"/\")\n if length == None:\n raise Exception(\"unexpect prefix format '%s'\" % tokeninser)\n\n addr = 
ip_address(addrstr)\n if isinstance(addr, IPv4Address):\n ip = IPv4.unpack(IPv4.pton(addrstr))\n elif isinstance(addr, IPv6Address):\n ip = IPv6.unpack(IPv6.pton(addrstr))\n else:\n raise Exception(\"unexpect ipaddress format '%s'\" % addrstr)\n\n return ip, length\n\n# 'mup-isd rd ',\ndef srv6_mup_isd(tokeniser, afi):\n ip, length = parse_ip_prefix(tokeniser())\n\n value = tokeniser()\n if \"rd\" == value:\n rd = route_distinguisher(tokeniser)\n else:\n raise Exception(\"expect rd, but received '%s'\" % value)\n\n return InterworkSegmentDiscoveryRoute(\n rd=rd,\n ipprefix_len=int(length),\n ipprefix=ip,\n afi=afi,\n )\n\n# 'mup-dsd rd ',\ndef srv6_mup_dsd(tokeniser, afi):\n if afi == AFI.ipv4:\n ip = IPv4.unpack(IPv4.pton(tokeniser()))\n elif afi == AFI.ipv6:\n ip = IPv6.unpack(IPv6.pton(tokeniser()))\n else:\n raise Exception(\"unexpect afi: %s\" % afi)\n value = tokeniser()\n if \"rd\" == value:\n rd = route_distinguisher(tokeniser)\n else:\n raise Exception(\"expect rd, but received '%s'\" % value)\n\n return DirectSegmentDiscoveryRoute(\n rd=rd,\n ip=ip,\n afi=afi,\n )\n\n# 'mup-t1st rd teid qfi endpoint ',\ndef srv6_mup_t1st(tokeniser, afi):\n ip, length = parse_ip_prefix(tokeniser())\n\n value = tokeniser()\n if \"rd\" == value:\n rd = route_distinguisher(tokeniser)\n else:\n raise Exception(\"expect rd, but received '%s'\" % value)\n\n value = tokeniser()\n if \"teid\" == value:\n teid = tokeniser()\n else:\n raise Exception(\"expect teid, but received '%s'\" % value)\n\n value = tokeniser()\n if \"qfi\" == value:\n qfi = tokeniser()\n else:\n raise Exception(\"expect qfi, but received '%s'\" % value)\n\n value = tokeniser()\n if \"endpoint\" == value:\n if afi == AFI.ipv4:\n endpoint_ip = IPv4.unpack(IPv4.pton(tokeniser()))\n endpoint_ip_len = 32\n elif afi == AFI.ipv6:\n endpoint_ip = IPv6.unpack(IPv6.pton(tokeniser()))\n endpoint_ip_len = 128\n else:\n raise Exception(\"unexpect afi: %s\" % afi)\n else:\n raise Exception(\"expect endpoint, but received '%s'\" % value)\n\n return Type1SessionTransformedRoute(\n rd=rd,\n ipprefix_len=int(length),\n ipprefix=ip,\n teid=int(teid),\n qfi=int(qfi),\n afi=afi,\n endpoint_ip=endpoint_ip,\n endpoint_ip_len=int(endpoint_ip_len),\n )\n\n# 'mup-t2st rd teid ',\ndef srv6_mup_t2st(tokeniser, afi):\n if afi == AFI.ipv4:\n endpoint_ip = IPv4.unpack(IPv4.pton(tokeniser()))\n endpoint_ip_len = 32\n elif afi == AFI.ipv6:\n endpoint_ip = IPv6.unpack(IPv6.pton(tokeniser()))\n endpoint_ip_len = 128\n else:\n raise Exception(\"unexpect afi: %s\" % afi)\n\n value = tokeniser()\n if \"rd\" == value:\n rd = route_distinguisher(tokeniser)\n else:\n raise Exception(\"expect rd, but received '%s'\" % value)\n\n value = tokeniser()\n if \"teid\" == value:\n value = tokeniser()\n parse_teid = value.split(\"/\")\n if len(parse_teid) != 2:\n raise Exception(\"unexpect teid format, this expect format //\")\n\n teid = int(parse_teid[0])\n teid_len = int(parse_teid[1])\n else:\n raise Exception(\"expect teid, but received '%s'\" % value)\n\n return Type2SessionTransformedRoute(\n rd=rd,\n endpoint_ip_len=int(endpoint_ip_len),\n endpoint_ip=endpoint_ip,\n teid=teid,\n teid_len=teid_len,\n afi=afi,\n )\n","repo_name":"Exa-Networks/exabgp","sub_path":"src/exabgp/configuration/static/mpls.py","file_name":"mpls.py","file_ext":"py","file_size_in_byte":10388,"program_lang":"python","lang":"en","doc_type":"code","stars":1986,"dataset":"github-code","pt":"12"} +{"seq_id":"22694650500","text":"\nimport numpy as np\nfrom flask import Flask, request, render_template\nimport 
pandas as pd\n\napp = Flask(__name__)\n\n@app.route(\"/\",methods=[\"GET\"])\ndef home_page():\n return render_template(\"home.html\")\n\n@app.route(\"/predict\",methods=[\"POST\",\"GET\"])\ndef predict():\n if request.method==\"POST\":\n df = pd.read_csv(\"https://raw.githubusercontent.com/teenajain1988/Google_analytics-finale-submission-file/main/lgb_models2.csv\")\n df = df.astype({\"fullVisitorId\": str})\n fullVisitorId=str(request.form[\"fullVisitorId\"])\n if fullVisitorId in df[\"fullVisitorId\"].values:\n result = df[df[\"fullVisitorId\"] == fullVisitorId][\"PredictedLogRevenue\"].values[0]\n return render_template(\"home1.html\",result=f\"The predicted log revenue of of the person having fullVisitorId {fullVisitorId} is {result}\")\n else:\n return render_template(\"home2.html\")\n else:\n return render_template('home.html')\n\nif __name__==\"__main__\":\n app.run()","repo_name":"teenajain1988/Google-Analytics-Customer-Revenue-Prediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34211051686","text":"import unittest\n\nfrom aistore.sdk.const import ProviderAIS\nfrom aistore.sdk.errors import ErrBckNotFound, InvalidBckProvider\n\nfrom aistore.sdk import Client\nimport requests\n\nfrom aistore.sdk.job import Job\nfrom tests.utils import create_and_put_object, random_string\nfrom tests.integration import CLUSTER_ENDPOINT, REMOTE_BUCKET\n\n# If remote bucket is not set, skip all cloud-related tests\nREMOTE_SET = REMOTE_BUCKET != \"\" and not REMOTE_BUCKET.startswith(ProviderAIS + \":\")\n\n\nclass TestBucketOps(unittest.TestCase): # pylint: disable=unused-variable\n def setUp(self) -> None:\n self.bck_name = random_string()\n\n self.client = Client(CLUSTER_ENDPOINT)\n self.buckets = []\n self.provider = None\n self.cloud_bck = None\n self.obj_prefix = \"test_bucket_ops-\"\n if REMOTE_SET:\n self.cloud_objects = []\n self.provider, self.cloud_bck = REMOTE_BUCKET.split(\"://\")\n self._cleanup_objects()\n\n def tearDown(self) -> None:\n # Try to destroy all temporary buckets if there are left.\n for bck_name in self.buckets:\n try:\n self.client.bucket(bck_name).delete()\n except ErrBckNotFound:\n pass\n # If we are using a remote bucket for this specific test, delete the objects instead of the full bucket\n if self.cloud_bck:\n bucket = self.client.bucket(self.cloud_bck, provider=self.provider)\n for obj_name in self.cloud_objects:\n bucket.objects(obj_range=obj_name).delete()\n\n def _cleanup_objects(self):\n cloud_bck = self.client.bucket(self.cloud_bck, self.provider)\n # Clean up any other objects created with the test prefix, potentially from aborted tests\n object_names = [\n x.name for x in cloud_bck.list_objects(self.obj_prefix).get_entries()\n ]\n if len(object_names) > 0:\n job_id = cloud_bck.objects(obj_names=object_names).delete()\n Job(self.client).wait_for_job(job_id=job_id, timeout=30)\n\n def test_bucket(self):\n res = self.client.cluster().list_buckets()\n count = len(res)\n self.create_bucket(self.bck_name)\n res = self.client.cluster().list_buckets()\n count_new = len(res)\n self.assertEqual(count + 1, count_new)\n\n def create_bucket(self, bck_name):\n self.buckets.append(bck_name)\n bucket = self.client.bucket(bck_name)\n bucket.create()\n return bucket\n\n def test_head_bucket(self):\n self.create_bucket(self.bck_name)\n self.client.bucket(self.bck_name).head()\n self.client.bucket(self.bck_name).delete()\n try:\n 
self.client.bucket(self.bck_name).head()\n except requests.exceptions.HTTPError as e:\n self.assertEqual(e.response.status_code, 404)\n\n def test_rename_bucket(self):\n from_bck_n = self.bck_name + \"from\"\n to_bck_n = self.bck_name + \"to\"\n self.create_bucket(from_bck_n)\n res = self.client.cluster().list_buckets()\n count = len(res)\n\n bck_obj = self.client.bucket(from_bck_n)\n self.assertEqual(bck_obj.name, from_bck_n)\n job_id = bck_obj.rename(to_bck=to_bck_n)\n self.assertNotEqual(job_id, \"\")\n\n # wait for rename to finish\n self.client.job().wait_for_job(job_id=job_id)\n\n # check if objects name has changed\n self.client.bucket(to_bck_n).head()\n\n # new bucket should be created and accessible\n self.assertEqual(bck_obj.name, to_bck_n)\n\n # old bucket should be inaccessible\n try:\n self.client.bucket(from_bck_n).head()\n except requests.exceptions.HTTPError as e:\n self.assertEqual(e.response.status_code, 404)\n\n # length of buckets before and after rename should be same\n res = self.client.cluster().list_buckets()\n count_new = len(res)\n self.assertEqual(count, count_new)\n\n def test_copy_bucket(self):\n from_bck = self.bck_name + \"from\"\n to_bck = self.bck_name + \"to\"\n self.create_bucket(from_bck)\n self.create_bucket(to_bck)\n\n job_id = self.client.bucket(from_bck).copy(to_bck)\n self.assertNotEqual(job_id, \"\")\n self.client.job().wait_for_job(job_id=job_id)\n\n @unittest.skipIf(\n not REMOTE_SET,\n \"Remote bucket is not set\",\n )\n def test_evict_bucket(self):\n obj_name = \"test_evict_bucket-\"\n create_and_put_object(\n self.client,\n bck_name=self.cloud_bck,\n provider=self.provider,\n obj_name=obj_name,\n )\n bucket = self.client.bucket(self.cloud_bck, provider=self.provider)\n objects = bucket.list_objects(\n props=\"name,cached\", prefix=obj_name\n ).get_entries()\n self.verify_objects_cache_status(objects, True)\n\n bucket.evict()\n\n objects = bucket.list_objects(\n props=\"name,cached\", prefix=obj_name\n ).get_entries()\n self.verify_objects_cache_status(objects, False)\n\n def test_evict_bucket_local(self):\n bucket = self.create_bucket(self.bck_name)\n with self.assertRaises(InvalidBckProvider):\n bucket.evict()\n\n def verify_objects_cache_status(self, objects, expected_status):\n self.assertTrue(len(objects) > 0)\n for obj in objects:\n self.assertTrue(obj.is_ok())\n if expected_status:\n self.assertTrue(obj.is_cached())\n else:\n self.assertFalse(obj.is_cached())\n\n def create_object_list(self, prefix, provider, bck_name, suffix=\"\", length=10):\n obj_names = [prefix + str(i) + suffix for i in range(length)]\n for obj_name in obj_names:\n if self.cloud_bck and bck_name == self.cloud_bck:\n self.cloud_objects.append(obj_name)\n create_and_put_object(\n self.client,\n bck_name=bck_name,\n provider=provider,\n obj_name=obj_name,\n )\n return obj_names\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"and-1/aistore","sub_path":"python/tests/integration/sdk/test_bucket_ops.py","file_name":"test_bucket_ops.py","file_ext":"py","file_size_in_byte":6128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"13548174284","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 18 13:18:50 2018\r\n\r\n@author: zhiwei ren\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef fftsc(Mx,My,Mz,export_x,export_y,export_z,T,X,\r\n T_resample,X_resample,win,Resample_Switch,\r\n f_cutoff,klim,x0,x1,cell):\r\n if export_z == 1:\r\n M0_matrix = Mz\r\n elif 
export_y == 1:\r\n M0_matrix = My\r\n else:\r\n M0_matrix = Mx\r\n M, n = M0_matrix.shape\r\n N = int((x1-x0)/cell)\r\n M_matrix = np.zeros((M,N))\r\n start = 0\r\n end = N\r\n count = 0\r\n while end <= n:\r\n M_matrix = M_matrix + M0_matrix[:,start:end]\r\n start += N\r\n end += N\r\n count += 1\r\n M_matrix = M_matrix / count\r\n \r\n Ms = 1e3\r\n M_matrix = M_matrix * Ms\r\n fs, ks = 1 / T, 1 / (2*X)\r\n M_fft, N_fft = M, N\r\n Matrix = np.zeros((M_fft,N_fft))\r\n \r\n # resampling\r\n if Resample_Switch == 1:\r\n M_fft, N_fft = round(M / T_resample), round(N / X_resample)\r\n j = 0\r\n for i in range(0,N-1):\r\n if np.mod(i,X_resample) == 0:\r\n Matrix[:,j] = M_matrix[:,i]\r\n j += 1\r\n else:\r\n Matrix = M_matrix\r\n \r\n # window function\r\n if win == 1:\r\n from scipy.signal import chebwin as chebwin\r\n attenuation = 50.0\r\n win1 = chebwin(M_fft,attenuation)\r\n win2 = chebwin(N_fft,attenuation)\r\n win1 = win1[np.newaxis,:]\r\n win2 = win2[np.newaxis,:]\r\n win1 = win1.T\r\n win = np.dot(win1, win2)\r\n Matrix = Matrix * win\r\n else:\r\n win = win\r\n \r\n # 2D FFT\r\n fftMatrix = np.zeros((M_fft,N_fft))\r\n fftMatrix = np.fft.fft2(Matrix)\r\n fftMatrix = np.abs(np.fft.fftshift(fftMatrix))\r\n fs = fs / (2e9)\r\n ks = -2*np.pi*ks/(1e9)\r\n fftMatrix = fftMatrix[0:round(M_fft/2), 0:N_fft-1]\r\n fftMatrix = 10 * np.log10(fftMatrix / np.max(fftMatrix))\r\n \r\n # elimate reflection\r\n M_mean = np.mean(fftMatrix)\r\n M_max = np.max(fftMatrix)\r\n fftMatrix = np.clip(fftMatrix,M_mean,M_max)\r\n \r\n # image show\r\n X_neglim, X_poslim = ks*2e2, -ks*2e2\r\n Y_neglim, Y_poslim = 0, fs\r\n extent = [X_neglim,X_poslim,Y_neglim,Y_poslim]\r\n plt.figure()\r\n plt.rcParams['figure.figsize'] = (9.0,8.5)\r\n plt.imshow(fftMatrix, cmap = plt.cm.jet, origin='upper',extent=extent)\r\n \r\n klim = klim*2e2\r\n if klim in [20, 40, 60, 80, 100, 120, 140, 160, 180, 200]:\r\n plt.xticks([20, 40, 60, 80, 100, \r\n 120, 140, 160, 180, 200],\r\n ['0.1', '0.2', '0.3', '0.4','0.5', '0.6',\r\n '0.7','0.8','0.9','1.0'],fontsize = 18)\r\n elif klim in [2, 6, 10, 14, 18]:\r\n plt.xticks([2, 6, 10, 14, 18],\r\n ['0.01', '0.03', '0.05','0.07','0.09'],fontsize = 18)\r\n else:\r\n plt.xticks([4, 8, 12, 16],\r\n ['0.02', '0.04', '0.06','0.08'],fontsize = 18)\r\n plt.yticks(fontsize = 18)\r\n plt.xlim(0,klim)\r\n plt.ylim(0,f_cutoff)\r\n plt.colorbar(shrink = 0.5)\r\n plt.xlabel(r\"$\\mathrm{Wave\\enspace vector}\\enspace k_x\\enspace \\mathrm{(nm^{-1})}$\",\r\n fontsize = 17)\r\n plt.ylabel(r'$\\mathrm{Frequency\\enspace (GHz)}$',fontsize = 17)\r\n plt.savefig('Dispersion curve.eps', dpi=500)\r\n plt.show()\r\n return fftMatrix\r\n\r\nif __name__ == '__main__':\r\n x = y = np.arange(-3.0,3.0,0.025)\r\n X,Y = np.meshgrid(x,y)\r\n Z1 = np.exp(-X**2-Y**2)\r\n Z2 = np.exp(-(X-1)**2 - (Y-1)**2)\r\n Z = (Z1 - Z2)*2\r\n fftsc(1,1,Z,0,0,1,0.025,0.025,1,1,0,1)","repo_name":"LeiHan-THU/MuFA","sub_path":"Dispersion.py","file_name":"Dispersion.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"35235078104","text":"#/usr/bin/env python3\n# -*- encoding: utf-8-*-\n\nfrom __future__ import division, unicode_literals, print_function\nfrom pylab import *\nimport csv\nimport easygui\nimport matplotlib\nimport sympy\nimport json\n\ndef get_magnetic_field(i, rx, ry, mu0=4*pi*1e-7, mu=1):\n r = sqrt(rx**2 + ry**2)\n B = mu0*mu*i/r\n By = B*rx/r\n Bx = -B*ry/r\n return Bx, By \n\nclass Conductor():\n x = 0\n y = 0\n i = 0\n name = ''\n 
def __init__(self, x=0, y=0, i=0, name=''):\n        self.x = x\n        self.y = y\n        self.i = i\n        self.name = name\n    def magnetic_field(self, x, y):\n        Bx, By = get_magnetic_field(self.i, x-self.x, y-self.y)\n        return Bx, By\n    def __str__(self):\n        s = {'x': self.x, 'y':self.y, 'i': self.i}\n        return json.dumps(s)\n\nclass ConductorPlane():\n    def __init__(self, x, y):\n        # Keep the axes and use per-instance containers (mutable class-level\n        # defaults would be shared between planes)\n        self.x = x\n        self.y = y\n        self.Babs = zeros((len(x), len(y)))\n        self.Bxx = zeros((len(x), len(y)))\n        self.Byy = zeros((len(x), len(y)))\n        self.conductors = []\n        self.lookup_table = {}\n    def add_conductor(self, c):\n        self.conductors.append(c)\n    def remove_conductor(self, x):\n        if type(x) == Conductor:\n            if x in self.conductors:\n                self.conductors.remove(x)\n            else:\n                raise Warning('No such conductor in the system!')\n        elif type(x) == str:\n            # for/else: the else branch only runs when no matching name was found\n            for c in self.conductors:\n                if c.name == x:\n                    self.conductors.remove(c)\n                    break\n            else:\n                raise Warning('No such conductor in the system!')\n        else:\n            raise ValueError('Cannot work out what you want to delete.')\n    def get_magnetic_field(self):\n        lookup_table = self.lookup_table\n        Bxx = zeros(shape(self.Bxx))\n        Byy = zeros(shape(self.Byy))\n        for c in self.conductors:\n            cn = str(c)\n            if cn in lookup_table:\n                Bx = lookup_table[cn]['Bx']\n                By = lookup_table[cn]['By']\n            else:\n                Bx = zeros(shape(self.Bxx))\n                By = zeros(shape(self.Byy))\n                for i in range(0, len(self.x)):\n                    for j in range(0, len(self.y)):\n                        Bx[i,j], By[i,j] = c.magnetic_field(self.x[i], self.y[j])\n                lookup_table[cn] = {'Bx': Bx, 'By': By}\n            Bxx += Bx\n            Byy += By\n        Babs = zeros(shape(self.Babs))\n        for i in range(0, len(self.x)):\n            for j in range(0, len(self.y)):\n                Babs[i,j] = sqrt(Bxx[i,j]**2 + Byy[i,j]**2)\n        return Babs, Bxx, Byy\n    def read_from_file(self, filename):\n        with open(filename) as csvfile:\n            reader = csv.DictReader(csvfile, skipinitialspace=True)\n            for row in reader:\n                x = float(sympy.sympify(row['x']))\n                y = float(sympy.sympify(row['y']))\n                i = float(sympy.sympify(row['i']))\n                name = str(row['name'])\n                self.add_conductor(Conductor(x, y, i, name))\n\n# Define the plane where the magnetic field is to be calculated\nx = linspace(-50, 50, 1000)\ny = linspace(-10, 30, 1000)\np = ConductorPlane(x, y)\n# Read a source file\nfilename = easygui.fileopenbox()\np.read_from_file(filename)\n# Calculate magnetic field\nBabs, Bxx, Byy = p.get_magnetic_field()\n# Plot the magnetic field\npcolormesh(x, y, Babs.transpose()*1e6, cmap=cm.Blues, vmin=0, vmax=300)\ncolorbar()\nw = array([(max(y)-min(y))/(max(x)-min(x)), 1])\nstreamplot(x, y, Bxx.transpose()*1e6, Byy.transpose()*1e6, density=w*3, color='grey')\ncs = contour(x, y, Babs.transpose()*1e6, logspace(log10(0.04), log10(300), 10), colors='black')\nclabel(cs, fmt='%1.2f µT')\n#~ csRef = contour(x, y, Babs.transpose()*1e6, [300], colors='red')\n#~ clabel(csRef, fmt='%1.1f')\ngrid(True)\nhlines(0, min(x), max(x), color='green')\naxis('tight')\naxis('scaled')\nxlabel('Avstand fra spormidten, m')\nylabel('Høyde over skinneoverkant, m')\nfor c in p.conductors:\n    #~ annotate('{0}, {1} A'.format(c.name, c.i), (c.x, c.y))\n    if c.i > 0:\n        plot(c.x, c.y, '.', color='red')\n    elif c.i < 0:\n        plot(c.x, c.y, 'x', color='red')\n    else:\n        plot(c.x, c.y, 'o', color='red')\nshow()\n","repo_name":"staspika/magnetic-field-around-conductors","sub_path":"init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1301333274","text":"# -*- coding: UTF-8 -*-\r\n\r\nfrom maeve.api import Api\r\nfrom maeve.models import Character, Account, 
WalletTransaction, MarketOrder, ItemTypeIndex, ItemStats, StationIndex\r\nfrom google.appengine.ext import ndb\r\nfrom datetime import datetime, timedelta\r\nfrom google.appengine.api import taskqueue\r\nimport logging\r\n\r\n\r\n@ndb.tasklet\r\ndef get_characters_async():\r\n    characters = yield Character.query().fetch_async()\r\n    raise ndb.Return(characters)\r\n\r\n\r\n@ndb.tasklet\r\ndef get_accounts_async():\r\n    accounts = yield Account.query().fetch_async()\r\n    raise ndb.Return(accounts)\r\n\r\n\r\n@ndb.tasklet\r\ndef get_characters_and_accounts():\r\n    # Yielding a tuple of futures runs both queries in parallel\r\n    characters, accounts = yield get_characters_async(), get_accounts_async()\r\n    raise ndb.Return((characters, accounts))\r\n\r\n\r\n@ndb.tasklet\r\ndef get_latest_transaction(character):\r\n    if not character.last_transaction_key:\r\n        raise ndb.Return((character.key, None))\r\n    else:\r\n        transaction = yield ndb.get_async(character.last_transaction_key)\r\n        raise ndb.Return((character.key, transaction))\r\n\r\n\r\ndef update_index(new_items, new_stations):\r\n    logging.info('Index being updated')\r\n    item_index = ItemTypeIndex.query().get()\r\n    station_index = StationIndex.query().get()\r\n\r\n    if not item_index:\r\n        item_index = ItemTypeIndex()\r\n    if not station_index:\r\n        station_index = StationIndex()\r\n\r\n    item_index.items.update(new_items)\r\n    item_index.put_async()\r\n\r\n    station_index.stations.update(new_stations)\r\n    station_index.put_async()\r\n\r\n\r\ndef index_all_characters():\r\n\r\n    characters = Character.query(Character.active == True)\r\n    task_count = 0\r\n    update_after = datetime.now() - timedelta(hours=1)\r\n    for character in characters:\r\n        if not character.last_update or character.last_update < update_after:\r\n            task_count += 1\r\n            taskqueue.add(url='/_task/sync',\r\n                          params={'char': character.key.urlsafe()},\r\n                          queue_name='transaction-sync',\r\n                          )\r\n        else:\r\n            logging.debug('Skipping char {0}'.format(character.name))\r\n\r\n    logging.info('{0} sync tasks enqueued'.format(task_count))\r\n\r\n\r\ndef index_character(character, account):\r\n\r\n    try:\r\n        logging.info('Synching: Character {0} / {1}'.format(character.name, character.char_id))\r\n        item_stats = ItemStats.query(ItemStats.character_key == character.key).fetch_async()\r\n        orders = MarketOrder.query(MarketOrder.character_key == character.key).fetch_async()\r\n\r\n        api = Api(account.api_id, account.api_vcode)\r\n        api.authenticate()\r\n        api_char = api.get_character(character.char_id)\r\n\r\n        row_count = 250\r\n        all_items, all_stations = {}, {}\r\n\r\n        character.last_update = datetime.now()\r\n\r\n        last_transaction_id = character.last_transaction_id\r\n        last_transaction_date = character.last_transaction_date\r\n\r\n        api_wallet_transactions = api_char.WalletTransactions(rowCount=(last_transaction_id is None and 1000 or row_count))\r\n        item_stats = dict([(i.type_id, i) for i in item_stats.get_result()])\r\n\r\n        newest_transaction, oldest_transaction, items, stations = sync_transactions(character,\r\n                                                                                   api_wallet_transactions,\r\n                                                                                   last_transaction_id,\r\n                                                                                   last_transaction_date,\r\n                                                                                   item_stats)\r\n\r\n        all_items.update(items or {})\r\n        all_stations.update(stations or {})\r\n\r\n        while last_transaction_id and last_transaction_date and oldest_transaction and \\\r\n                (datetime.fromtimestamp(oldest_transaction.transactionDateTime) > last_transaction_date or oldest_transaction.transactionID > last_transaction_id):\r\n            logging.info('Fetching another batch from id {0}'.format(oldest_transaction.transactionID))\r\n\r\n            api_wallet_transactions = api_char.WalletTransactions(rowCount=row_count, fromID=oldest_transaction.transactionID)\r\n            newest_transaction, oldest_transaction, 
items, stations = sync_transactions(character,\r\n api_wallet_transactions,\r\n last_transaction_id,\r\n last_transaction_date,\r\n item_stats)\r\n\r\n all_items.update(items or {})\r\n all_stations.update(stations or {})\r\n\r\n sync_orders(character,\r\n api_char.MarketOrders(),\r\n orders.get_result())\r\n\r\n character.put_async()\r\n logging.info('Syncing done: Character {0} / {1}'.format(character.name, character.char_id))\r\n return all_items, all_stations\r\n except:\r\n import traceback\r\n logging.error('Error while syncing character {0} / {1}'.format(character.name, character.char_id))\r\n logging.error(traceback.format_exc())\r\n return None\r\n\r\n\r\ndef sync_transactions(character,\r\n api_wallet_transactions,\r\n last_transaction_id,\r\n last_transaction_date,\r\n item_stats):\r\n\r\n newest_transaction, oldest_transaction, items, stations = None, None, {}, {}\r\n to_put = []\r\n\r\n for row in api_wallet_transactions.transactions:\r\n if (not last_transaction_id and not last_transaction_date) or \\\r\n datetime.fromtimestamp(row.transactionDateTime) > last_transaction_date or row.transactionID > last_transaction_id:\r\n\r\n wt = WalletTransaction(character_key=character.key,\r\n char_id=character.char_id,\r\n transaction_id=row.transactionID,\r\n transaction_date=datetime.fromtimestamp(row.transactionDateTime),\r\n quantity=int(row.quantity),\r\n type_id=str(row.typeID),\r\n unit_price=float(row.price),\r\n station_id=str(row.stationID),\r\n client_id=str(row.clientID),\r\n client_name=str(row.clientName),\r\n transaction_type=(row.transactionType == 'sell' and WalletTransaction.SELL or WalletTransaction.BUY),\r\n journal_transaction_id=str(row.journalTransactionID))\r\n\r\n to_put.append(wt)\r\n items[wt.type_id] = row.typeName\r\n stations[wt.station_id] = row.stationName\r\n\r\n stats = item_stats.get(wt.type_id, None)\r\n abs_balance_change = wt.quantity * wt.unit_price\r\n\r\n if not stats:\r\n stats = ItemStats(user=character.user,\r\n char_id=character.char_id,\r\n character_key=character.key,\r\n type_id=wt.type_id,\r\n accumulated_cost=(wt.transaction_type == WalletTransaction.BUY and abs_balance_change or 0),\r\n accumulated_earnings=(wt.transaction_type == WalletTransaction.SELL and abs_balance_change or 0),\r\n items_bought=(wt.transaction_type == WalletTransaction.BUY and wt.quantity or 0),\r\n items_sold=(wt.transaction_type == WalletTransaction.SELL and wt.quantity or 0),\r\n )\r\n item_stats[wt.type_id] = stats\r\n else:\r\n if wt.transaction_type == WalletTransaction.BUY:\r\n stats.accumulated_cost += abs_balance_change\r\n stats.items_bought += wt.quantity\r\n else:\r\n stats.accumulated_earnings += abs_balance_change\r\n stats.items_sold += wt.quantity\r\n\r\n else:\r\n logging.debug('Skipped transaction {0}'.format(row.transactionID))\r\n\r\n if not oldest_transaction or row.transactionID < oldest_transaction.transactionID:\r\n oldest_transaction = row\r\n\r\n if not newest_transaction or row.transactionID > newest_transaction.transactionID:\r\n newest_transaction = row\r\n\r\n character.last_transaction_id = max(character.last_transaction_id, row.transactionID)\r\n row_date = datetime.fromtimestamp(row.transactionDateTime)\r\n character.last_transaction_date = character.last_transaction_date and max(character.last_transaction_date, row_date) or row_date\r\n\r\n ndb.put_multi_async(to_put)\r\n\r\n for stats in item_stats.values():\r\n stats.roi_yield = stats.accumulated_cost > 0 and (stats.accumulated_earnings - stats.accumulated_cost) / 
stats.accumulated_cost or 0\r\n avg_unit_cost = stats.items_bought > 0 and stats.accumulated_cost / stats.items_bought or 0\r\n avg_unit_earnings = stats.items_sold > 0 and stats.accumulated_earnings / stats.items_sold or 0\r\n stats.avg_roi_yield = avg_unit_cost > 0 and (avg_unit_earnings - avg_unit_cost) / avg_unit_cost or 0\r\n\r\n ndb.put_multi_async(item_stats.values())\r\n\r\n return newest_transaction, oldest_transaction, items, stations\r\n\r\n\r\ndef sync_orders(character, api_orders, existing_orders):\r\n existing_orders = dict([(o.hash_key, o) for o in existing_orders])\r\n to_put = []\r\n\r\n for row in api_orders.orders:\r\n issued = datetime.fromtimestamp(row.issued)\r\n hash_key = hash((character.char_id,\r\n row.volEntered,\r\n str(row.stationID),\r\n str(row.typeID),\r\n issued))\r\n\r\n existing = existing_orders.get(hash_key, None)\r\n if existing:\r\n existing.remaining_quantity = row.volRemaining\r\n existing.order_state = row.orderState\r\n to_put.append(existing)\r\n\r\n else:\r\n order = MarketOrder(hash_key=hash_key,\r\n character_key=character.key,\r\n char_id=character.char_id,\r\n original_quantity=row.volEntered,\r\n remaining_quantity=row.volRemaining,\r\n station_id=str(row.stationID),\r\n type_id=str(row.typeID),\r\n unit_price=row.price,\r\n order_type=(row.bid and MarketOrder.BUY or MarketOrder.SELL),\r\n order_state=row.orderState,\r\n issued=issued)\r\n to_put.append(order)\r\n\r\n ndb.put_multi_async(to_put)\r\n\r\n","repo_name":"westmark/maeve","sub_path":"src/maeve/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":10101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"35305085359","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAPI descriptions.\n\n.. currentmodule:: {{cookiecutter.package_name}}.apis.api\n.. moduleauthor:: {{cookiecutter.author_name}} <{{cookiecutter.author_email}}>\n\"\"\"\nimport json\nimport logging\nfrom flask_restplus import Api\nfrom werkzeug.exceptions import HTTPException\nfrom .. 
import __version__\nfrom .crud import api as crud\nfrom .info import api as info\n\nAPI_ROOT: str = '/api' #: the common root for API routes\n\nLOGGER: logging.Logger = logging.getLogger(__name__) #: the module logger\n\nEX_CODES = {\n KeyError: 404\n} #: HTTP codes for exception types\n\n# Create the API object.\napi = Api(\n title='{{cookiecutter.project_name}}',\n version=__version__,\n description='{{cookiecutter.project_description}}'\n # Add other API metadata here.\n)\n\n# Add the namespaces.\napi.add_namespace(crud, path='/crud')\napi.add_namespace(info, path='/info')\n\n\n@api.errorhandler(Exception)\ndef handle_ex(ex: Exception):\n \"\"\"Last-resort exception handling.\"\"\"\n\n # Log the fact that the default handler had to handle the exception.\n LOGGER.exception(ex)\n\n # If this is a werkzeug HTTP exception, we can provide certain kinds\n # of helpful information...\n if isinstance(ex, HTTPException):\n return json.dumps({\n 'code': ex.code,\n 'name': ex.name,\n 'type': type(ex).__name__,\n 'message': ex.description\n })\n # Otherwise, return some basic information.\n return {\n 'type': type(ex).__name__,\n 'message': str(ex),\n }, EX_CODES.get(type(ex), 500)\n","repo_name":"patdaburu/cookiecutter-flask-restx","sub_path":"{{cookiecutter.project_name}}/{{cookiecutter.package_name}}/apis/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6969136459","text":"PIN_correct = \"5983\"\ncont = 0\ntentativo = False\nwhile cont != 3 and tentativo == False:\n PIN = input(\"Digitare il PIN (4 cifre)\")\n if(PIN == PIN_correct):\n tentativo = True\n print(\"Your PIN is correct\")\n else:\n print(\"Your PIN is incorrect\")\n cont = cont + 1\n tentativo = False\nif(cont == 3):\n print(\"Your bank card is blocked\")\n\n\n","repo_name":"Pierpaolo-QuijadaGomez/PoliTO","sub_path":"Lab05/P1/es1.py","file_name":"es1.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"817197829","text":"import numpy as np\nimport scipy.io as sio\nfrom scipy import signal\nimport matplotlib.pyplot as plt\nimport cProfile\n\n\ndef SMA_ERP(INPUT,currentfreq,SamplingFreq):\n\n data = np.zeros(shape=(1, INPUT.size)) # i.e #data is horizontal [0 0 0 0 ]\n\n data = INPUT\n\n datareshape = np.reshape(data, (data.size, 1)) # now data is vertical [0 0 0 ]´\n\n T = len(datareshape) # length of DATA\n if np.mod(T,2)==1:\n T = T-1\n F = currentfreq # Current frequency (could be asked to user)\n Fs = SamplingFreq # sampling frequency (could be asked to user)\n\n TIME = T / Fs\n my_new_t = np.arange(1 / Fs, TIME, 1 / Fs)\n\n if F == 40:\n Nosegments = int(T // ((Fs / F) * 2)) # in case freq is 40 #// is int divider\n AdjSegments = np.floor(0.05 * Nosegments) # ADJ is an integer still\n else:\n Nosegments = (T // ((Fs // F) * 1)) # for 5h and 10hz\n AdjSegments = np.floor(0.05 * Nosegments)\n\n if np.mod(AdjSegments, 2) == 1: # if adj segments is odd then:\n AdjSegments = AdjSegments - 1; # make adjsegments even\n\n SegmentCount = int(AdjSegments + 1) # still an int\n timestep = (T // Nosegments)\n # Assuming 1 channel\n\n # used variables\n segments = np.zeros(shape=(timestep, Nosegments)) # i.e timestep x Nosegments array\n CurrentSegments = np.zeros(shape=(timestep, SegmentCount)) # should still be true\n ScaledArtefact = np.zeros(shape=(timestep, Nosegments))\n AvgArtefact = np.zeros(shape=(T, 
1))\n Signal = np.zeros(shape=(1, T))\n # start of code\n\n t1 = 0\n t2 = timestep\n\n for j in range(Nosegments): # Has not changed\n segments[:, j] = datareshape[t1:t2, 0]\n t1 = t1 + timestep\n t2 = t2 + timestep\n\n for k in range(1, Nosegments + 1, 1): # I added that + 1\n if k <= (AdjSegments / 2): # remember ADJ segments is even so this always gives int\n S1 = 0\n S2 = AdjSegments\n CurrentSegments[:, 0:SegmentCount] = segments[:, 0:SegmentCount]\n else:\n if k >= (Nosegments - (AdjSegments / 2)):\n CurrentSegments[:, 0:SegmentCount] = segments[:, (Nosegments - SegmentCount):Nosegments]\n else:\n CurrentSegments[:, 0:SegmentCount] = segments[:,(int(k - AdjSegments / 2) - 1):int((k + AdjSegments / 2))]\n\n ScaledArtefact[:, k - 1] = CurrentSegments.mean(axis=1)\n\n AvgArtefact = np.reshape(ScaledArtefact, (T, 1), order='F') # DEFINE AVGARTEFACT\n\n AvgArtefact = np.reshape(AvgArtefact, (1, T)) # TURN INTO HORIZONTAL VECTOR\n\n Signal = data[0,0:T] - AvgArtefact\n\n # print(Signal)\n\n return Signal\n\n\nd, c = signal.butter(3, 50/250, 'low') \nb, a = signal.butter(3, 0.5/250, 'high')\n\n\nFULLDATA_contents = sio.loadmat('Test_1mA_5Hz_ERP_PH.mat') # FULLDATA_contents is an array of structures\n\nDATA_struct = FULLDATA_contents['EEG']\n\nData = DATA_struct[0, 0]\n\nQ = Data['Data']\n\ntacs_t1 = 15000\ntacs_t2 = tacs_t1 + 30000\n\nQ = np.transpose(Q[1, tacs_t1:tacs_t2])\n\nprint(Q.size)\n\n\nSUBJECT1_FREE_array = SMA_ERP(Q ,5 ,500) #apply SMA TO TEST 1 DATA\n\n#print(TEST1_FREE_array)\n\ncProfile.run('signal.filtfilt(d, c, FREE_array, padlen=3*(max(len(d),len(c))-1)) ')\n\nSUBJECT1_SMA_low = signal.filtfilt(d, c, SUBJECT1_FREE_array, padlen=3*(max(len(d),len(c))-1)) #FILTER TEST1 DATA AFTER SMA APPLIED\nSUBJECT1_SMA = signal.filtfilt(b, a, SUBJECT1_SMA_low, padlen=3*(max(len(b),len(a))-1))\n\n\n\n#print(ERP_TEST1_SMA)\n\nSUBJECT2_10HZ_SMA_low = signal.filtfilt(d, c, SUBJECT2_10HZ_FREE_array, padlen=3*(max(len(d),len(c))-1)) #FILTER TEST2 DATA AFTER SMA APPLIED\nSUBJECT2_10HZ_SMA = signal.filtfilt(b, a, SUBJECT2_10HZ_SMA_low, padlen=3*(max(len(b),len(a))-1))\n\n#print(ERP_TEST2_SMA)\n\n\nSHAM_SMA_low_1 = signal.filtfilt(d, c, SUBJECT1['SHAM'], padlen=3*(max(len(d),len(c))-1)) #FILTER RAW SHAM DATA TO COMPARE TO RESULTS\nSHAM_SMA_1 = signal.filtfilt(b, a, SHAM_SMA_low_1, padlen=3*(max(len(b),len(a))-1))\n\n#print(SHAM_SMA)\n\nSHAM_SMA_low_2 = signal.filtfilt(d, c, SUBJECT2_10HZ['SHAM'], padlen=3*(max(len(d),len(c))-1)) #FILTER RAW SHAM DATA TO COMPARE TO RESULTS\nSHAM_SMA_2 = signal.filtfilt(b, a, SHAM_SMA_low_2, padlen=3*(max(len(b),len(a))-1))\n\n\n\nplot_t = np.arange(0,30000/500,1/500)\n\nf, axarr = plt.subplots(2, sharex=True)\naxarr[0].plot(plot_t,SHAM_SMA_1[0,:]/1000, '-k', label='Sham')\naxarr[0].plot(plot_t,SUBJECT1_SMA[0,:]/1000, '--r', label='TEST1')\naxarr[0].set_title('SUBJECT 1 and SHAM')\naxarr[0].set_xlim([27,33])\naxarr[0].set_ylim([-300,300])\n\naxarr[1].plot(plot_t,SHAM_SMA_2[0,:]/1000, '-k', label='Sham')\naxarr[1].plot(plot_t, SUBJECT2_10HZ_SMA[0,:]/1000, '--r', label='TEST2')\naxarr[1].set_title('ERP TEST 2 and SHAM')\naxarr[1].set_xlim([4,12])\naxarr[1].set_ylim([-300,300])\n\naxarr[0].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\naxarr[1].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n\nplt.show()\n","repo_name":"gabocampod/DSP-Noise-removal-of-brain-monitoring-data","sub_path":"Superposition of Moving Averages (SMA)/SMA FOR 
PHANTOM/SMA_for_phantom.py","file_name":"SMA_for_phantom.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6514795548","text":"import openpyxl\nfrom openpyxl.styles import PatternFill\n\nworkbook = openpyxl.load_workbook(\"myfile.xlsx\")\n# adds a new worksheet to an existing workbook\nworksheet = workbook.create_sheet(title='mysheet')\n\n# fill grey color\ngrey_fill = PatternFill(start_color='CECACA',\n end_color='CECACA',\n fill_type='solid')\n\n# fill red color\nred_fill = PatternFill(start_color='990000',\n end_color='990000',\n fill_type='solid')\n\nworksheet['A1'].fill = grey_fill\nworksheet['A2'].fill = red_fill\nworkbook.save(filename=\"myfile.xlsx\")\n","repo_name":"crupib/python","sub_path":"demo_days/current/03-xl.py","file_name":"03-xl.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"17505228927","text":"import grove_rflink433mhz\nimport sys\n\n# Don't forget to run it with Python 3 !!\n# Don't forget to run it with Python 3 !!\n# Don't forget to run it with Python 3 !!\n\ndef Main():\n # instantiate a RFLinker object\n # default arguments are\n # port = '/dev/ttyS0' -> you've got only one on each raspberry\n # chunk_size = 32 -> the max number of data bytes you can send per fragment - you can ignore it\n # max_bad_readings = 32 -> the number of bad characters read before giving up on a read operation\n # keep in mind that there is environment pollution, so the RF module will get many fake 'transmissions'\n receiver = grove_rflink433mhz.RFLinker()\n message_received = \"\"\n\n # do this indefinitely\n while True:\n # receive the message\n # readMessage takes a default argument\n # called retries = 20\n # it specifies how many times it tries to read consistent data before giving up\n # you should not modify it unless you know what you're doing and provided you also\n # modify the chunk_size for the transmitter\n message_received = receiver.readMessage()\n if len(message_received) > 0:\n # if the string has something then print it\n print('[message received][{}]'.format(message_received))\n else:\n print(\"[message_received][none or couldn't parse it]\")\n\n\nif __name__ == \"__main__\":\n try:\n # it's the above function we call\n Main()\n\n # in case CTRL-C / CTRL-D keys are pressed (or anything else that might interrupt)\n except KeyboardInterrupt:\n print('[Keyboard interrupted]')\n sys.exit(0)\n","repo_name":"DexterInd/GrovePi","sub_path":"Software/Python/grove_rflink433mhz_oneway_kit/grove_receiver_example.py","file_name":"grove_receiver_example.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":485,"dataset":"github-code","pt":"12"} +{"seq_id":"36156836285","text":"from tkinter import *\n\nwin = Tk()\n\nwin.title('List Box')\nwin.geometry('600x400')\n\ndef func():\n #anchor will delete the selected element.\n lst.delete(ANCHOR)\n\nitems = ['Apple','Banana','Mango','Strawberry','finchi','Maneche']\n\nlst = Listbox(win,width=20)\n\nfor i in items:\n lst.insert(END,i)\n\nlst.pack()\n\nbtn = Button(win,text=\"DELETE\",command=func).pack()\nwin.mainloop()","repo_name":"Rishabh-raj-kumar/Software-using-python","sub_path":"list_box.py","file_name":"list_box.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} 
+{"seq_id":"15709046075","text":"import json\nimport sys\n\nfrom auto_esn.esn.esn import GroupedDeepESN\nimport numpy as np\nimport torch\n\nsys.path.insert(0, \".\")\nfrom handy.handy_prototype import (\n simulate_handy,\n)\nfrom utils import pack_parameters, pack_variables, plot_handy\n\n\ndef split_data(simulation, window_start, window_end, train_fraction):\n data = simulation[window_start:window_end]\n\n split_idx = int((window_end - window_start) * train_fraction)\n train = data[:split_idx]\n test = data[split_idx:]\n\n X_train = train[0 : len(train) - 1, :]\n y_train = train[1 : len(train), :]\n\n X_train = np.array(X_train)\n y_train = np.array(y_train)\n\n initial_extrapolation_state = train[-1:, :]\n\n return X_train, y_train, test, initial_extrapolation_state\n\n\ndef prepare_transformations(simulation):\n MIN_ARR = np.min(simulation, axis=0)\n MAX_ARR = np.max(simulation, axis=0)\n return (\n lambda x: (x - MIN_ARR) / (MAX_ARR - MIN_ARR),\n lambda x: x * (MAX_ARR - MIN_ARR) + MIN_ARR,\n )\n\n\ndef evaluate_model(model, extrapolation_steps, state):\n result = []\n for _ in range(extrapolation_steps):\n state = model(state)\n result.append(state)\n return torch.vstack(result).detach().numpy()\n\n\ndef train_and_evaluate_model(x, y, test, transform, inverse_transform, state, repeats):\n best_extrapolation = None\n best_mae = np.inf\n\n for _ in range(repeats):\n model = GroupedDeepESN(\n groups=3,\n input_size=4,\n num_layers=(3, 3, 3),\n hidden_size=500,\n ).float()\n\n model.fit(\n torch.from_numpy(transform(x)).float(),\n torch.from_numpy(transform(y)).float(),\n )\n extrapolation = inverse_transform(\n evaluate_model(\n model,\n len(test),\n torch.from_numpy(transform(state)).float(),\n )\n )\n\n mae = np.mean(np.abs(extrapolation - test))\n if mae < best_mae:\n best_mae = mae\n best_extrapolation = extrapolation\n\n return best_extrapolation\n\n\nif __name__ == \"__main__\":\n _script, config_path = sys.argv\n\n with open(config_path, \"r\") as config_fd:\n config_json = json.load(config_fd)\n\n parameters = pack_parameters(config_json[\"parameters\"])\n initial_value = pack_variables(config_json[\"initial_value\"])\n differential_t = float(config_json[\"differential_t\"])\n simulation_steps = config_json[\"simulation_steps\"]\n\n baseline_simulation = simulate_handy(\n initial_value, parameters, differential_t, simulation_steps\n )\n simulation_length = len(baseline_simulation)\n transform, inverse_transform = prepare_transformations(baseline_simulation)\n\n surogate_simulation_1 = simulate_handy(\n initial_value,\n parameters,\n differential_t,\n simulation_steps,\n simplify_consumption_rates=True,\n simplify_death_rates=False,\n )\n surogate_simulation_2 = simulate_handy(\n initial_value,\n parameters,\n differential_t,\n simulation_steps,\n simplify_consumption_rates=True,\n simplify_death_rates=True,\n )\n surogate_simulation_3 = simulate_handy(\n initial_value,\n parameters,\n differential_t,\n simulation_steps,\n simplify_consumption_rates=False,\n simplify_death_rates=True,\n )\n\n for config in [\n (\n int(0.45 * simulation_length),\n int(0.56 * simulation_length),\n 0.5,\n \"_550_100_next\",\n ),\n (int(0.45 * simulation_length), simulation_length, 0.1, \"_500_till_end\"),\n (\n int(0.45 * simulation_length),\n int(0.56 * simulation_length),\n 0.91,\n \"_1000_100_next\",\n ),\n (int(0.45 * simulation_length), simulation_length, 0.18, \"_1000_till_end\"),\n (\n int(0.45 * simulation_length),\n int(0.61 * simulation_length),\n 0.94,\n \"_1500_100_next\",\n ),\n 
(int(0.45 * simulation_length), simulation_length, 0.27, \"_1500_till_end\"),\n ]:\n window_start, window_end, train_fraction, name = config\n\n _, _, surogate_result_1, _ = split_data(\n surogate_simulation_1, window_start, window_end, train_fraction\n )\n _, _, surogate_result_2, _ = split_data(\n surogate_simulation_2, window_start, window_end, train_fraction\n )\n _, _, surogate_result_3, _ = split_data(\n surogate_simulation_3, window_start, window_end, train_fraction\n )\n\n X_train, y_train, test, initial_extrapolation_state = split_data(\n baseline_simulation, window_start, window_end, train_fraction\n )\n\n esn_extrapolation = train_and_evaluate_model(\n x=X_train,\n y=y_train,\n test=test,\n transform=transform,\n inverse_transform=inverse_transform,\n state=initial_extrapolation_state,\n repeats=20,\n )\n\n plot_handy(test, len(test), differential_t, path=f\"./esn_vs_surogates_plots/baseline_{name}\")\n plot_handy(\n esn_extrapolation,\n len(esn_extrapolation),\n differential_t,\n path=f\"./esn_vs_surogates_plots/esn_{name}\",\n )\n plot_handy(\n surogate_result_1,\n len(surogate_result_1),\n differential_t,\n path=f\"./esn_vs_surogates_plots/sur_1_{name}\",\n )\n plot_handy(\n surogate_result_2,\n len(surogate_result_2),\n differential_t,\n path=f\"./esn_vs_surogates_plots/sur_2_{name}\",\n )\n plot_handy(\n surogate_result_3,\n len(surogate_result_3),\n differential_t,\n path=f\"./esn_vs_surogates_plots/sur_3_{name}\",\n )\n\n with open(f\"./esn_vs_surogates_plots/mae_{name}.txt\", \"w+\") as mae_f:\n mae_f.write(\n f\"MAE of ESN extrapolation: {np.mean(np.abs(test.T - esn_extrapolation.T), axis=-1)}\\n\"\n )\n mae_f.write(\n f\"MAE of surogate 1 extrapolation: {np.mean(np.abs(test.T - surogate_result_1.T), axis=-1)}\\n\"\n )\n mae_f.write(\n f\"MAE of surogate 2 extrapolation: {np.mean(np.abs(test.T - surogate_result_2.T), axis=-1)}\\n\"\n )\n mae_f.write(\n f\"MAE of surogate 3 extrapolation: {np.mean(np.abs(test.T - surogate_result_3.T), axis=-1)}\\n\"\n )\n","repo_name":"marcinwasowicz/AGH_Complex_Systems_and_Machine_Learning","sub_path":"train_handy_esn.py","file_name":"train_handy_esn.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"35513019669","text":"import numpy as np\nimport librosa\nimport os\nfrom sklearn.preprocessing import StandardScaler\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras.utils import to_categorical\n\nclass DataGenerator(Sequence):\n def __init__(self, file_paths, batch_size=32, n_classes=2):\n self.file_paths = file_paths\n self.batch_size = batch_size\n self.n_classes = n_classes\n self.scaler = StandardScaler()\n\n def __len__(self):\n return int(np.ceil(len(self.file_paths) / float(self.batch_size)))\n\n def __getitem__(self, idx):\n batch_file_paths = self.file_paths[idx * self.batch_size:(idx + 1) * self.batch_size]\n batch_labels = self.labels()[idx * self.batch_size:(idx + 1) * self.batch_size]\n\n X = np.array([self.extract_features(file_path) for file_path in batch_file_paths])\n X = self.scaler.fit_transform(X)\n y = np.array(batch_labels)\n y = to_categorical(y, num_classes=self.n_classes)\n return X, y\n\n def extract_features(self, file_path):\n y, sr = librosa.load(file_path, sr=16000, mono=True, duration=2.5)\n\n features = []\n\n features.append(librosa.feature.spectral_centroid(y=y, sr=sr))\n features.append(librosa.feature.spectral_bandwidth(y=y, sr=sr))\n 
features.append(librosa.feature.spectral_flatness(y=y))\n features.append(librosa.feature.spectral_contrast(y=y, sr=sr))\n\n features.append(librosa.feature.zero_crossing_rate(y))\n features.append(librosa.feature.rms(y=y))\n\n y_early = y[:int(sr * 0.05)]\n y_late = y[int(sr * 0.05):]\n\n early_energy = np.sum(y_early ** 2)\n late_energy = np.sum(y_late ** 2)\n\n features.append(np.array([early_energy / late_energy if late_energy != 0 else 0]))\n\n X = np.concatenate([feature.flatten() for feature in features])\n return X\n \n def labels(self):\n labels = []\n for filepath in self.file_paths:\n labels.append(int(os.path.basename(os.path.dirname(filepath))))\n return np.array(labels)","repo_name":"nicelyblue/impulse-response-classifier","sub_path":"impulse_response_classifier/modules/data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"72532445140","text":"from functools import partial\nimport torch.nn.functional as F\nimport torch\nimport torch.nn as nn\n\n\nclass hsigmoid(nn.Module):\n def forward(self, x):\n out = F.relu6(x + 3, inplace=True) / 6\n return out\n\n\n# class ChannelAttentionModule(nn.Module):\n# def __init__(self, channel, ratio=16):\n# super(ChannelAttentionModule, self).__init__()\n# self.avg_pool = nn.AdaptiveAvgPool2d(1)\n# self.max_pool = nn.AdaptiveMaxPool2d(1)\n#\n# self.shared_MLP = nn.Sequential(\n# nn.Conv2d(channel, channel // ratio, 1, bias=False),\n# nn.ReLU(),\n# nn.Conv2d(channel // ratio, channel, 1, bias=False)\n# )\n# self.sigmoid = nn.Sigmoid()\n#\n# def forward(self, x):\n# avgout = self.shared_MLP(self.avg_pool(x))\n# print(avgout.shape)\n# maxout = self.shared_MLP(self.max_pool(x))\n# return self.sigmoid(avgout + maxout)\n#\n#\n# class SpatialAttentionModule(nn.Module):\n# def __init__(self):\n# super(SpatialAttentionModule, self).__init__()\n# self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3)\n# self.sigmoid = nn.Sigmoid()\n#\n# def forward(self, x):\n# avgout = torch.mean(x, dim=1, keepdim=True)\n# maxout, _ = torch.max(x, dim=1, keepdim=True)\n# out = torch.cat([avgout, maxout], dim=1)\n# out = self.sigmoid(self.conv2d(out))\n# return out\n#\n#\n# class SeModule(nn.Module):\n# def __init__(self, channel):\n# super(SeModule, self).__init__()\n# self.channel_attention = ChannelAttentionModule(channel)\n# self.spatial_attention = SpatialAttentionModule()\n#\n# def forward(self, x):\n# out = self.channel_attention(x) * x\n# print('outchannels:{}'.format(out.shape))\n# out = self.spatial_attention(out) * out\n# return out\n\n\n# class SeModule(nn.Module):\n# def __init__(self, in_size, reduction=4):\n# super(SeModule, self).__init__()\n#\n# self.se = nn.Sequential(\n# nn.AdaptiveAvgPool2d(1),\n# nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),\n# nn.BatchNorm2d(in_size // reduction),\n# nn.ReLU(inplace=True),\n# nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),\n# nn.BatchNorm2d(in_size),\n# hsigmoid()\n# )\n#\n# def forward(self, x):\n# return x * self.se(x)\n\n\nclass ChannelAttentionModule(nn.Module): # second variant\n def __init__(self, channel, reduction=4):\n super(ChannelAttentionModule, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, 
channel, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n\n\nclass SpatialAttentionModule(nn.Module):\n def __init__(self):\n super(SpatialAttentionModule, self).__init__()\n self.conv2d = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=3, stride=1, padding=1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # spatial map size stays the same; channels are reduced to one\n avgout = torch.mean(x, dim=1, keepdim=True)\n maxout, _ = torch.max(x, dim=1, keepdim=True)\n # out = torch.cat([avgout, maxout], dim=1)\n # out = self.sigmoid(self.conv2d(out))\n out = maxout + avgout\n out = self.sigmoid(self.conv2d(out))\n return out\n\n\nclass SeModule(nn.Module):\n def __init__(self, channel):\n super(SeModule, self).__init__()\n self.channel_attention = ChannelAttentionModule(channel)\n self.spatial_attention = SpatialAttentionModule()\n\n def forward(self, x):\n out1 = self.channel_attention(x)\n out2 = self.spatial_attention(x) * x\n return out1 + out2\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, input_channels, output_channels, kernel_size, **kwargs):\n super().__init__()\n self.conv = nn.Conv2d(input_channels, output_channels, kernel_size, **kwargs)\n self.bn = nn.BatchNorm2d(output_channels)\n self.relu = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass ChannelShuffle(nn.Module):\n\n def __init__(self, groups):\n super().__init__()\n self.groups = groups\n\n def forward(self, x):\n batchsize, channels, height, width = x.data.size()\n channels_per_group = int(channels / self.groups)\n\n # \"\"\"suppose a convolutional layer with g groups whose output has\n # g x n channels; we first reshape the output channel dimension\n # into (g, n)\"\"\"\n x = x.view(batchsize, self.groups, channels_per_group, height, width)\n\n # \"\"\"transposing and then flattening it back as the input of next layer.\"\"\"\n x = x.transpose(1, 2).contiguous()\n x = x.view(batchsize, -1, height, width)\n\n return x\n\n\nclass DepthwiseConv2d(nn.Module):\n\n def __init__(self, input_channels, output_channels, kernel_size, **kwargs):\n super().__init__()\n self.depthwise = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, kernel_size, **kwargs),\n nn.BatchNorm2d(output_channels)\n )\n\n def forward(self, x):\n return self.depthwise(x)\n\n\nclass PointwiseConv2d(nn.Module):\n def __init__(self, input_channels, output_channels, **kwargs):\n super().__init__()\n self.pointwise = nn.Sequential(\n nn.Conv2d(input_channels, output_channels, 1, **kwargs),\n nn.BatchNorm2d(output_channels)\n )\n\n def forward(self, x):\n return self.pointwise(x)\n\n\nclass ShuffleNetUnit(nn.Module):\n\n def __init__(self, input_channels, output_channels, stage, stride, groups):\n super().__init__()\n\n # \"\"\"Similar to [9], we set the number of bottleneck channels to 1/4\n # of the output channels for each ShuffleNet unit.\"\"\"\n self.bottlneck = nn.Sequential(\n PointwiseConv2d(\n input_channels,\n int(output_channels / 4),\n groups=groups\n ),\n nn.ReLU(inplace=True)\n )\n\n # \"\"\"Note that for Stage 2, we do not apply group convolution on the first pointwise\n # layer because the number of input channels is relatively small.\"\"\"\n if stage == 2:\n self.bottlneck = nn.Sequential(\n PointwiseConv2d(\n input_channels,\n int(output_channels / 4),\n groups=groups\n ),\n nn.ReLU(inplace=True)\n )\n\n self.channel_shuffle = ChannelShuffle(groups)\n\n 
self.depthwise = DepthwiseConv2d(\n int(output_channels / 4),\n int(output_channels / 4),\n 3,\n groups=int(output_channels / 4),\n stride=stride,\n padding=1\n )\n\n self.se = SeModule(int(output_channels / 4))\n\n self.expand = PointwiseConv2d(\n int(output_channels / 4),\n output_channels,\n groups=groups\n )\n\n self.relu = nn.ReLU(inplace=True)\n self.fusion = self._add\n self.shortcut = nn.Sequential()\n\n # \"\"\"As for the case where ShuffleNet is applied with stride,\n # we simply make two modifications (see Fig 2 (c)):\n # (i) add a 3 × 3 average pooling on the shortcut path;\n # (ii) replace the element-wise addition with channel concatenation,\n # which makes it easy to enlarge channel dimension with little extra\n # computation cost.\"\"\"\n if stride != 1 or input_channels != output_channels:\n self.shortcut = nn.AvgPool2d(3, stride=2, padding=1)\n\n self.expand = PointwiseConv2d(\n int(output_channels / 4),\n output_channels - input_channels,\n groups=groups\n )\n\n self.fusion = self._cat\n\n def _add(self, x, y):\n return torch.add(x, y)\n\n def _cat(self, x, y):\n return torch.cat([x, y], dim=1)\n\n def forward(self, x):\n shortcut = self.shortcut(x)\n\n shuffled = self.bottlneck(x)\n shuffled = self.channel_shuffle(shuffled)\n shuffled = self.depthwise(shuffled)\n shuffled = self.se(shuffled)\n shuffled = self.expand(shuffled)\n\n output = self.fusion(shortcut, shuffled)\n output = self.relu(output)\n\n return output\n\n\nclass ShuffleNetV1(nn.Module):\n\n def __init__(self, num_blocks=[4, 8, 4], num_classes=100, groups=1):\n super().__init__()\n\n if groups == 1:\n out_channels = [24, 144, 288, 567]\n elif groups == 2:\n out_channels = [24, 200, 400, 800]\n elif groups == 3:\n out_channels = [24, 240, 480, 960]\n elif groups == 4:\n out_channels = [24, 272, 544, 1088]\n elif groups == 8:\n out_channels = [24, 384, 768, 1536]\n\n self.conv1 = BasicConv2d(3, out_channels[0], 3, padding=1, stride=1)\n self.input_channels = out_channels[0]\n\n self.stage2 = self._make_stage(\n ShuffleNetUnit,\n num_blocks[0],\n out_channels[1],\n stride=2,\n stage=2,\n groups=groups\n )\n\n self.stage3 = self._make_stage(\n ShuffleNetUnit,\n num_blocks[1],\n out_channels[2],\n stride=2,\n stage=3,\n groups=groups\n )\n\n self.stage4 = self._make_stage(\n ShuffleNetUnit,\n num_blocks[2],\n out_channels[3],\n stride=2,\n stage=4,\n groups=groups\n )\n\n self.avg = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(out_channels[3], num_classes)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.stage2(x)\n x = self.stage3(x)\n x = self.stage4(x)\n x = self.avg(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n def _make_stage(self, block, num_blocks, output_channels, stride, stage, groups):\n \"\"\"make shufflenet stage\n Args:\n block: block type, shuffle unit\n out_channels: output depth channel number of this stage\n num_blocks: how many blocks per stage\n stride: the stride of the first block of this stage\n stage: stage index\n groups: group number of group convolution\n Return:\n return a shuffle net stage\n \"\"\"\n strides = [stride] + [1] * (num_blocks - 1)\n\n # collect blocks in a separate list so the `stage` index argument\n # is not shadowed and is passed through to each unit correctly\n layers = []\n\n for stride in strides:\n layers.append(\n block(\n self.input_channels,\n output_channels,\n stride=stride,\n stage=stage,\n groups=groups\n )\n )\n self.input_channels = output_channels\n\n return nn.Sequential(*layers)\n\n\ndef test():\n net = ShuffleNetV1()\n x = torch.randn(2, 3, 32, 32)\n y = net(x)\n torch.save(net.state_dict(), './pkl/net_params.pkl')\n print(y.size())\n from thop import 
profile\n flops, params = profile(net, inputs=(x, ))\n print('flops:', flops)\n\ntest()","repo_name":"GG-yuki/bugs","sub_path":"python/Mobile/paper_design_cifar100/shufflenetv1_att.py","file_name":"shufflenetv1_att.py","file_ext":"py","file_size_in_byte":11627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"17105613288","text":"# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------------\n# Load routines for tipsy files and raytracing.\n#-------------------------------------------------------------------------------\n#glass_basis('basis.pixels')\t\t\t\t\t# tell glass we want to work with pixels\nsetup_log(stdout=False)\t\t\t\t\t\t# disable output from glass\nfrom environment import env\nfrom misc.raytrace import raytrace, write_code, observables\nfrom environment import Image\n#from pytipsy import load_tipsy\nfrom numpy import load\nfrom math import cos,sin\nfrom pylab import clf, title\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom math import *\nfrom random import *\nimport sys\nreference = []\t\t\t# the reference data (from simulation or observation)\nreference_exists = False\t# will be set to true, as soon as reference is filled with data\nrefparms = [0.016, 0.0047, 6.0, 1.0]\n\n#-------------------------------------------------------------------------------\n# The lensing procedure\n#-------------------------------------------------------------------------------\ndef lensing(sx, sy, zs, zl):\n\n\t#-------------------------------------------------------------------------------\n\t# Now load the simulation.\n\t#-------------------------------------------------------------------------------\n\t#file='ddehnensphere_0.std'\n\t#t = load_tipsy(file, merge=['r', 'm'], memmap=False)\n\t#X,Y,Z = t.p.r.T\n\t#M = t.p.m\n\n\tfile = 'bigsim.txt.npz'\n\tdata = load(file)\n\tX = data['arr_0']\n\tY = data['arr_1']\n\tZ = data['arr_2']\n\tM = data['arr_3']\n\n\t#-------------------------------------------------------------------------------\n\t# For this simulation we only consider the stars.\n\t#-------------------------------------------------------------------------------\n\tX = X[35471:35471+554370]\n\tY = Y[35471:35471+554370]\n\tZ = Z[35471:35471+554370]\n\tM = M[35471:35471+554370] \n\n\tM *= 1e10 # Only for bigsim (converts to Msun)\n\n\t#print '%i particles' % len(M)\n\t#print 'Total mass is %.4e [Msun]' % sum(M)\n\n\t#result=[]\n\t#-------------------------------------------------------------------------------\n\t# Create the lensing object.\n\t#-------------------------------------------------------------------------------\n\tclear() # Only necessary when before regenerating models\n\tg = 13.7\n\tomega(0.3,0.7)\n\thubble_time(g)\n\n\tobj = globject('my lens')\n\tzlens(zl)\n\tpixrad(10)\n\tmaprad(1.0)\n\n\t#-------------------------------------------------------------------------------\n\t# And the source.\n\t#-------------------------------------------------------------------------------\n\tsrc = source(zs)\n\n\t#-------------------------------------------------------------------------------\n\t# Now generate a single model using the particles and a source postion.\n\t#-------------------------------------------------------------------------------\n\t#r = 0.010\n\t#t = 240\n\t#sx = r * cos(t * 3.1415/180.)\n\t#sy = r * sin(t * 3.1415/180.)\n\n\t#sx,sy = .0160, 0.0047 \n\n\tmodel(1, mode='particles',\n\t\tdata=[X,Y,M, [[sx,sy]], 
g])\n\n\t#-------------------------------------------------------------------------------\n\t# Raytrace model 0, using object 0 and source 0.\n\t#-------------------------------------------------------------------------------\n\td = raytrace([env().models[0], 0,0])\n\t#print observables(env().models[0], 0,0, d)\n\t#write_code(env().models[0], 0,0, d)\n\n\t#-------------------------------------------------------------------------------\n\t# The plots assume that the source already knew where the images were. We\n\t# add them manually here. This isn't necessary if you want to just run many\n\t# different models.\n\t#-------------------------------------------------------------------------------\n\tfor img,t,_,parity in d:\n\t src.add_image(Image((img.real, img.imag), parity))\n\t\t\t\n\toutput = observables(env().models[0], 0,0, d)\n\treturn output\n\n\"\"\"\n#-------------------------------------------------------------------------------\n# calculate fitness of one chromosome \n# tested\n#-------------------------------------------------------------------------------\ndef lensing_calc_fitness(chrom):\n\n\tsx = chrom[0]\n\tsy = chrom[1]\n\tzs = chrom[2]\n\tzl = chrom[3]\n\t#zs = 1.5\n\t#zl = 0.5\n\n\t# create the reference data if there is none yet\n\tif not reference_exists:\n\t\tsys.stdout.write(\"creating reference\\n\") \n\t\tcreate_reference()\n\n\t# construct phenotype (i.e. the image)\n\tret = lensing(sx, sy, zs, zl)\t\n\t#print sx, \" \", sy, \" \", zs, \" \", zl\n\tres = extract(ret)\t\t\t# the phenotype\n\n\t# find spatial distance between points of phenotype and the reference\n\tdistpen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][0]-res[i][0])*(reference[i][0]-res[i][0]) + (reference[i][1]-res[i][1])*(reference[i][1]-res[i][1]) \n\t\td = sqrt(d)\n\t\t#print str(reference[i][0]), \" \", str(reference[i][1]), \" \", str(res[i][0]), \" \", str(res[i][1]), \" \", d \n\t\tdistpen += d\t\n\n\t# find temporal distance between points of phenotype and the reference\n\ttimepen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][3]-res[i][3])*(reference[i][3]-res[i][3])\n\t\td = sqrt(d)\n\t\t#print str(reference[i][0]), \" \", str(reference[i][1]), \" \", str(res[i][0]), \" \", str(res[i][1]), \" \", d \n\t\ttimepen += d\t\n\n\t# penalty for every two compared points with different point type\n\ttypepen = 0.0\t\n\tfor i in range(len(reference)):\t\n\t\tif not(i in range(len(res))):\n\t\t\tbreak\n\t\tt1 = reference[i][2]\n\t\tt2 = res[i][2]\n\t\tif(not(t1==t2)):\n\t\t\ttypepen += 0.5\n\t\n\t# penalty for missing/extra points in current phenotype\n\tnum = abs(len(reference)-len(res))\n\tnumpen = 1.0*num \n\n\t# add up the penalties\t\n\tfitness = numpen + typepen + distpen + timepen\n\treturn fitness\n\"\"\"\n\n#-------------------------------------------------------------------------------\n# calculate fitness of one genome \n# tested\n# remark: modified to be two dimensional\n#-------------------------------------------------------------------------------\ndef lensing_calc_fitness(genome):\n\n\tsx = refparms[0]\n\tsy = refparms[1]\n\tzs = genome[0]\n\tzl = genome[1]\n\t\n\t# return big fitness value if parameters are\n\t# physically invalid\n\tif zl >= zs:\n\t\treturn 100.0\n\n\t# for the lensing, the stdout has to be redirected 
because glass\n\t# calls print at some points\n\told = sys.stdout\t\t\t\n\tnew = sys.stderr\n\tsys.stdout = new\n\n\t# create the reference data if there is none yet\n\tif not reference_exists:\n\t\t#sys.stderr.write(\"creating reference\\n\") \n\t\tcreate_reference()\n\t \t \n\t# generate the image\n\tret = lensing(sx, sy, zs, zl)\n\tres = extract(ret)\t\t\t# the phenotype\n\n\t#print >>sys.stderr, \"ref: \", reference\n\t#print >>sys.stderr, \"res: \", res\n\t\n\n\t# redirect stdout back to where it was at beginning of the function call\n\tsys.stdout = old\n\n\t# find distance between points of phenotype and the reference\n\tdistpen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][0]-res[i][0])*(reference[i][0]-res[i][0]) + (reference[i][1]-res[i][1])*(reference[i][1]-res[i][1]) \n\t\td = sqrt(d)\n\t\t#print str(reference[i][0]), \" \", str(reference[i][1]), \" \", str(res[i][0]), \" \", str(res[i][1]), \" \", d \n\t\tdistpen += (d/2.2454)\n\n\t# find temporal distance between points of phenotype and the reference\n\ttimepen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][3]-res[i][3])*(reference[i][3]-res[i][3])\n\t\td = sqrt(d)\n\t\ttimepen += (d/7.8728)\n\n\t# penalty for every two compared points with different point type\n\ttypepen = 0.0\t\n\t\"\"\"\n\tfor i in range(len(reference)):\t\n\t\tif not(i in range(len(res))):\n\t\t\tbreak\n\t\tt1 = reference[i][2]\n\t\tt2 = res[i][2]\n\t\tif(not(t1==t2)):\n\t\t\ttypepen += 0.5\n\t\"\"\"\n\n\t# from itertools import izip\n\t#for r1,r2 in izip(reference,res):\n\t#\tt1 = r1[2]\n\t#\tt2 = r2[2]\n\t#\tif t1 != t2:\n\t#\t\ttypepen += 0.5\n\t\t\n\t# penalty for missing/extra points in current phenotype\n\tnum = abs(len(reference)-len(res))\n\tnumpen = num \n\tnumpen /= 4.0\n\t\t\n\t# add up the penalties\t\n\tfitness = numpen + typepen + distpen + timepen\n\treturn fitness\n\t\n\n#-------------------------------------------------------------------------------\n# calculate fitness of one chromosome \n# tested\n#-------------------------------------------------------------------------------\ndef lensing_calc_fitness_restricted(chrom, numfit=True, typefit=True, distfit=True, timefit=True):\n\n\tsx = chrom[0]\n\tsy = chrom[1]\n\tzs = chrom[2]\n\tzl = chrom[3]\n\t#zs = 1.5\n\t#zl = 0.5\n\n\t# return big fitness value if parameters are\n\t# physically invalid\n\tif zl >= zs:\n\t\treturn 1.0\n\n\t# create the reference data if there is none yet\n\tif not reference_exists:\n\t\tsys.stdout.write(\"creating reference\\n\") \n\t\tcreate_reference()\n\n\t# construct phenotype (i.e. 
the image)\n\tret = lensing(sx, sy, zs, zl)\t\n\t#print sx, \" \", sy, \" \", zs, \" \", zl\n\tres = extract(ret)\t\t\t# the phenotype\n\n\t# find spatial distance between points of phenotype and the reference\n\tdistpen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][0]-res[i][0])*(reference[i][0]-res[i][0]) + (reference[i][1]-res[i][1])*(reference[i][1]-res[i][1]) \n\t\td = sqrt(d)\n\t\tdistpen += d\t\n\n\t# find temporal distance between points of phenotype and the reference\n\ttimepen = 0.0\t\n\tfor i in range(len(reference)):\n\t\tif not(i in range(len(res))):\t\t# break if number of ref-points exceeds number of current points\n\t\t\tbreak\n\t\td = (reference[i][3]-res[i][3])*(reference[i][3]-res[i][3])\n\t\td = sqrt(d)\n\t\ttimepen += d\t\n\n\t# penalty for every two compared points with different point type\n\ttypepen = 0.0\t\n\t\"\"\"\n\tfor i in range(len(reference)):\t\n\t\tif not(i in range(len(res))):\n\t\t\tbreak\n\t\tt1 = reference[i][2]\n\t\tt2 = res[i][2]\n\t\tif(not(t1==t2)):\n\t\t\ttypepen += 0.5\n\t\"\"\"\t\n\n\t# penalty for missing/extra points in current phenotype\n\tnum = abs(len(reference)-len(res))\n\tnumpen = 1.0*num \n\n\t# add up the penalties\t\n\tfitness = 0\n\tif(numfit):\n\t\tfitness += numpen\n\tif(typefit):\n\t\tfitness += typepen\n\tif(distfit):\n\t\tfitness += distpen\n\tif(timefit):\n\t\tfitness += timepen\n\treturn fitness\n\n#-------------------------------------------------------------------------------\n# fill the reference variable with data to which the genetic algorithm should converge\n#-------------------------------------------------------------------------------\ndef create_reference():\n\tglobal reference\n\tglobal reference_exists\n\treference = lensing(refparms[0], refparms[1], refparms[2], refparms[3])\n\treference = extract(reference)\n\treference_exists = True\n\n#-------------------------------------------------------------------------------\n# convert data obtained from lensing() into an appropriate format\n#-------------------------------------------------------------------------------\ndef extract(data):\n\tres = []\n\n\t# maybe there is no data at all\n\tif data is None:\n\t\treturn res\n\t\n\t#first elm contains no delay, so extract it separately\n\tx = data[0][0].real\n\ty = data[0][0].imag\n\tpar = data[0][1]\n\tdelay = 0.0\n\tres.append([x,y,par,delay])\n\n\t# extract rest of the image list\n\tfor i in range(1, len(data)): \n\t\tx = data[i][0].real\n\t\ty = data[i][0].imag\n\t\tpar = data[i][1]\n\t\tdelay = data[i][2]\n\t\tres.append([x,y,par,delay])\n\n\treturn res\n\n#-------------------------------------------------------------------------------\n# driver\n# (for testing)\n#-------------------------------------------------------------------------------\n#compute reference data\n#ret = get_reference()\n#reference = extract(ret)\n#plot_reference()\n#lensing_cleanup()\n","repo_name":"perezite/cheesy","sub_path":"gagl/scripts/sampler/lensing.py","file_name":"lensing.py","file_ext":"py","file_size_in_byte":11672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"22627368306","text":"#sbatch -a 0-3 multiple_complete_retrieval.slurm\n#sbatch -a 0-5 multiple_complete_retrieval.slurm\nimport torch\nfrom torch.autograd import Variable \nimport torch.nn as nn\nimport torch.nn.functional as F \nimport torch.optim as optim\nfrom 
torch.optim.lr_scheduler import * \nimport time\nimport os\nimport numpy as np\n# from model import build_r3d_classifier, build_r3d_backbone, build_r3d_original\n# from i3d import InceptionI3d\nfrom model import *\nimport parameters_BL as params\nimport config as cfg\n# from DL_ishanv3 import *\nfrom dl_ret import *\nimport sys, traceback\nfrom sklearn.metrics import precision_recall_fscore_support, average_precision_score\nfrom tensorboardX import SummaryWriter\nimport cv2\nfrom torch.utils.data import DataLoader\nimport math\nimport argparse\nimport itertools\nimport pickle\n# from keras.utils import to_categorical\n\n# if torch.cuda.is_available(): \n# torch.set_default_tensor_type('torch.cuda.FloatTensor')\n\n\ndef val_epoch(clips_per_vid, run_id, epoch,mode, skip, hflip, cropping_fac, pred_dict,label_dict, data_loader, model, criterion, writer, use_cuda):\n print(f'validation at epoch {epoch} - mode {mode} - skip {skip} - hflip {hflip} - cropping_fac {cropping_fac}')\n \n model.eval()\n\n losses = []\n predictions, ground_truth = [], []\n vid_paths = []\n\n for i, (inputs, label, vid_path,_) in enumerate(data_loader):\n vid_paths.extend(vid_path)\n ground_truth.extend(label)\n # inputs = inputs.permute(0,4,1,2,3)\n if len(inputs.shape) != 1:\n\n inputs = inputs.permute(0, 2, 1, 3, 4)\n\n # print(label)\n if use_cuda:\n inputs = inputs.cuda()\n label = torch.from_numpy(np.asarray(label)).cuda()\n # print(label)\n\n \n with torch.no_grad():\n\n output = model(inputs)\n # print(output.shape)\n # exit()\n output = output.squeeze(3)\n output = output.squeeze(3)\n\n predictions.extend(output.cpu())\n # print(len(predictions))\n\n\n if (i+1) % 45 == 0: # parentheses needed so progress is reported every 45 batches\n print(f'{i} batches are done')\n # print(\"Validation Epoch \", epoch , \"mode\", mode, \"skip\", skip, \"hflip\", hflip, \" Batch \", i, \"- Loss : \", np.mean(losses))\n \n del inputs, output, label\n # ground_truth = np.asarray(ground_truth)\n # pred_array = np.flip(np.argsort(predictions,axis=1),axis=1) # Prediction with the most confidence is the first element here\n # c_pred = pred_array[:,0] #np.argmax(predictions,axis=1).reshape(len(predictions))\n\n for entry in range(len(vid_paths)):\n if str(vid_paths[entry].split('/')[-1]) not in pred_dict.keys():\n pred_dict[str(vid_paths[entry].split('/')[-1])] = []\n pred_dict[str(vid_paths[entry].split('/')[-1])] = (1/clips_per_vid)*predictions[entry].view(-1)\n\n else:\n # print('yes')\n pred_dict[str(vid_paths[entry].split('/')[-1])]+= (1/clips_per_vid)*predictions[entry].view(-1)\n\n for entry in range(len(vid_paths)):\n if str(vid_paths[entry].split('/')[-1]) not in label_dict.keys():\n label_dict[str(vid_paths[entry].split('/')[-1])]= ground_truth[entry]\n\n print_pred_array = []\n\n # for entry in range(pred_array.shape[0]):\n # temp = ''\n # for i in range(5):\n # temp += str(int(pred_array[entry][i]))+' '\n # print_pred_array.append(temp)\n # print(f'check {print_pred_array[0]}')\n # results = open('Submission1.txt','w')\n # for entry in range(len(vid_paths)):\n # content = str(vid_paths[entry].split('/')[-1] + ' ' + print_pred_array[entry])[:-1]+'\\n'\n # results.write(content)\n # results.close()\n \n # correct_count = np.sum(c_pred==ground_truth)\n # accuracy = float(correct_count)/len(c_pred)\n \n # print(f'Correct Count is {correct_count}')\n # print(f'Epoch {epoch}, mode {mode}, skip {skip}, hflip {hflip}, cropping_fac {cropping_fac}, Accuracy: {accuracy*100 :.3f}')\n return pred_dict, label_dict\n \ndef train_classifier(run_id, arch, m_file_name, num_modes):\n use_cuda = True\n 
best_score = 0\n writer = SummaryWriter(os.path.join(cfg.logs, str(run_id)))\n\n save_dir = os.path.join(cfg.saved_models_dir, run_id)\n # if not os.path.exists(save_dir):\n # os.makedirs(save_dir)\n\n # m_file_name = '/home/c3-0/ishan/ss_saved_models/r3d31_2c_full_e143_32f/model_221_bestAcc_0.7254_F1_0.7173.pth'\n # m_file_name = '/home/idave/ss_saved_models/r3d65/model_best_e234_loss_12.264.pth'\n # m_file_name = '/home/idave/ss_saved_models/r3d62/model_best_e104_loss_68.780.pth'\n # m_file_name = '/home/idave/ss_saved_models/r3d75/model_best_e157_loss_11.327.pth'\n if len(saved_model) ==0:\n print('It`s from scratch')\n model = build_r3d_encoder_ret(self_pretrained = False, num_classes = params.num_classes)\n\n else:\n if arch =='r3d18':\n model = build_r3d_encoder_ret(saved_model_file = m_file_name, num_classes = params.num_classes)\n if arch =='rpd18':\n model = build_rpd_encoder_ret(saved_model_file = m_file_name, num_classes = params.num_classes)\n \n\n \n criterion= nn.CrossEntropyLoss()\n\n if torch.cuda.device_count()>1:\n print(f'Multiple GPUS found!')\n model=nn.DataParallel(model)\n model.cuda()\n criterion.cuda()\n else:\n print('Only 1 GPU is available')\n model.cuda()\n criterion.cuda()\n \n if num_modes:\n # modes = num_modes\n modes = list(range(num_modes))\n\n else:\n modes = list(range(params.num_modes))\n print(f'Num modes {len(modes)}')\n\n skip = list(range(1,params.num_skips+1))\n hflip = params.hflip #list(range(2))\n cropping_fac1 = params.cropping_fac1\n print(f'Num skips {skip}')\n print(f'Cropping fac {cropping_fac1}')\n modes, skip,hflip, cropping_fac = list(zip(*itertools.product(modes,skip,hflip,cropping_fac1)))\n \n\n\n print(f'There will be total {len(modes)} iterations')\n for epoch in range(1):\n \n print(f'Epoch {epoch} started')\n start=time.time()\n \n pred_dict = {}\n label_dict = {}\n val_losses =[]\n if not os.path.exists('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_val_pred_dict.pkl'):\n\n for val_iter in range(len(modes)):\n # if (modes[val_iter] == 0 and skip[val_iter] ==1) or (modes[val_iter] == 2 and skip[val_iter] ==1):\n # continue\n # try:\n # break\n validation_dataset = multi_baseline_dataloader_val_strong(shuffle = True, data_percentage = params.data_percentage,\\\n mode = modes[val_iter], skip = skip[val_iter], hflip= hflip[val_iter], \\\n cropping_factor= cropping_fac[val_iter])\n validation_dataloader = DataLoader(validation_dataset, batch_size=params.v_batch_size, shuffle=True, num_workers=params.num_workers, collate_fn=collate_fn2)\n if val_iter ==0:\n print(f'Validation dataset length: {len(validation_dataset)}')\n print(f'Validation dataset steps per epoch: {len(validation_dataset)/params.v_batch_size}') \n \n pred_dict, label_dict = val_epoch(len(modes), run_id, epoch,modes[val_iter],skip[val_iter],hflip[val_iter],cropping_fac[val_iter], \\\n pred_dict, label_dict, validation_dataloader, model, criterion, writer, use_cuda)\n \n os.makedirs('./'+str(run_id) + '_retrieval')\n pickle.dump(pred_dict, open('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_val_pred_dict.pkl','wb'))\n pickle.dump(label_dict, open('./'+str(run_id) + '_retrieval/'+str(run_id)+'_val_label_dict.pkl','wb'))\n val_features, val_labels = pred_dict, label_dict\n else:\n val_features = pickle.load(open('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_val_pred_dict.pkl','rb'))\n val_labels = pickle.load(open('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_val_label_dict.pkl','rb'))\n pred_dict, label_dict ={}, {}\n if not os.path.exists('./'+str(run_id) + 
'_retrieval/'+str(run_id)+ '_train_pred_dict.pkl'):\n\n for val_iter in range(len(modes)):\n # if (modes[val_iter] == 0 and skip[val_iter] ==1) or (modes[val_iter] == 2 and skip[val_iter] ==1):\n # continue\n # try:\n train_dataset = multi_baseline_dataloader_train_strong(shuffle = True, data_percentage = params.data_percentage,\\\n mode = modes[val_iter], skip = skip[val_iter], hflip= hflip[val_iter], \\\n cropping_factor= cropping_fac[val_iter])\n train_dataloader = DataLoader(train_dataset, batch_size=params.v_batch_size, shuffle=True, num_workers=params.num_workers, collate_fn=collate_fn2)\n if val_iter ==0:\n print(f'train dataset length: {len(train_dataset)}')\n print(f'train dataset steps per epoch: {len(train_dataset)/params.v_batch_size}') \n \n pred_dict, label_dict = val_epoch(len(modes), run_id, epoch,modes[val_iter],skip[val_iter],hflip[val_iter],cropping_fac[val_iter], \\\n pred_dict, label_dict, train_dataloader, model, criterion, writer, use_cuda)\n pickle.dump(pred_dict, open('./'+str(run_id) + '_retrieval/'+str(run_id)+'_train_pred_dict.pkl','wb'))\n pickle.dump(label_dict, open('./'+str(run_id) + '_retrieval/'+str(run_id)+'_train_label_dict.pkl','wb'))\n train_features, train_labels = pred_dict, label_dict\n\n else:\n train_features = pickle.load(open('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_train_pred_dict.pkl','rb'))\n train_labels = pickle.load(open('./'+str(run_id) + '_retrieval/'+str(run_id)+ '_train_label_dict.pkl','rb'))\n\n train_feat = []\n train_label = []\n for i in (list(train_features.keys())):\n train_feat.append(train_features[i])\n train_label.append(train_labels[i])\n\n val_feat = []\n val_label = []\n for i in (list(val_features.keys())):\n val_feat.append(val_features[i])\n val_label.append(val_labels[i])\n\n train_feat = torch.stack(train_feat,dim=0) \n train_label = np.asarray(train_label)\n val_feat = torch.stack(val_feat,dim=0)\n val_label = np.asarray(val_label)\n\n # print(len(train_label))\n # print(len(val_label))\n # print(train_feat.shape)\n # print(val_feat.shape)\n\n #train features has size of 9537 x 4096\n #val feature has size of 3792 x 4096\n\n train_feat /= train_feat.norm(dim = 1)[:,None]\n val_feat /= val_feat.norm(dim = 1)[:,None]\n\n\n similarity_matrix = torch.mm(train_feat.cuda(), val_feat.cuda().T).cpu().numpy()\n # print(similarity_matrix.shape)\n # similarity matrix has shape of 9537 x 3783\n\n sorted_column_args = np.argsort(similarity_matrix,axis =0)\n top1_correct = 0\n top5_correct = 0\n top10_correct = 0\n top20_correct = 0\n\n for i in range(similarity_matrix.shape[1]):\n top_label = train_label[sorted_column_args[:,i]]\n if val_label[i] == top_label[-1]:\n top1_correct += 1\n if val_label[i] in top_label[-5:]:\n top5_correct += 1\n if val_label[i] in top_label[-10:]:\n top10_correct += 1\n if val_label[i] in top_label[-20:]:\n top20_correct += 1\n print(f'Top-1 correct is {top1_correct/similarity_matrix.shape[1]*100 :.2f}%')\n print(f'Top-5 correct is {top5_correct/similarity_matrix.shape[1]*100 :.2f}%')\n print(f'Top-10 correct is {top10_correct/similarity_matrix.shape[1]*100 :.2f}%')\n print(f'Top-20 correct is {top20_correct/similarity_matrix.shape[1]*100 :.2f}%')\n\n print(f'{top1_correct/similarity_matrix.shape[1]*100 :.2f}, {top5_correct/similarity_matrix.shape[1]*100 :.2f}, {top10_correct/similarity_matrix.shape[1]*100 :.2f}, {top20_correct/similarity_matrix.shape[1]*100 :.2f}')\n f = open('./'+str(run_id) + '_retrieval/'+str(run_id)+'_results.txt','w')\n f.writelines('Top-1, Top-5, Top-10, Top-20 \\n')\n 
f.writelines(str(top1_correct/similarity_matrix.shape[1]*100)+', ' + str(top5_correct/similarity_matrix.shape[1]*100)+', ' +str(top10_correct/similarity_matrix.shape[1]*100)+', ' + str(top20_correct/similarity_matrix.shape[1]*100))\n f.close()\n # val_losses.append(loss)\n\n # predictions1 = np.zeros((len(list(pred_dict.keys())),102))\n # ground_truth1 = []\n # entry = 0\n # for key in pred_dict.keys():\n # predictions1[entry] = np.mean(pred_dict[key], axis =0)\n # entry+=1\n\n # for key in label_dict.keys():\n # ground_truth1.append(label_dict[key])\n \n # pred_array1 = np.flip(np.argsort(predictions1,axis=1),axis=1) # Prediction with the most confidence is the first element here\n # c_pred1 = pred_array1[:,0]\n\n # correct_count1 = np.sum(c_pred1==ground_truth1)\n # accuracy11 = float(correct_count1)/len(c_pred1)\n\n \n # print(f'Running Avg Accuracy is for epoch {epoch}, mode {modes[val_iter]}, skip {skip[val_iter]}, hflip {hflip[val_iter]}, cropping_fac {cropping_fac[val_iter]} is {accuracy11*100 :.3f}% ') \n # except:\n # print(f'Failed epoch {epoch}, mode {modes[val_iter]}, skip {skip[val_iter]}, hflip {hflip[val_iter]}, cropping_fac {cropping_fac[val_iter]} is {accuracy11*100 :.3f}% ') \n\n\n # val_loss = np.mean(val_losses)\n # predictions = np.zeros((len(list(pred_dict.keys())),102))\n # ground_truth = []\n # entry = 0\n # for key in pred_dict.keys():\n # predictions[entry] = np.mean(pred_dict[key], axis =0)\n # entry+=1\n\n # for key in label_dict.keys():\n # ground_truth.append(label_dict[key])\n \n # pred_array = np.flip(np.argsort(predictions,axis=1),axis=1) # Prediction with the most confidence is the first element here\n # c_pred = pred_array[:,0]\n\n # correct_count = np.sum(c_pred==ground_truth)\n # accuracy1 = float(correct_count)/len(c_pred)\n\n # writer.add_scalar('Validation Loss', np.mean(val_loss), epoch)\n # writer.add_scalar('Validation Accuracy', np.mean(accuracy1), epoch)\n \n # print(f'Overall Accuracy is for epoch {epoch} is {accuracy1*100 :.3f}% ')\n # file_name = f'RunID_{run_id}_Acc_{accuracy1*100 :.3f}_cf_{len(cropping_fac1)}_m_{params.num_modes}_s_{params.num_skips}.pkl' \n # pickle.dump(pred_dict, open(file_name,'wb'))\n\n \n taken = time.time()-start\n print(f'Time taken for Epoch-{epoch} is {taken}')\n print()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Script to train baseline')\n\n parser.add_argument(\"--run_id\", dest='run_id', type=str, required=False, default= \"random\",\n help='run_id')\n parser.add_argument(\"--saved_model\", dest='saved_model', type=str, required=False, default= \"\",\n help='saved_model')\n parser.add_argument(\"--arch\", dest='arch', type=str, required=False, default= \"r3d18\",\n help='run_id')\n parser.add_argument(\"--modes\", dest='modes', type=int, required=False, default= 0,\n help='modes')\n args = parser.parse_args()\n run_id = args.run_id\n modes = args.modes\n arch = args.arch\n\n saved_model = args. 
saved_model\n train_classifier(str(run_id), arch, str(saved_model),modes)\n\n\n\n \n\n\n","repo_name":"DAVEISHAN/TCLR","sub_path":"nn_retrieval/complete_retrieval.py","file_name":"complete_retrieval.py","file_ext":"py","file_size_in_byte":16081,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"12"} +{"seq_id":"70716729302","text":"import os\nfrom ..utils import conf\n\n\ndef get_local_fields(model, source_fields):\n \"\"\"\n Get the model's local fields as a dict depending on the type of model\n library in use\n \"\"\"\n local_fields = None\n\n # MONGOENGINE\n if hasattr(model, '_fields') and isinstance(model._fields, dict):\n local_fields = dict((field_name, field)\n for field_name, field in model._fields.iteritems()\n if field_name in source_fields)\n\n return local_fields\n\n\ndef get_image(field):\n \"\"\"\n Get a file like object containing the content of the field's image\n \"\"\"\n def handle_string(string):\n # The field might be a string representing the path to the image\n # or it may be an external url\n\n if string.startswith('http'):\n return get_http_asset(string)\n else:\n file_path = os.path.join(conf.MEDIA_ROOT, conf.BASE_PREFIX, string)\n if file_path.startswith('http'):\n return get_http_asset(file_path)\n file = open(file_path, 'rb')\n if file:\n return file\n else:\n raise Exception(\"Could not open a valid file for path: %s\" % file_path)\n\n # Embedded documents should have a way of representing themselves in a way we can use\n # We offer them this ability through a method \"to_imagekit\"\n if hasattr(field, 'to_imagekit'):\n string = field.to_imagekit()\n return handle_string(string)\n elif hasattr(field, 'seek') and hasattr(field, 'read'):\n # The field itself can be treated as a file like object, just return it\n return field\n elif isinstance(field, basestring):\n return handle_string(field)\n\n raise Exception(\"Could not determine a way to extract data from the supplied field: %s\" % field)\n\n\ndef get_http_asset(path):\n import requests\n import shutil\n from StringIO import StringIO\n\n file = StringIO()\n r = requests.get(path, stream=True)\n if r.status_code == 200:\n shutil.copyfileobj(r.raw, file)\n return file\n else:\n raise Exception(\"Http response: %s, trying to get file %s\" % (r.status_code, path))\n","repo_name":"cj-dimaggio/flask-imagekit","sub_path":"flask_imagekit/model_helpers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21581020003","text":"import os\nimport logging\nimport base64\nimport msal\nimport datetime\nimport requests\nimport sys\nimport json\nimport ast\n\n\nfrom dotenv import load_dotenv\nfrom datetime import datetime\nfrom requests.adapters import HTTPAdapter\nfrom urllib3 import Retry\nfrom urllib.parse import quote_plus\n\n\ndef set_current_directory():\n logging.info('Setting current directory')\n\n os.chdir(os.getcwd())\n\n\ndef start_logging():\n global process_name\n\n # Get File Name of existing script\n process_name = os.path.basename(__file__).replace('.py', '').replace(' ', '_')\n\n logging.basicConfig(filename=f'Logs/{process_name}.log', format='%(asctime)s %(message)s', filemode='w',\n level=logging.DEBUG)\n\n # Printing the output to file for debugging\n logging.info('Starting the Script')\n\n\ndef get_env_variables():\n logging.info('Setting Environment variables')\n\n global REMOTE_SERVER_IP, REMOTE_SERVER_USER, 
REMOTE_SERVER_PASS, O_CLIENT_ID, \\\n CLIENT_SECRET, TENANT_ID, FROM, SEND_TO, CC_TO, ERROR_EMAILS_TO, DB_IP, DB_NAME, \\\n DB_USERNAME, DB_PASSWORD, AUTH_CODE, REDIRECT_URL, CLIENT_ID, RE_API_KEY\n\n load_dotenv()\n\n REMOTE_SERVER_IP = os.getenv('REMOTE_SERVER_IP')\n REMOTE_SERVER_USER = os.getenv('REMOTE_SERVER_USER')\n REMOTE_SERVER_PASS = os.getenv('REMOTE_SERVER_PASS')\n O_CLIENT_ID = os.getenv('O_CLIENT_ID')\n CLIENT_SECRET = os.getenv('CLIENT_SECRET')\n TENANT_ID = os.getenv('TENANT_ID')\n FROM = os.getenv('FROM')\n SEND_TO = eval(os.getenv('SEND_TO'))\n CC_TO = eval(os.getenv('CC_TO'))\n ERROR_EMAILS_TO = eval(os.getenv('ERROR_EMAILS_TO'))\n DB_IP = os.getenv(\"DB_IP\")\n DB_NAME = os.getenv(\"DB_NAME\")\n DB_USERNAME = os.getenv(\"DB_USERNAME\")\n DB_PASSWORD = quote_plus(os.getenv(\"DB_PASSWORD\"))\n AUTH_CODE = os.getenv(\"AUTH_CODE\")\n REDIRECT_URL = os.getenv(\"REDIRECT_URL\")\n CLIENT_ID = os.getenv(\"CLIENT_ID\")\n RE_API_KEY = os.getenv(\"RE_API_KEY\")\n\n\ndef send_error_emails(subject, Argument):\n logging.info('Sending email for an error')\n\n authority = f'https://login.microsoftonline.com/{TENANT_ID}'\n\n app = msal.ConfidentialClientApplication(\n client_id=O_CLIENT_ID,\n client_credential=CLIENT_SECRET,\n authority=authority\n )\n\n scopes = [\"https://graph.microsoft.com/.default\"]\n\n result = None\n result = app.acquire_token_silent(scopes, account=None)\n\n if not result:\n result = app.acquire_token_for_client(scopes=scopes)\n\n template = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
<html>\n<body>\n<h2>Raiser's Edge Automation: {job_name} Failed</h2>\n<p>This is to notify you that execution of Auto-updating Alumni records has failed.</p>\n<p>Job details:</p>\n<table>\n<tr><td>Job :</td><td>{job_name}</td></tr>\n<tr><td>Failed on :</td><td>{current_time}</td></tr>\n</table>\n<p>Below is the detailed error log,<br>{error_log_message}</p>\n</body>\n</html>
\n \"\"\"\n\n # Create a text/html message from a rendered template\n email_body = template.format(\n job_name=subject,\n current_time=datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\"),\n error_log_message=Argument\n )\n\n # Set up attachment data\n with open(f'Logs/{process_name}.log', 'rb') as f:\n attachment_content = f.read()\n attachment_content = base64.b64encode(attachment_content).decode('utf-8')\n\n if \"access_token\" in result:\n\n endpoint = f'https://graph.microsoft.com/v1.0/users/{FROM}/sendMail'\n\n email_msg = {\n 'Message': {\n 'Subject': subject,\n 'Body': {\n 'ContentType': 'HTML',\n 'Content': email_body\n },\n 'ToRecipients': get_recipients(ERROR_EMAILS_TO),\n 'Attachments': [\n {\n '@odata.type': '#microsoft.graph.fileAttachment',\n 'name': 'Process.log',\n 'contentBytes': attachment_content\n }\n ]\n },\n 'SaveToSentItems': 'true'\n }\n\n requests.post(\n endpoint,\n headers={\n 'Authorization': 'Bearer ' + result['access_token']\n },\n json=email_msg\n )\n\n else:\n logging.info(result.get('error'))\n logging.info(result.get('error_description'))\n logging.info(result.get('correlation_id'))\n\n\ndef set_api_request_strategy():\n logging.info('Setting API Request strategy')\n\n global http\n\n retry_strategy = Retry(\n total=3,\n status_forcelist=[429, 500, 502, 503, 504],\n allowed_methods=['HEAD', 'GET', 'OPTIONS'],\n backoff_factor=10\n )\n\n adapter = HTTPAdapter(max_retries=retry_strategy)\n http = requests.Session()\n http.mount('https://', adapter)\n http.mount('http://', adapter)\n\n\ndef get_recipients(email_list):\n value = []\n\n for email in email_list:\n email = {\n 'emailAddress': {\n 'address': email\n }\n }\n\n value.append(email)\n\n return value\n\n\ndef del_request_re(url):\n logging.info('Running DEL Request from RE function')\n\n # Request Headers for Blackbaud API request\n headers = {\n # Request headers\n 'Bb-Api-Subscription-Key': RE_API_KEY,\n 'Authorization': 'Bearer ' + retrieve_token(),\n }\n\n http.delete(url, headers=headers)\n\n\ndef retrieve_token():\n logging.info('Retrieve token for API connections')\n\n with open('access_token_output.json') as access_token_output:\n data = json.load(access_token_output)\n access_token = data['access_token']\n\n return access_token\n\n\ndef stop_logging():\n logging.info('Stopping the Script')\n\ntry:\n\n # Start Logging for Debugging\n start_logging()\n\n # Set current directory\n set_current_directory()\n\n # Load Environment Variables\n get_env_variables()\n\n # Set API Request strategy\n set_api_request_strategy()\n\n # Get Gift IDs to delete\n gifts_to_delete = ast.literal_eval(input('Enter the Gift IDs to delete: ').replace(' ', ''))\n\n for each_gift in gifts_to_delete:\n del_request_re(f'https://api.sky.blackbaud.com/gift/v1/gifts/{each_gift}')\n\nexcept Exception as Argument:\n\n logging.error(Argument)\n\n send_error_emails('Deleting Donation in Raisers Edge', Argument)\n\nfinally:\n\n stop_logging()\n\n sys.exit()\n","repo_name":"khamsakamal48/Donation_to_Raisers_Edge_v2","sub_path":"Delete Gifts.py","file_name":"Delete Gifts.py","file_ext":"py","file_size_in_byte":8883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12799638915","text":"import pickle\r\nimport random\r\nfrom PIL import Image\r\nimport numpy as np\r\n\r\n\r\ndef unpickle(file):\r\n with open(file, 'rb') as fo:\r\n d = pickle.load(fo, encoding='bytes')\r\n return d\r\n\r\n\r\ndef show_image_from_vector(vec):\r\n rgb = [vec[:1024], vec[1024:2048], vec[2048:]]\r\n 
img_arr = []\r\n for color in rgb:\r\n color_arr = []\r\n for start_ind in range(0, 1024, 32):\r\n row = color[start_ind:start_ind+32]\r\n color_arr.append(row)\r\n img_arr.append(color_arr)\r\n img_arr = np.array(img_arr)\r\n img_arr = np.transpose(img_arr)\r\n print(img_arr.shape)\r\n Image.fromarray(img_arr).show()\r\n\r\n\r\nbatch_file = './cifar-10-batches-py/data_batch_{}'\r\ntrain_data_file = \"./data/train.pkl\"\r\ntest_data_file = \"./data/test.pkl\"\r\n\r\nchoose_for_batch = 1000\r\noriginal_batch_size = 10000\r\nall_data = []\r\nall_labels = []\r\n\r\n\r\nfor i in range(1,6):\r\n file_name = batch_file.format(i)\r\n res = unpickle(file_name)\r\n data = res[b\"data\"]\r\n labels = res[b\"labels\"]\r\n indices = random.sample([i for i in range(original_batch_size)], choose_for_batch)\r\n\r\n for ind in indices:\r\n img = data[ind]/255\r\n all_data.append(img)\r\n all_labels.append(labels[ind])\r\n\r\n\r\nall_data = np.array(all_data)\r\nwith open(train_data_file, \"wb\") as f:\r\n pickle.dump(dict(data=all_data, labels=all_labels), f)\r\n\r\n\r\ntest_data = []\r\ntest_labels = []\r\n\r\nres = unpickle('./cifar-10-batches-py/test_batch')\r\ndata = res[b\"data\"]\r\nlabels = res[b\"labels\"]\r\nindices = random.sample([i for i in range(original_batch_size)], choose_for_batch)\r\n\r\nfor ind in indices:\r\n img = data[ind]/255\r\n test_data.append(img)\r\n test_labels.append(labels[ind])\r\n\r\ntest_data = np.array(test_data)\r\nwith open(test_data_file, \"wb\") as f:\r\n pickle.dump(dict(data=test_data, labels=test_labels), f)\r\n\r\n","repo_name":"AmitElyasi/supervised-deep-learning-practices","sub_path":"sample_dataset.py","file_name":"sample_dataset.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"22088268009","text":"import sys\n\ndef _ror(val, carry):\n\tnext_carry\t= bool(val & 1)\n\tval\t\t\t= (val >> 1)\n\tif carry:\n\t\tval |= 0x80\n\treturn val, next_carry\n\ndef random_init():\n\treturn [ 0xA5 ] + ([ 0 ] * 6)\n\ndef random_advance(seed):\n\tcarry = bool((seed[0] & 0x02) ^ (seed[1] & 0x02))\n\n\tfor i in range(0, len(seed)):\n\t\tseed[i], carry = _ror(seed[i], carry)\n\n\treturn seed\n\nfind = [ ]\nseed = random_init()\ntotal = 0\n\n#while True:\nfor i in range(0, 10000):\n\tif 0 == (i % 3200):\n\t\tprint('quick_resume_%d:' % (int(i / 100)))\n\tif 0 == (i % 100):\n\t\tprint('\\t.db '\n\t\t\t+ ', '.join([ '$%02X' % it for it in seed + [0] ])\n\t\t\t+ ' ; Base for %d' % (i))\n\t\n\tfor x in range(0, 21):\n\t\tseed = random_advance(seed)\n\t\t#if seed in find:\n\t\t#\tprint('[%d] Found block!' 
% (i))\n\t\t#\tprint('#' * 60)\n\ttotal += 1\n","repo_name":"soconnor71/smb_v2","sub_path":"resumetables.py","file_name":"resumetables.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"16879685563","text":"import glob\nimport logging\nimport os\nimport re\nimport sys\nfrom textwrap import dedent\nfrom typing import List, Optional, Sequence\n\nfrom cloudinit import ssh_util, subp, util\nfrom cloudinit.cloud import Cloud\nfrom cloudinit.config import Config\nfrom cloudinit.config.schema import MetaSchema, get_meta_doc\nfrom cloudinit.distros import ALL_DISTROS, ug_util\nfrom cloudinit.settings import PER_INSTANCE\n\nMODULE_DESCRIPTION = \"\"\"\\\nThis module handles most configuration for SSH and both host and authorized SSH\nkeys.\n\n**Authorized keys**\n\nAuthorized keys are a list of public SSH keys that are allowed to connect to\na user account on a system. They are stored in `.ssh/authorized_keys` in that\naccount's home directory. Authorized keys for the default user defined in\n``users`` can be specified using ``ssh_authorized_keys``. Keys\nshould be specified as a list of public keys.\n\n.. note::\n See the ``cc_set_passwords`` module documentation to enable/disable SSH\n password authentication.\n\nRoot login can be enabled/disabled using the ``disable_root`` config key. Root\nlogin options can be manually specified with ``disable_root_opts``.\n\nSupported public key types for the ``ssh_authorized_keys`` are:\n\n - dsa\n - rsa\n - ecdsa\n - ed25519\n - ecdsa-sha2-nistp256-cert-v01@openssh.com\n - ecdsa-sha2-nistp256\n - ecdsa-sha2-nistp384-cert-v01@openssh.com\n - ecdsa-sha2-nistp384\n - ecdsa-sha2-nistp521-cert-v01@openssh.com\n - ecdsa-sha2-nistp521\n - sk-ecdsa-sha2-nistp256-cert-v01@openssh.com\n - sk-ecdsa-sha2-nistp256@openssh.com\n - sk-ssh-ed25519-cert-v01@openssh.com\n - sk-ssh-ed25519@openssh.com\n - ssh-dss-cert-v01@openssh.com\n - ssh-dss\n - ssh-ed25519-cert-v01@openssh.com\n - ssh-ed25519\n - ssh-rsa-cert-v01@openssh.com\n - ssh-rsa\n - ssh-xmss-cert-v01@openssh.com\n - ssh-xmss@openssh.com\n\n.. note::\n this list has been filtered out from the supported keytypes of\n `OpenSSH`_ source, where the sigonly keys are removed. Please see\n ``ssh_util`` for more information.\n\n ``dsa``, ``rsa``, ``ecdsa`` and ``ed25519`` are added for legacy,\n as they are valid public keys in some old distros. They can possibly\n be removed in the future when support for the older distros are dropped\n\n.. _OpenSSH: https://github.com/openssh/openssh-portable/blob/master/sshkey.c\n\n**Host keys**\n\nHost keys are for authenticating a specific instance. Many images have default\nhost SSH keys, which can be removed using ``ssh_deletekeys``.\n\nHost keys can be added using the ``ssh_keys`` configuration key.\n\nWhen host keys are generated the output of the ssh-keygen command(s) can be\ndisplayed on the console using the ``ssh_quiet_keygen`` configuration key.\n\n.. note::\n When specifying private host keys in cloud-config, care should be taken to\n ensure that the communication between the data source and the instance is\n secure.\n\n\nIf no host keys are specified using ``ssh_keys``, then keys will be generated\nusing ``ssh-keygen``. By default one public/private pair of each supported\nhost key type will be generated. The key types to generate can be specified\nusing the ``ssh_genkeytypes`` config flag, which accepts a list of host key\ntypes to use. 
For each host key type for which this module has been instructed\nto create a keypair, if a key of the same type is already present on the\nsystem (i.e. if ``ssh_deletekeys`` was false), no key will be generated.\n\nSupported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``\nconfig flags are:\n\n - dsa\n - ecdsa\n - ed25519\n - rsa\n\nUnsupported host key types for the ``ssh_keys`` and the ``ssh_genkeytypes``\nconfig flags are:\n\n - ecdsa-sk\n - ed25519-sk\n\"\"\"\n\n# Note: We do not support *-sk key types because:\n# 1) In the autogeneration case user interaction with the device is needed\n# which does not fit with a cloud-context.\n# 2) This type of keys are user-based, not hostkeys.\n\n\nmeta: MetaSchema = {\n \"id\": \"cc_ssh\",\n \"name\": \"SSH\",\n \"title\": \"Configure SSH and SSH keys\",\n \"description\": MODULE_DESCRIPTION,\n \"distros\": [ALL_DISTROS],\n \"frequency\": PER_INSTANCE,\n \"examples\": [\n dedent(\n \"\"\"\\\n ssh_keys:\n rsa_private: |\n -----BEGIN RSA PRIVATE KEY-----\n MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco\n ...\n -----END RSA PRIVATE KEY-----\n rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...\n rsa_certificate: |\n ssh-rsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...\n dsa_private: |\n -----BEGIN DSA PRIVATE KEY-----\n MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qco\n ...\n -----END DSA PRIVATE KEY-----\n dsa_public: ssh-dsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7Xd ...\n dsa_certificate: |\n ssh-dsa-cert-v01@openssh.com AAAAIHNzaC1lZDI1NTE5LWNlcnQt ...\n ssh_authorized_keys:\n - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUU ...\n - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZ ...\n ssh_deletekeys: true\n ssh_genkeytypes: [rsa, dsa, ecdsa, ed25519]\n disable_root: true\n disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding\n allow_public_ssh_keys: true\n ssh_quiet_keygen: true\n ssh_publish_hostkeys:\n enabled: true\n blacklist: [dsa]\n \"\"\" # noqa: E501\n )\n ],\n \"activate_by_schema_keys\": [],\n}\n\n__doc__ = get_meta_doc(meta)\nLOG = logging.getLogger(__name__)\n\nGENERATE_KEY_NAMES = [\"rsa\", \"dsa\", \"ecdsa\", \"ed25519\"]\nFIPS_UNSUPPORTED_KEY_NAMES = [\"dsa\", \"ed25519\"]\n\npattern_unsupported_config_keys = re.compile(\n \"^(ecdsa-sk|ed25519-sk)_(private|public|certificate)$\"\n)\nKEY_FILE_TPL = \"/etc/ssh/ssh_host_%s_key\"\nPUBLISH_HOST_KEYS = True\n# Don't publish the dsa hostkey by default since OpenSSH recommends not using\n# it.\nHOST_KEY_PUBLISH_BLACKLIST = [\"dsa\"]\n\nCONFIG_KEY_TO_FILE = {}\nPRIV_TO_PUB = {}\nfor k in GENERATE_KEY_NAMES:\n CONFIG_KEY_TO_FILE.update(\n {\n f\"{k}_private\": (KEY_FILE_TPL % k, 0o600),\n f\"{k}_public\": (f\"{KEY_FILE_TPL % k}.pub\", 0o644),\n f\"{k}_certificate\": (f\"{KEY_FILE_TPL % k}-cert.pub\", 0o644),\n }\n )\n PRIV_TO_PUB[f\"{k}_private\"] = f\"{k}_public\"\n\nKEY_GEN_TPL = 'o=$(ssh-keygen -yf \"%s\") && echo \"$o\" root@localhost > \"%s\"'\n\n\ndef handle(name: str, cfg: Config, cloud: Cloud, args: list) -> None:\n\n # remove the static keys from the pristine image\n if cfg.get(\"ssh_deletekeys\", True):\n key_pth = os.path.join(\"/etc/ssh/\", \"ssh_host_*key*\")\n for f in glob.glob(key_pth):\n try:\n util.del_file(f)\n except Exception:\n util.logexc(LOG, \"Failed deleting key file %s\", f)\n\n if \"ssh_keys\" in cfg:\n # if there are keys and/or certificates in cloud-config, use them\n cert_config = []\n for (key, val) in cfg[\"ssh_keys\"].items():\n if key not in 
CONFIG_KEY_TO_FILE:\n if pattern_unsupported_config_keys.match(key):\n reason = \"unsupported\"\n else:\n reason = \"unrecognized\"\n LOG.warning('Skipping %s ssh_keys entry: \"%s\"', reason, key)\n continue\n tgt_fn = CONFIG_KEY_TO_FILE[key][0]\n tgt_perms = CONFIG_KEY_TO_FILE[key][1]\n util.write_file(tgt_fn, val, tgt_perms)\n # set server to present the most recently identified certificate\n if \"_certificate\" in key:\n cert_config.append((\"HostCertificate\", str(tgt_fn)))\n\n if cert_config:\n ssh_util.append_ssh_config(cert_config)\n\n for private_type, public_type in PRIV_TO_PUB.items():\n if (\n public_type in cfg[\"ssh_keys\"]\n or private_type not in cfg[\"ssh_keys\"]\n ):\n continue\n private_file, public_file = (\n CONFIG_KEY_TO_FILE[private_type][0],\n CONFIG_KEY_TO_FILE[public_type][0],\n )\n cmd = [\"sh\", \"-xc\", KEY_GEN_TPL % (private_file, public_file)]\n try:\n # TODO(harlowja): Is this guard needed?\n with util.SeLinuxGuard(\"/etc/ssh\", recursive=True):\n subp.subp(cmd, capture=False)\n LOG.debug(\n \"Generated a key for %s from %s\", public_file, private_file\n )\n except Exception:\n util.logexc(\n LOG,\n \"Failed generating a key for \"\n f\"{public_file} from {private_file}\",\n )\n else:\n # if not, generate them\n genkeys = util.get_cfg_option_list(\n cfg, \"ssh_genkeytypes\", GENERATE_KEY_NAMES\n )\n # remove keys that are not supported in fips mode if its enabled\n key_names = (\n genkeys\n if not util.fips_enabled()\n else [\n names\n for names in genkeys\n if names not in FIPS_UNSUPPORTED_KEY_NAMES\n ]\n )\n skipped_keys = set(genkeys).difference(key_names)\n if skipped_keys:\n LOG.debug(\n \"skipping keys that are not supported in fips mode: %s\",\n \",\".join(skipped_keys),\n )\n\n lang_c = os.environ.copy()\n lang_c[\"LANG\"] = \"C\"\n for keytype in key_names:\n keyfile = KEY_FILE_TPL % (keytype)\n if os.path.exists(keyfile):\n continue\n util.ensure_dir(os.path.dirname(keyfile))\n cmd = [\"ssh-keygen\", \"-t\", keytype, \"-N\", \"\", \"-f\", keyfile]\n\n # TODO(harlowja): Is this guard needed?\n with util.SeLinuxGuard(\"/etc/ssh\", recursive=True):\n try:\n out, err = subp.subp(cmd, capture=True, env=lang_c)\n if not util.get_cfg_option_bool(\n cfg, \"ssh_quiet_keygen\", False\n ):\n sys.stdout.write(util.decode_binary(out))\n\n gid = util.get_group_id(\"ssh_keys\")\n if gid != -1:\n # perform same \"sanitize permissions\" as sshd-keygen\n permissions_private = 0o600\n ssh_version = ssh_util.get_opensshd_upstream_version()\n if ssh_version and ssh_version < util.Version(9, 0):\n permissions_private = 0o640\n os.chown(keyfile, -1, gid)\n os.chmod(keyfile, permissions_private)\n os.chmod(f\"{keyfile}.pub\", 0o644)\n except subp.ProcessExecutionError as e:\n err = util.decode_binary(e.stderr).lower()\n if e.exit_code == 1 and err.lower().startswith(\n \"unknown key\"\n ):\n LOG.debug(\"ssh-keygen: unknown key type '%s'\", keytype)\n else:\n util.logexc(\n LOG,\n \"Failed generating key type %s to file %s\",\n keytype,\n keyfile,\n )\n\n if \"ssh_publish_hostkeys\" in cfg:\n host_key_blacklist = util.get_cfg_option_list(\n cfg[\"ssh_publish_hostkeys\"],\n \"blacklist\",\n HOST_KEY_PUBLISH_BLACKLIST,\n )\n publish_hostkeys = util.get_cfg_option_bool(\n cfg[\"ssh_publish_hostkeys\"], \"enabled\", PUBLISH_HOST_KEYS\n )\n else:\n host_key_blacklist = HOST_KEY_PUBLISH_BLACKLIST\n publish_hostkeys = PUBLISH_HOST_KEYS\n\n if publish_hostkeys:\n hostkeys = get_public_host_keys(blacklist=host_key_blacklist)\n try:\n cloud.datasource.publish_host_keys(hostkeys)\n 
except Exception:\n util.logexc(LOG, \"Publishing host keys failed!\")\n\n try:\n (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro)\n (user, _user_config) = ug_util.extract_default(users)\n disable_root = util.get_cfg_option_bool(cfg, \"disable_root\", True)\n disable_root_opts = util.get_cfg_option_str(\n cfg, \"disable_root_opts\", ssh_util.DISABLE_USER_OPTS\n )\n\n keys: List[str] = []\n if util.get_cfg_option_bool(cfg, \"allow_public_ssh_keys\", True):\n keys = cloud.get_public_ssh_keys() or []\n else:\n LOG.debug(\n \"Skipping import of publish SSH keys per \"\n \"config setting: allow_public_ssh_keys=False\"\n )\n\n if \"ssh_authorized_keys\" in cfg:\n cfgkeys = cfg[\"ssh_authorized_keys\"]\n keys.extend(cfgkeys)\n\n apply_credentials(keys, user, disable_root, disable_root_opts)\n except Exception:\n util.logexc(LOG, \"Applying SSH credentials failed!\")\n\n\ndef apply_credentials(keys, user, disable_root, disable_root_opts):\n\n keys = set(keys)\n if user:\n ssh_util.setup_user_keys(keys, user)\n\n if disable_root:\n if not user:\n user = \"NONE\"\n key_prefix = disable_root_opts.replace(\"$USER\", user)\n key_prefix = key_prefix.replace(\"$DISABLE_USER\", \"root\")\n else:\n key_prefix = \"\"\n\n ssh_util.setup_user_keys(keys, \"root\", options=key_prefix)\n\n\ndef get_public_host_keys(blacklist: Optional[Sequence[str]] = None):\n \"\"\"Read host keys from /etc/ssh/*.pub files and return them as a list.\n\n @param blacklist: List of key types to ignore. e.g. ['dsa', 'rsa']\n @returns: List of keys, each formatted as a two-element tuple.\n e.g. [('ssh-rsa', 'AAAAB3Nz...'), ('ssh-ed25519', 'AAAAC3Nx...')]\n \"\"\"\n public_key_file_tmpl = \"%s.pub\" % (KEY_FILE_TPL,)\n key_list = []\n blacklist_files = []\n if blacklist:\n # Convert blacklist to filenames:\n # 'dsa' -> '/etc/ssh/ssh_host_dsa_key.pub'\n blacklist_files = [\n public_key_file_tmpl % (key_type,) for key_type in blacklist\n ]\n # Get list of public key files and filter out blacklisted files.\n file_list = [\n hostfile\n for hostfile in glob.glob(public_key_file_tmpl % (\"*\",))\n if hostfile not in blacklist_files\n ]\n\n # Read host key files, retrieve first two fields as a tuple and\n # append that tuple to key_list.\n for file_name in file_list:\n file_contents = util.load_file(file_name)\n key_data = file_contents.split()\n if key_data and len(key_data) > 1:\n key_list.append(tuple(key_data[:2]))\n return key_list\n","repo_name":"canonical/cloud-init","sub_path":"cloudinit/config/cc_ssh.py","file_name":"cc_ssh.py","file_ext":"py","file_size_in_byte":14885,"program_lang":"python","lang":"en","doc_type":"code","stars":2393,"dataset":"github-code","pt":"12"} +{"seq_id":"15362159815","text":"import sqlite3 as lite\nimport sys\n\n# makes the to be scraped database \ncon = lite.connect('toScrape.db')\nc = con.cursor()\nc.execute(\"CREATE TABLE toScrape(steamid TEXT);\")\nc.execute(\"INSERT INTO toScrape (steamid) VALUES (76561197960435530);\")\ncon.commit()\ncon.close()","repo_name":"johnWilshire/steamScraper","sub_path":"makeDB/makeToScrape.py","file_name":"makeToScrape.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"7710860830","text":"import pickle\nimport os\nimport torch\nimport numpy as np\nimport pandas as pd\n\nfrom torch.utils.data import TensorDataset, DataLoader\n\n# np.random.seed(123)\n# torch.manual_seed(42)\n\n\nclass MyTensorDataset(TensorDataset):\n \"\"\" MyTensorDataset \"\"\"\n 
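    # A hedged usage sketch (illustrative only, with made-up tensors; not from
    # the original file): MyTensorDataset overrides __getitem__ below so that
    # every sample also carries its own dataset index, which lets downstream
    # code map a batch back to dataset rows.
    #
    #     ds = MyTensorDataset(torch.zeros(8, 3), torch.ones(8, 3))
    #     idx, a, b = ds[5]  # idx == 5; a and b are the row-5 slices
    #
    # A DataLoader over such a dataset then yields (indices, *tensor_batches).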
def __init__(self, *tensors):\n super(MyTensorDataset, self).__init__(*tensors)\n\n def __getitem__(self, index):\n return tuple([index]) + tuple(tensor[index] for tensor in self.tensors)\n\n\ndef construct_time_series(x, len=36):\n t = x.shape[0]\n out = []\n for i in range(t - len):\n out.append(x[i:i+len])\n\n return np.stack(out)\n\n\ndef get_dataloader(args):\n root_path = args.data_path\n\n if args.dataset == 'radar':\n with open(os.path.join(root_path, f'radar.pkl'), 'rb') as f:\n dt = pickle.load(f)\n x_train = torch.tensor(construct_time_series(dt['data_train'], 25), dtype=torch.float32).flatten(2)\n x_val = torch.tensor(construct_time_series(dt['data_val'], 25), dtype=torch.float32).flatten(2)\n x_test = torch.tensor(construct_time_series(dt['data_test'], 25), dtype=torch.float32).flatten(2)\n mask_train = torch.tensor(construct_time_series(dt['mask_train'], 25), dtype=torch.float32).flatten(2)\n mask_val = torch.tensor(construct_time_series(dt['mask_val'], 25), dtype=torch.float32).flatten(2)\n mask_test = torch.tensor(construct_time_series(dt['mask_test'], 25), dtype=torch.float32).flatten(2)\n\n train = MyTensorDataset(x_train, mask_train)\n validation = MyTensorDataset(x_train, x_val, mask_val)\n test = MyTensorDataset(x_train, x_test, mask_test)\n elif args.dataset == 'indoor':\n with open(os.path.join(root_path, f'indoor.pkl'), 'rb') as f:\n dt = pickle.load(f)\n x_train = torch.tensor(construct_time_series(dt['data_train'], 25), dtype=torch.float32).flatten(2)\n x_val = torch.tensor(construct_time_series(dt['data_val'], 25), dtype=torch.float32).flatten(2)\n x_test = torch.tensor(construct_time_series(dt['data_test'], 25), dtype=torch.float32).flatten(2)\n mask_train = torch.tensor(construct_time_series(dt['mask_train'], 25), dtype=torch.float32).flatten(2)\n mask_val = torch.tensor(construct_time_series(dt['mask_val'], 25), dtype=torch.float32).flatten(2)\n mask_test = torch.tensor(construct_time_series(dt['mask_test'], 25), dtype=torch.float32).flatten(2)\n\n train = MyTensorDataset(x_train, mask_train)\n validation = MyTensorDataset(x_train, x_val, mask_val)\n test = MyTensorDataset(x_train, x_test, mask_test)\n else:\n raise ValueError\n\n batch_size = args.batch_size\n\n train_loader = DataLoader(train, batch_size=batch_size, shuffle=True)\n valid_loader = DataLoader(validation, batch_size=batch_size, shuffle=False)\n if test is not None:\n test_loader = DataLoader(test, batch_size=batch_size, shuffle=False)\n else:\n test_loader = None\n\n data_loaders = {\n \"train\": train_loader,\n \"test\": test_loader,\n \"validate\": valid_loader\n }\n return data_loaders\n","repo_name":"taozerui/nonlinear_tr","sub_path":"src/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73773011221","text":"from typing import List\nimport math\n\nclass Solution:\n\n def get_pos_info(self, s: str, word: str) -> list[list[int]]:\n res = []\n idx = 0\n while idx != -1:\n idx = s.find(word, idx)\n if idx == -1:\n break\n res.append([idx, idx + len(word) - 1])\n idx += 1\n return res\n\n def minExtraChar(self, s: str, dictionary: List[str]) -> int:\n pos_arr = []\n for word in dictionary:\n arr = self.get_pos_info(s, word)\n pos_arr.extend(arr)\n pos_arr.sort(key=lambda x: [x[1], x[0]])\n dp = [math.inf] * len(s)\n for pos in pos_arr:\n dp[pos[1]] = int(min(pos[0], dp[pos[1]]))\n for i, dp_cost in enumerate(dp[: pos[0]]):\n dp[pos[1]] = int(min(pos[0] - i - 
1 + dp_cost, dp[pos[1]]))\n        ans = len(s)\n        for i, cost in enumerate(dp):\n            ans = min(ans, len(s) - i - 1 + cost)\n        return ans\n\ndata = [\n    \"xp\"\n    , [\"a\",\"u\",\"d\",\"b\",\"s\",\"r\",\"z\",\"y\",\"f\",\"l\",\"q\",\"i\",\"j\",\"w\",\"o\",\"c\"]\n]\nr = Solution().minExtraChar(*data)\nprint(r)","repo_name":"huangweijing/weo_leetcode","sub_path":"2707_Extra_Characters_in_a_String.py","file_name":"2707_Extra_Characters_in_a_String.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"14594979887","text":"import sqlite3\n\ndef create_table():\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"CREATE TABLE IF NOT EXISTS store (item TEXT, quantity INTEGER, price REAL)\")\n    cur.execute(\"INSERT INTO store VALUES ('Bat' , 10, 500)\")\n    conn.commit()\n    conn.close()\n\ndef insert_table(item, quantity, price):\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"INSERT INTO store VALUES (?, ?, ?)\",(item, quantity, price))\n    conn.commit()\n    conn.close() \n\ndef select_table():\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"SELECT * FROM store\")\n    rows = cur.fetchall()\n    conn.close()\n    return rows\n\ndef delete_table(item):\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"DELETE FROM store WHERE item = ?\",(item,))\n    conn.commit()\n    conn.close() \n\ndef update_table(quantity, price, item):\n    conn = sqlite3.connect(\"lite.db\")\n    cur = conn.cursor()\n    cur.execute(\"UPDATE store SET quantity = ?, price = ? WHERE item = ?\",(quantity, price, item))\n    conn.commit()\n    conn.close() \n\ncreate_table()\n\ninsert_table(\"Wicket\", 36, 360) \ninsert_table(\"Ball\", 100, 500.75) \ninsert_table(\"Gloves\", 50, 399)\nprint(select_table())\n\ndelete_table(\"Ball\")\nprint(select_table())\n\nupdate_table(24, 250, \"Wicket\")\nprint(select_table())","repo_name":"saurabhsinha09/pythoncode","sub_path":"dbModules/dbSqllite.py","file_name":"dbSqllite.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27365270510","text":"n,x = map(int,input().split())\r\n\r\nt = list(map(int,input().split()))\r\n\r\n\r\ntime_que = [0 for _ in range(x + 2)]# how many queued plays exist at each point in time\r\ntime_que[0] = 1# a song starts playing at second 0, so add one queue entry\r\n\r\nmusic_1 = 0\r\nmusic_other = 0\r\n\r\nfor now in range(x + 2):# second i (0...x + 1, because second x + 0.5 must be checked)\r\n    if time_que[now] >= 1:# if at least one play is queued at this second\r\n        for j in range(len(t)):\r\n            if now + t[j] < x + 2:\r\n                # increment time_que[now + t[j]] and process it later\r\n                time_que[now + t[j]] += (1 * time_que[now])\r\n            else:# if it overruns the limit, this is the last song playing\r\n                if j == 0:\r\n                    music_1 += time_que[now]\r\n                else:\r\n                    music_other += time_que[now]\r\n\r\nprint((music_1,music_other))\r\n","repo_name":"mikatan-mikan/-competitive-programming","sub_path":"atcoder beginner contest/ABC323/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"39587613777","text":"# Question number 3\n\n\"\"\" Write an algorithm for determining whether a given number is prime. 
\"\"\"\n\n# Naive method\n\ndef naive(n):\n is_prime = True\n\n # Iterate through the whole integers to find a divisor\n for i in range(2, n):\n # check divisibility\n if n % i == 0:\n is_prime = False\n break # break out of the loop\n \n return is_prime\n\ndef dynamic_method(n):\n is_prime = True\n\n # Iterate till the number's square root\n for i in range(2, int(n**0.5)+1):\n # check divisibility\n if n % i == 0:\n is_prime = False\n break # break out of the loop\n \n return is_prime\n\nif __name__ == '__main__':\n n = 2\n \n # using naive method\n print(f\"Determining if {n} is a prime or not using naive method ...\")\n print(f\"{n} is a prime number. \\n\" if naive(n) else f\"{n} is not a prime number. \\n\")\n\n # using dynamic method\n print(f\"Determining if {n} is a prime or not using dynamic method ...\")\n print(f\"{n} is a prime number.\" if dynamic_method(n) else f\"{n} is not a prime number.\") ","repo_name":"BAcode-X/python-assignment01","sub_path":"py files/question03.py","file_name":"question03.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"28821461747","text":"def main():\n print (\"What number would you like to remove the unnecessary zero's from?\")\n number = input (\"Input: \")\n number = zero_remover(number)\n input (f\"Output: {number}\")\n\ndef zero_remover(number):\n number = float(number)\n if number.is_integer():\n return str(int(number))\n return str(number)\n\nmain()","repo_name":"DruboGit/Python","sub_path":"Other lesson projects/2023-12-01/remove_zeros.py","file_name":"remove_zeros.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36963559125","text":"\"\"\"This file contains database wrapper.\"\"\"\nimport logging\nfrom typing import Dict, List, Optional, Union, cast\n\nfrom aiohttp import web\nfrom motor.core import AgnosticClient\nfrom motor.motor_asyncio import (\n AsyncIOMotorClient,\n AsyncIOMotorCollection,\n AsyncIOMotorDatabase,\n)\nfrom pymongo import ASCENDING, DESCENDING\n\nlogger = logging.getLogger(__name__)\n\n\nclass DBWrapper:\n \"\"\"Class to represent database wrapper.\"\"\"\n\n reference_date: Optional[str] = None\n\n def __init__(\n self,\n url: str,\n db_name: str,\n collection_name: str,\n tls_cert_key_path: Optional[str] = None,\n tls_ca_path: Optional[str] = None,\n ) -> None:\n \"\"\"\n Construct DBWrapper instance.\n\n :param url: connection url string.\n :param db_name: database name.\n :param collection_name: collection name.\n :param tls_cert_key_path: path to certificate key.\n :param tls_ca_path: path to CA certificate.\n \"\"\"\n tls: Dict[str, Union[str, bool, None]] = {}\n if bool(tls_cert_key_path):\n tls[\"tls\"] = True\n tls[\"tlsCertificateKeyFile\"] = tls_cert_key_path\n if bool(tls_ca_path):\n tls[\"tlsCAFile\"] = tls_ca_path\n\n self.client: AgnosticClient = AsyncIOMotorClient(\n url, connectTimeoutMS=1000, retryWrites=True, **tls\n )\n self.db: AsyncIOMotorDatabase = self.client[db_name]\n self.collection: AsyncIOMotorCollection = self.db[collection_name]\n self.is_closed = False\n\n def close(self) -> None:\n \"\"\"Close database connection.\"\"\"\n self.client.close()\n self.is_closed = True\n\n async def drop_collection(self, collection_name: str) -> None:\n \"\"\"\n Drop collection by name.\n\n :param collection_name: collection name.\n :return: None\n \"\"\"\n await self.db[collection_name].drop()\n\n 
async def get_reference_date(self) -> Optional[str]:\n \"\"\"\n Return last date from database.\n\n :return: last date as string.\n \"\"\"\n if not self.reference_date:\n result = await self.collection.find_one(\n filter={},\n projection={\"_id\": 0, \"date\": 1},\n sort=[(\"date\", DESCENDING)],\n )\n if result:\n self.reference_date = result.get(\"date\")\n return self.reference_date\n\n async def get_districts(self) -> List[str]:\n \"\"\"\n Return list of districts.\n\n :return: list of districts.\n \"\"\"\n reference_date = await self.get_reference_date()\n if reference_date:\n return cast(\n List[str],\n await self.collection.distinct(\n \"district\", filter={\"date\": reference_date}\n ),\n )\n else:\n logger.error(\"Failed to get reference date.\")\n return []\n\n async def get_localities(self, district_name: str) -> List[str]:\n \"\"\"\n Return list of district localities.\n\n :param district_name: district name.\n :return: list of localities.\n \"\"\"\n reference_date = await self.get_reference_date()\n if reference_date:\n return cast(\n List[str],\n await self.collection.distinct(\n \"localities.locality\",\n filter={\"date\": reference_date, \"district\": district_name},\n ),\n )\n else:\n logger.error(\"Failed to get reference date.\")\n return []\n\n async def get_district(\n self, district_name: str\n ) -> List[Dict[str, Union[str, int, float]]]:\n \"\"\"\n Return data by district name.\n\n :param district_name: district name.\n :return:\n \"\"\"\n return cast(\n List[Dict[str, Union[str, int, float]]],\n await self.collection.find(\n filter={\"district\": district_name},\n projection={\"_id\": 0, \"localities\": 0},\n sort=(\"date\",),\n ),\n )\n\n async def get_locality(self, district_name: str, locality_name: str) -> List[str]:\n \"\"\"\n Return locality data by district and locality names.\n\n :param district_name: district name.\n :param locality_name: locality name.\n :return: list of districts localities.\n \"\"\"\n filter_ = {\n \"district\": district_name,\n \"localities.locality\": locality_name,\n }\n list_length = await self.collection.count_documents(filter_)\n if list_length:\n return cast(\n List[str],\n await self.collection.find(\n filter=filter_,\n projection={\n \"_id\": 0,\n \"date\": 1,\n \"localities\": {\"$elemMatch\": {\"locality\": locality_name}},\n },\n sort=[(\"date\", ASCENDING)],\n ).to_list(length=list_length),\n )\n else:\n return []\n\n\nasync def close_db(app: web.Application) -> None:\n \"\"\"\n Close connection with database.\n\n :param app: application instance.\n :return: None\n \"\"\"\n db = app[\"db\"]\n db.close()\n\n\ndef init_db(app: web.Application) -> None:\n \"\"\"\n Initialize database wrapper.\n\n :param app: application instance.\n :return: None\n \"\"\"\n db_uri = app[\"config\"][\"DB_URI\"]\n db_name = app[\"config\"][\"DB_NAME\"]\n collection_name = app[\"config\"][\"DB_COLLECTION_NAME\"]\n tls_cert_key_path = app[\"config\"][\"TLS_CERT_KEY_PATH\"]\n tls_ca_path = app[\"config\"][\"TLS_CA_PATH\"]\n\n app[\"db\"] = DBWrapper(\n db_uri, db_name, collection_name, tls_cert_key_path, tls_ca_path\n )\n\n app.on_cleanup.append(close_db)\n","repo_name":"toolen/covid-leningrad-region","sub_path":"dashboard-backend/dashboard_backend/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41961022212","text":"from django.forms import ModelForm, RadioSelect, TextInput, Textarea\nfrom crispy_forms.helper import 
FormHelper\nfrom django_ckeditor_5.widgets import CKEditor5Widget\nfrom .models import Item\n\n\nclass ItemUpdateForm(ModelForm):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        # fields that are null=true\n        self.fields['date'].required = False\n        self.fields['notes'].required = False\n        self.fields['body_redact'].required = False\n        # fields that do not save\n        self.fields['date'].disabled = True\n        self.fields['body_original'].disabled = True\n\n    class Meta:\n        model = Item\n        fields = ('date',\n                  'reporter',\n                  'title',\n                  'body_original',\n                  'body_redact',\n                  'pool_report',\n                  'publish',\n                  'off_the_record',\n                  'review_status',\n                  'notes'\n                  )\n\n        widgets = {\n            'date': TextInput(attrs={'disabled': True}),\n            'reporter': TextInput(attrs={'class': 'form-control'}),\n            'review_status': RadioSelect(attrs={'id': 'value'}),\n            'notes': Textarea(attrs={'class': 'form-control'}),\n            'body_redact': CKEditor5Widget(\n                attrs={'class': 'django_ckeditor_5'}, config_name='extends'\n            ),\n            'body_original': CKEditor5Widget(\n                attrs={'class': 'django_ckeditor_5'}, config_name='extends'\n            )\n        }\n","repo_name":"timkanke/scutes","sub_path":"src/processing/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4159913054","text":"#abc235_d.py\n# x on the blackboard is initially 1\nfrom collections import deque\nfrom collections import defaultdict\na,N=map(int,input().split())\nx=1\nqueue=deque()\nvisited=defaultdict(int)\nqueue.append((1,0))\nans=10**10\nwhile len(queue)>0:\n    d,c=queue.popleft()\n    if d==N:\n        ans=min(c,ans)\n    dstr=str(d)\n    dlen=len(dstr)\n    if d>=10 and d%10!=0:\n        for i in range(len(dstr)-1):\n            tempstr=dstr[-1]+dstr[:dlen-1]\n            temp=int(tempstr)\n            visitedcnt=visited[temp]\n            if visitedcnt==0 or c+1 < visitedcnt:\n                queue.append((temp,c+1))\n                visited[temp]=c+1\n    temp=d*a\n    if len(str(temp))<=len(str(N)):\n        visitedcnt=visited[temp]\n        if visitedcnt==0 or c+1 < visitedcnt:\n            queue.append((temp,c+1))\n            visited[temp]=c+1\n\nif ans==10**10:\n    print(-1) \nelse:\n    print(ans)","repo_name":"masajoki/atcoder","sub_path":"abc235_d.py","file_name":"abc235_d.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4232669312","text":"import numpy as np\nimport time\n\n\nclass SolveNQueens:\n    def __init__(self, size):\n        self.size = size\n        self.queens = np.zeros((size, ), dtype=int)\n        self.row_queens = np.zeros((size, ), dtype=int) # depends on initial placement strategy\n        self.main_diagonal_queens = np.zeros((2 * size - 1, ), dtype=int)\n        self.rev_diagonal_queens = np.zeros((2 * size - 1, ), dtype=int)\n\n    def __str__(self):\n        if self.size <= 50:\n            return_string = ['_'] * self.size * (self.size + 1)\n            for column, row in enumerate(self.queens):\n                return_string[(self.size + 1) * column + self.size] = '\\n'\n                return_string[(self.size + 1) * row + column] = '*'\n            return "".join(return_string)\n        else:\n            return "Board is too big. Printing time instead..."\n\n    def initialize_random(self):\n        \"\"\" Random initial board set-up.\n        No two queens conflict row-wise or column-wise. 
\"\"\"\n\n self.queens = np.random.choice(np.arange(self.size), replace=False,\n size=(self.size))\n print(self.queens)\n\n main_diagonal = self.queens - np.arange(self.size)\n rev_diagonal = self.queens + np.arange(self.size)\n\n unique, counts = np.unique(main_diagonal, return_counts=True)\n for _ in zip(unique, counts):\n self.main_diagonal_queens[unique + self.size - 1] = counts\n self.main_diagonal_queens = np.flip(self.main_diagonal_queens)\n\n unique, counts = np.unique(rev_diagonal, return_counts=True)\n for _ in zip(unique, counts):\n self.rev_diagonal_queens[unique] = counts\n\n def initialize_min_conflict(self):\n \"\"\" Best possible initialization.\n Greedy strategy (min-conflict) guarantees we always\n choose a less conflicted spot for the next queen. \"\"\"\n self.queens[0] = np.random.choice(np.arange(self.size))\n self.row_queens[self.queens[0]] += 1\n self.main_diagonal_queens[-self.queens[0] + self.size - 1] += 1\n self.rev_diagonal_queens[self.queens[0]] += 1\n\n for column in range(1, self.size):\n conflicts_column = self.row_queens + \\\n self.main_diagonal_queens[self.size + column - 1: column - 1: -1] + \\\n self.rev_diagonal_queens[column : self.size + column : 1]\n place_queen_row = np.random.choice(np.where(conflicts_column == np.amin(conflicts_column))[0])\n self.queens[column] = place_queen_row\n self.row_queens[place_queen_row] += 1\n self.main_diagonal_queens[column - place_queen_row + self.size - 1] += 1\n self.rev_diagonal_queens[column + place_queen_row] += 1\n\n def solve(self):\n \"\"\" Solver method. \"\"\"\n\n conflicts = self.get_conflicts()\n count = 0\n while np.amax(conflicts) > 0 and count < 60:\n max_conflicts = np.amax(conflicts)\n move_queen_from_column = np.random.choice(np.where(conflicts == np.amax(conflicts))[0])\n move_queen_from_row = self.queens[move_queen_from_column]\n\n conflicts_column = np.zeros((self.size, ), dtype=int)\n for queen_row in range(self.size):\n conflicts_column[queen_row] = self.row_queens[queen_row] + \\\n self.main_diagonal_queens[move_queen_from_column - queen_row + self.size - 1] + \\\n self.rev_diagonal_queens[move_queen_from_column + queen_row]\n conflicts_column[move_queen_from_row] = max_conflicts\n\n move_queen_to_row = np.random.choice(np.where(conflicts_column == np.amin(conflicts_column))[0]) \n\n self.queens[move_queen_from_column] = move_queen_to_row\n\n self.row_queens[move_queen_from_row] -= 1\n self.main_diagonal_queens[move_queen_from_column - move_queen_from_row + self.size - 1] -= 1\n self.rev_diagonal_queens[move_queen_from_column + move_queen_from_row] -= 1\n\n self.row_queens[move_queen_to_row] += 1\n self.main_diagonal_queens[move_queen_from_column - move_queen_to_row + self.size - 1] += 1\n self.rev_diagonal_queens[move_queen_from_column + move_queen_to_row] += 1\n\n conflicts = self.get_conflicts()\n count += 1\n\n if count == 60:\n self.solve()\n return\n\n def get_conflicts(self):\n \"\"\" Calculate conflicts for current configuration. 
\"\"\"\n conflicts = np.zeros((self.size, ), dtype=int)\n for queen_column in range(self.size):\n queen_row = self.queens[queen_column]\n conflicts[queen_column] += self.row_queens[queen_row] + \\\n self.main_diagonal_queens[queen_column - queen_row + self.size - 1] + \\\n self.rev_diagonal_queens[queen_column + queen_row] - 3\n return conflicts\n\n\ndef main():\n \"\"\" User interaction.\n Request desired input size.\"\"\"\n size = int(input('Enter number of queens: ').strip())\n\n start_time = time.process_time()\n game = SolveNQueens(size)\n game.initialize_min_conflict()\n game.solve()\n print(game)\n print(\" --- %.5f seconds ---\" % (time.process_time() - start_time))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AtanasGruev/Artificial-Intelligence-Course-Repo","sub_path":"constraint_satisfaction/n_queens.py","file_name":"n_queens.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35670285224","text":"#! /usr/bin/env python3\nfrom settings import *\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import (\n FigureCanvasQTAgg,\n NavigationToolbar2QT as NavigationToolbar,\n)\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QApplication,\n QWidget,\n QComboBox,\n QLineEdit,\n QDialog,\n QSlider,\n QToolBar,\n QGroupBox,\n QHBoxLayout,\n QGridLayout,\n QStatusBar,\n QVBoxLayout,\n QMessageBox,\n QLabel,\n QFileDialog,\n)\nfrom PyQt5.QtGui import QPalette, QColor\nfrom PyQt5.QtCore import Qt\nimport sys\nfrom os.path import basename\nfrom functools import partial\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport xarray as xr\n\n# import cartopy.crs as ccrs\n\nmpl.use(\"Qt5Agg\")\n\n\nclass MplCanvas(FigureCanvasQTAgg):\n def __init__(self, parent=None, width=10, height=10, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n super(MplCanvas, self).__init__(fig)\n\n\nclass Ui(QMainWindow):\n dialogs = []\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.setWindowTitle(\"ROMSView\")\n self._state = AppState()\n self.generalLayout = QHBoxLayout()\n self.centralWidget = QWidget(self)\n self.setCentralWidget(self.centralWidget)\n self.centralWidget.setLayout(self.generalLayout)\n\n self._createMenu()\n self._createToolBar()\n self._createSideBar()\n self._createMplCanvas()\n self._createStatusBar()\n\n def _createMenu(self):\n self.fileMenu = self.menuBar().addMenu(\"&File\")\n self.fileMenu.addAction(\"&Load roms_grd.nc\", not_found_dialog)\n self.fileMenu.addAction(\"&Load roms_clm.nc\", not_found_dialog)\n self.fileMenu.addAction(\"&Load roms_ini.nc\", not_found_dialog)\n self.fileMenu.addAction(\"&Load roms_his.nc\", not_found_dialog)\n self.fileMenu.addAction(\"&Quit\", self.close)\n\n self.toolsMenu = self.menuBar().addMenu(\"&Plot\")\n self.toolsMenu.addAction(\"&Hslice\", not_found_dialog)\n self.toolsMenu.addAction(\"&Vslice\", not_found_dialog)\n\n def _createToolBar(self):\n tools = QToolBar()\n self.addToolBar(tools)\n for key in RomsNCFiles.__dataclass_fields__.keys():\n tools.addAction(key.upper(), partial(\n self.openFile, f\"*_{key}*.nc\"))\n\n def _createSideBar(self):\n self.sideBarLayout = QVBoxLayout()\n\n self._createPlotSelector()\n self._createVarSelector()\n self._createTimeSelector()\n self._createLevSelector()\n self._createCbarSelector()\n self._createAlphaSelector()\n self._createRangeBox()\n\n widget = 
QWidget()\n widget.setLayout(self.sideBarLayout)\n widget.setFixedWidth(185)\n self.generalLayout.addWidget(widget)\n\n def _createPlotSelector(self):\n self.plotSelector = QComboBox()\n self.plotSelector.setToolTip(\n \"What to do when clicking horizontal slice point (s)\")\n self.plotSelector.addItems(\n [\"MPL Toolbar\", \"Tseries on click\", \"Vslice on click\"])\n self.plotSelector.setDisabled(True)\n self.plotSelector.activated[str].connect(self.toggle_plot)\n self.sideBarLayout.addWidget(self.plotSelector)\n\n def _createVarSelector(self):\n self.varSelector = QComboBox()\n self.varSelector.setToolTip(\"Variables\")\n self.varSelector.addItem(\"Variables\")\n self.varSelector.setDisabled(True)\n self.varSelector.activated[str].connect(self.toggle_var)\n self.sideBarLayout.addWidget(self.varSelector)\n\n def _createTimeSelector(self):\n self.timeSelector = QComboBox()\n self.timeSelector.setToolTip(\"Times\")\n self.timeSelector.addItem(\"Times\")\n self.timeSelector.setDisabled(True)\n self.timeSelector.activated[str].connect(self.toggle_time)\n self.sideBarLayout.addWidget(self.timeSelector)\n\n def _createLevSelector(self):\n self.levSelector = QComboBox()\n self.levSelector.setToolTip(\"Levels\")\n self.levSelector.addItem(\"Levels\")\n self.levSelector.setDisabled(True)\n self.levSelector.activated[str].connect(self.toggle_lev)\n self.sideBarLayout.addWidget(self.levSelector)\n\n def _createCbarSelector(self):\n self.cbarSelector = QComboBox()\n self.cbarSelector.setToolTip(\"Colorbars\")\n self.cbarSelector.addItems(\n [\"viridis\", \"cividis\", \"inferno\", \"jet\", \"turbo\", \"RdBu_r\", \"twilight\", \"Reds\", \"Blues\", \"terrain\"])\n self.cbarSelector.activated[str].connect(self.set_colorbar)\n self.cbarSelector.setDisabled(True)\n self.sideBarLayout.addWidget(self.cbarSelector)\n\n def _createAlphaSelector(self):\n alpha = QSlider(Qt.Horizontal)\n alpha.setValue(100)\n alpha.valueChanged[int].connect(self.set_alpha)\n self.sideBarLayout.addWidget(alpha)\n\n def _createRangeBox(self):\n self.rangeBox = QGroupBox()\n vmin_label = QLabel(\"Vmin\")\n vmax_label = QLabel(\"Vmax\")\n self.vmin = QLineEdit()\n self.vmax = QLineEdit()\n rangeLayout = QGridLayout()\n rangeLayout.addWidget(vmax_label, 0, 0, 1, 1)\n rangeLayout.addWidget(self.vmax, 0, 1, 1, 1)\n rangeLayout.addWidget(vmin_label, 1, 0, 1, 1)\n rangeLayout.addWidget(self.vmin, 1, 1, 1, 1)\n self.rangeBox.setLayout(rangeLayout)\n self.rangeBox.setFixedHeight(100)\n self.vmin.returnPressed.connect(self.set_range)\n self.vmax.returnPressed.connect(self.set_range)\n self.sideBarLayout.addWidget(self.rangeBox)\n self.rangeBox.setDisabled(True)\n\n def _createMplCanvas(self):\n self.mplcanvas = MplCanvas(self, width=5, height=4, dpi=100)\n self.cid = None # initialize variable to reprensent mpl_connect\n self.init_plot()\n\n # Create toolbar, passing canvas as first parameter, parent (self, the MainWindow) as second.\n self.mpltoolbar = NavigationToolbar(self.mplcanvas, self)\n\n layout = QVBoxLayout()\n layout.addWidget(self.mpltoolbar)\n layout.addWidget(self.mplcanvas)\n\n # Create a placeholder widget to hold our toolbar and canvas.\n widget = QWidget()\n widget.setLayout(layout)\n self.generalLayout.addWidget(widget)\n\n def _createStatusBar(self):\n self.status = QStatusBar()\n self.status.showMessage(\"Ready...\")\n self.setStatusBar(self.status)\n\n def openFile(self, pattern=\"*.nc\"):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n filename, _ = QFileDialog.getOpenFileName(\n 
self,\n \"QFileDialog.getOpenFileName()\",\n \"/source/roms-py/tests\",\n f\"NetCDF Files ({pattern});;All Files (*)\",\n options=options,\n )\n if filename:\n self.onOpenFile(filename)\n\n def onOpenFile(self, filename):\n self.status.showMessage(f\"Current file: {filename}\")\n self._state.current_file = filename\n self._load_dataset(filename)\n self._state.filetype = detect_roms_file(filename)\n # getting a representative var based on settings.rep_var\n rep_var = getattr(REP_VAR, self._state.filetype)\n self._state.da = last2d(self._state.ds[rep_var])\n self.plot(var_changed=True)\n\n def _reset_mpl_axes(self):\n for ax in self.mplcanvas.figure.axes:\n ax.remove()\n\n self.mplcanvas.axes = self.mplcanvas.figure.add_subplot(111)\n\n def init_plot(self):\n img = plt.imread(\"./icons/welcome.png\")\n self._plot = self.mplcanvas.axes.imshow(img)\n self.mplcanvas.axes.set_axis_off()\n self.mplcanvas.draw()\n\n def _load_dataset(self, filename):\n self._state.ds = xr.open_dataset(filename)\n\n def plot(self, var_changed=False):\n self._reset_mpl_axes()\n\n self._plot = self._state.da.plot(ax=self.mplcanvas.axes)\n if hasattr(self._plot, \"set_cmap\"):\n self.cbarSelector.setEnabled(True)\n self.rangeBox.setEnabled(True)\n self.plotSelector.setEnabled(True)\n\n if (\n hasattr(self._state, \"vmin\") and hasattr(self._state, \"vmax\")\n ) and not var_changed:\n self._plot.set_norm(\n mpl.colors.Normalize(self._state.vmin, self._state.vmax)\n )\n else:\n self._reset_range(np.nanmin(self._state.da),\n np.nanmax(self._state.da))\n\n self.set_colorbar(cbar=self.cbarSelector.currentText())\n else:\n self.cbarSelector.setDisabled(True)\n self.rangeBox.setDisabled(True)\n\n self.mplcanvas.draw()\n\n self._update_vars()\n self._update_times()\n self._update_levels()\n\n def timeseries_or_vslice(self, evt):\n if evt.inaxes != self.mplcanvas.axes:\n return\n\n self._state.clicked_points.append([evt.xdata, evt.ydata])\n self._state.vslice_ref.append(self.mplcanvas.axes.plot(evt.xdata, evt.ydata,\n 'wo', markeredgecolor='k', zorder=10))\n self.mplcanvas.draw()\n\n if 'Vslice' in self.plotSelector.currentText():\n if len(self._state.clicked_points) < 2:\n return\n else:\n self._state.vslice_ref.append(self.mplcanvas.axes.plot(\n *pairs2lists(self._state.clicked_points), 'k', zorder=9))\n self.mplcanvas.draw()\n dialog = VsliceDialog(\n parent=self, title=\"Vertical Slice\")\n dialog.setGeometry(2000, 60, 900, 500)\n dialog.show()\n dialog.plot()\n self.dialogs.append(dialog)\n\n if 'Tseries' in self.plotSelector.currentText():\n dialog = TseriesDialog(parent=self, title=\"Time Series\")\n dialog.setGeometry(2000, 60, 900, 500)\n dialog.show()\n dialog.plot()\n self.dialogs.append(dialog)\n\n return\n\n def _reset_range(self, vmin, vmax):\n self._state.vmin = vmin\n self._state.vmax = vmax\n self.vmin.setText(f\"{vmin:0.6f}\")\n self.vmax.setText(f\"{vmax:0.6f}\")\n\n def _update_vars(self):\n self.varSelector.setEnabled(True)\n self.varSelector.clear()\n self.varSelector.addItems(self._state.ds.data_vars.keys())\n self.varSelector.setCurrentText(self._state.da.name)\n\n def _update_times(self):\n for dim in self._state.ds.dims.keys():\n if (\n \"time\" in dim\n and self._state.filetype not in [\"grd\"]\n and dim in self._state.da.coords.keys()\n ):\n self.timeSelector.setEnabled(True)\n self.timeSelector.clear()\n times = [numpydatetime2str(t)\n for t in self._state.ds[dim].values]\n self.timeSelector.addItems(times)\n current = numpydatetime2str(self._state.da[dim].values)\n 
self.timeSelector.setCurrentText(current)\n break\n\n self.timeSelector.setDisabled(True)\n\n def _update_levels(self):\n for dim, val in self._state.ds.dims.items():\n if (\n \"s_rho\" in dim\n and self._state.filetype not in [\"grd\", \"bry\"]\n and dim in self._state.da.coords.keys()\n ):\n self.levSelector.setEnabled(True)\n self.levSelector.clear()\n levels = [str(l) for l in self._state.ds[dim].values]\n self.levSelector.addItems(levels)\n self.levSelector.setCurrentText(\n str(self._state.da[dim].values))\n break\n\n self.levSelector.setDisabled(True)\n\n def toggle_plot(self, plot_type):\n if plot_type in [\"Tseries on click\", \"Vslice on click\"]:\n if not self.cid:\n self.cid = self.mplcanvas.mpl_connect(\n 'button_press_event', self.timeseries_or_vslice)\n self.mpltoolbar.setDisabled(True)\n else:\n self.mplcanvas.mpl_disconnect(self.cid)\n self.cid = None\n self.mpltoolbar.setEnabled(True)\n\n def toggle_var(self, var):\n _slice = {}\n # need to remove dimensions that don't exist in the new var\n # if that's the case (Ex, toggling from 3D to 2D var)\n for dim, val in self._state.current_slice.items():\n if dim in self._state.ds[var].dims:\n _slice[dim] = val\n\n self._state.da = last2d(self._state.ds[var].sel(**_slice))\n self.plot(var_changed=True)\n\n def toggle_time(self, timestamp):\n _slice = self._state.current_slice.copy()\n for key in _slice.keys():\n if \"time\" in key:\n _slice[key] = self.timeSelector.currentText()\n\n self._state.da = last2d(\n self._state.ds[self._state.var].sel(**_slice))\n self.plot()\n break\n\n def toggle_lev(self, lev):\n _slice = self._state.current_slice.copy()\n for key in _slice.keys():\n if \"s_rho\" in key:\n _slice[key] = self.levSelector.currentText()\n\n self._state.da = last2d(\n self._state.ds[self._state.var].sel(**_slice))\n self.plot()\n break\n\n def set_range(self):\n try:\n vmin = float(self.vmin.text())\n self._state.vmin = vmin\n except:\n vmin = self._state.vmin\n\n try:\n vmax = float(self.vmax.text())\n self._state.vmax = vmax\n except:\n vmax = self._state.vmax\n\n self._plot.set_norm(mpl.colors.Normalize(\n self._state.vmin, self._state.vmax))\n self.mplcanvas.draw()\n\n def set_colorbar(self, cbar):\n if hasattr(self._plot, \"set_cmap\"):\n self._plot.set_cmap(getattr(plt.cm, cbar))\n self.mplcanvas.draw()\n else:\n not_found_dialog(\"Colorbar does not apply to this plot\")\n\n def set_alpha(self, val):\n self._plot.set_alpha(val / 100)\n self.mplcanvas.draw()\n\n\nclass VsliceDialog(Ui, QDialog):\n def __init__(self, parent=None, title='ROMSView dialog', *args, **kwargs):\n # super().__init__(*args, **kwargs)\n QDialog.__init__(self, *args, **kwargs)\n self.parent = parent\n self._state = AppState()\n self._state.ds = self.parent._state.ds\n self._state.filetype = self.parent._state.filetype\n self.setWindowTitle(title)\n self.generalLayout = QHBoxLayout()\n self.centralWidget = QWidget(self)\n self.setCentralWidget(self.centralWidget)\n self.centralWidget.setLayout(self.generalLayout)\n self._createSideBar()\n self.plotSelector.setDisabled(True)\n self.levSelector.setDisabled(True)\n self._createMplCanvas()\n self._createStatusBar()\n self.status.showMessage(\n f\"Current file: {self.parent._state.current_file}\")\n\n def plot(self, var_changed=False):\n self._reset_mpl_axes()\n if not var_changed:\n var = self.parent._state.var\n # getting time index from parent plot\n for key, val in self.parent._state.current_slice.items():\n if \"time\" in key:\n sel = {key: val}\n self._state.da = 
self._state.ds[var].sel(**sel)\n self._state.za = self._state.ds['z_rho'].sel(**sel)\n # TODO open a dialog to load grid if z_rho not available, and\n # compute zlevels (clm files or outputs that did not save z_rho)\n\n xsec, zsec, vsec, xaxis = self._extract_slice()\n\n self._plot = self.mplcanvas.axes.contourf(xsec, zsec, vsec, 20)\n self.mplcanvas.axes.set_ylabel('z_rho')\n self.mplcanvas.axes.set_xlabel(xaxis)\n self.mplcanvas.axes.figure.colorbar(self._plot)\n\n self.cbarSelector.setEnabled(True)\n self.rangeBox.setEnabled(True)\n self.plotSelector.setDisabled(True)\n\n if (\n hasattr(self._state, \"vmin\") and hasattr(self._state, \"vmax\")\n ) and not var_changed:\n self._plot.set_norm(\n mpl.colors.Normalize(self._state.vmin, self._state.vmax)\n )\n else:\n self._reset_range(np.nanmin(self._state.da),\n np.nanmax(self._state.da))\n\n self.mplcanvas.draw()\n self._update_vars()\n self._update_times()\n\n def closeEvent(self, event):\n self.parent._state.clicked_points.clear()\n\n for plot in self.parent._state.vslice_ref:\n for pl in plot:\n pl.remove()\n\n self.parent.mplcanvas.draw()\n\n self.parent._state.vslice_ref.clear()\n\n def _extract_slice(self):\n p1 = self.parent._state.clicked_points[0]\n p2 = self.parent._state.clicked_points[1]\n xi_rho, eta_rho = np.meshgrid(self.parent._state.ds.xi_rho.values,\n self.parent._state.ds.eta_rho.values)\n xaxis = eta_or_xi(p1, p2)\n\n xs, ys = get_segment(xi_rho, eta_rho, p1, p2)\n\n # finding nearest depths and data values to segment points\n for idx in range(xs.size):\n line, col = near2d(xi_rho, eta_rho, xs[idx], ys[idx])\n try:\n vsec = np.hstack((vsec, self._state.da.isel(\n eta_rho=line, xi_rho=col).values[:, None]))\n zsec = np.hstack((zsec, self._state.za.isel(\n eta_rho=line, xi_rho=col).values[:, None]))\n except:\n vsec = self._state.da.isel(\n eta_rho=line, xi_rho=col).values[:, None]\n zsec = self._state.za.isel(\n eta_rho=line, xi_rho=col).values[:, None]\n\n if xaxis == 'xi_rho':\n xsec = np.atleast_2d(xs).repeat(self._state.da.s_rho.size, axis=0)\n else:\n xsec = np.atleast_2d(ys).repeat(self._state.da.s_rho.size, axis=0)\n\n return xsec, zsec, vsec, xaxis\n\n\nclass TseriesDialog(VsliceDialog):\n def __init__(self, parent=None, title='ROMSView dialog', *args, **kwargs):\n # super().__init__(*args, **kwargs)\n QDialog.__init__(self, *args, **kwargs)\n self.parent = parent\n self._set_state()\n self._set_da()\n self.setWindowTitle(title)\n self.generalLayout = QHBoxLayout()\n self.centralWidget = QWidget(self)\n self.setCentralWidget(self.centralWidget)\n self.centralWidget.setLayout(self.generalLayout)\n self._createSideBar()\n self.plotSelector.setDisabled(True)\n self.levSelector.setDisabled(True)\n self._createMplCanvas()\n self._createStatusBar()\n self.status.showMessage(\n f\"Current file: {self.parent._state.current_file}\")\n\n def _set_state(self):\n \"\"\"Grabing some state variables from parent\"\"\"\n self._state = AppState()\n self._state.ds = self.parent._state.ds\n self._state.filetype = self.parent._state.filetype\n self._state.clicked_points = self.parent._state.clicked_points\n\n def _set_da(self):\n\n for key, val in self.parent._state.current_slice.items():\n if key == 's_rho':\n sel = {key: val}\n break\n else:\n sel = {}\n\n xi_rho, eta_rho = np.meshgrid(self._state.ds.xi_rho.values,\n self._state.ds.eta_rho.values)\n\n sel['eta_rho'], sel['xi_rho'] = near2d(\n xi_rho,\n eta_rho,\n self._state.clicked_points[0][0],\n self._state.clicked_points[0][1])\n\n self._state.da = 
self._state.ds[self.parent._state.var].sel(**sel)\n\n def plot(self, var_changed=False):\n self._reset_mpl_axes()\n if not var_changed:\n try:\n var = self._state.var\n except:\n var = self.parent._state.var\n\n self._plot = self._state.da.plot(ax=self.mplcanvas.axes)\n\n self.mplcanvas.draw()\n self._update_vars()\n self._update_times()\n\n if 's_rho' in self._state.da.coords.keys():\n self.levSelector.setEnabled(True)\n self._update_levels()\n else:\n self.levSelector.setDisabled(True)\n\n def closeEvent(self, event):\n self.parent._state.clicked_points.clear()\n\n for plot in self.parent._state.vslice_ref:\n for pl in plot:\n pl.remove()\n\n self.parent.mplcanvas.draw()\n\n self.parent._state.vslice_ref.clear()\n\n\ndef detect_roms_file(filepath):\n for key in RomsNCFiles.__dataclass_fields__.keys():\n if key in basename(filepath):\n return key\n\n\ndef numpydatetime2str(numpydatetime):\n return str(numpydatetime).split(\".\")[0].replace(\"T\", \" \")\n\n\ndef last2d(da):\n if da.ndim <= 2:\n return da\n\n slc = [0] * (da.ndim - 2)\n slc += [slice(None), slice(None)]\n slc = {d: s for d, s in zip(da.dims, slc)}\n\n return da.isel(**slc)\n\n\ndef pairs2lists(pairs):\n \"\"\"Transforms list of pairs of [(x1, y1), (x2, y2), ...]\n in lists of coords [x1, x2, ...], [y1, y2, ...]\n \"\"\"\n x, y = [], []\n for pair in pairs:\n x.append(pair[0])\n y.append(pair[1])\n\n return x, y\n\n\ndef eta_or_xi(p1, p2):\n \"\"\"Chooses preferred X axis based on alignment of two points that \n define a transect\n \"\"\"\n if np.abs(p1[0] - p2[0]) >= np.abs(p1[1] - p2[1]):\n return 'xi_rho'\n else:\n return 'eta_rho'\n\n\ndef near2d(x, y, x0, y0):\n \"\"\"\n Find the indexes of the grid point that is\n nearest a chosen (x0, y0).\n Usage: line, col = near2d(x, y, x0, y0)\n \"\"\"\n dx = np.abs(x - x0)\n dx = dx / dx.max()\n dy = np.abs(y - y0)\n dy = dy / dy.max()\n dn = dx + dy\n fn = np.where(dn == dn.min())\n line = int(fn[0])\n col = int(fn[1])\n return line, col\n\n\ndef get_segment(xg, yg, p1, p2):\n \"\"\"Creates a segment from p1 to p2 based on average resolution of \n xg, yg 2D generic grid\n\n Args:\n xg, yg [numpy 2D arrays]: x, y grid coordinates\n p1, p2 [list or tuple]: x, y pair of start and end of the segment \n\n Returns:\n xs, ys [numpy 1D arrays]: x, y coordinates of the segment\n \"\"\"\n dl = (np.gradient(xg)[1].mean() +\n np.gradient(yg)[0].mean()) / 2\n size = int(np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) / dl)\n xs = np.linspace(p1[0], p2[0], size)\n ys = np.linspace(p1[1], p2[1], size)\n\n return xs, ys\n\n\ndef not_found_dialog(message=\"Coming soon...\"):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setInformativeText(message)\n msg.setWindowTitle(\"Not found\")\n msg.exec_()\n\n\n# Client code\ndef main():\n # Create an instance of QApplication\n app = QApplication(sys.argv)\n app.setStyle(\"Fusion\")\n\n # Now use a palette to switch to dark colors:\n # palette = QPalette()\n # palette.setColor(QPalette.Window, QColor(53, 53, 53))\n # palette.setColor(QPalette.WindowText, Qt.white)\n # palette.setColor(QPalette.Base, QColor(25, 25, 25))\n # palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n # palette.setColor(QPalette.ToolTipBase, Qt.white)\n # palette.setColor(QPalette.ToolTipText, Qt.white)\n # palette.setColor(QPalette.Text, Qt.white)\n # palette.setColor(QPalette.Button, QColor(53, 53, 53))\n # palette.setColor(QPalette.ButtonText, Qt.white)\n # palette.setColor(QPalette.BrightText, Qt.red)\n # palette.setColor(QPalette.Link, QColor(42, 
130, 218))\n # palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n # palette.setColor(QPalette.HighlightedText, Qt.black)\n # app.setPalette(palette)\n\n # Show the UI\n view = Ui()\n view.setGeometry(2500, 60, 1000, 800)\n view.show()\n\n if len(sys.argv) > 1:\n view.onOpenFile(sys.argv[1])\n\n # Create instances of the model and the controller\n # model = evaluateExpression\n # Controller(model=model, view=view)\n # Execute the calculator's main loop\n sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rsoutelino/romsview","sub_path":"romsview.py","file_name":"romsview.py","file_ext":"py","file_size_in_byte":24314,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"12"} +{"seq_id":"7820171668","text":"import torch\nimport torch.nn as nn\nimport torch_geometric as pyg\n\n# Based on https://gitlab.inria.fr/GruLab/s-gcn/-/blob/master/src/sgcn/layers.py\n\n\nclass SphericalConvolution_PYG(pyg.nn.MessagePassing):\n def __init__(self,\n in_features: int,\n out_features: int,\n order: int,\n dropout_p: float,\n non_linearity: str,\n bias: bool = True):\n \"\"\"Constructor\n\n Args:\n in_features (int): Input features size\n out_features (int): Output features size\n order (int): Spherical harmonics order\n dropout_p (float): Dropout\n non_linearity (str): Non linearity\n bias (bool, optional): Bias. Defaults to True.\n \"\"\"\n assert non_linearity in ['relu', 'elu', 'sigmoid',\n 'tanh', 'mish', 'none'], 'Incorrect non-linearity'\n\n super().__init__(aggr='add') # \"Add\" aggregation (Step 5).\n self.in_features = in_features\n self.out_features = out_features\n self.order_squared = order ** 2\n\n self.non_linearity = None\n self.non_linearity_name = non_linearity\n if non_linearity == 'relu':\n self.non_linearity = nn.functional.relu\n elif non_linearity == 'elu':\n self.non_linearity = nn.functional.elu\n elif non_linearity == 'sigmoid':\n self.non_linearity = torch.sigmoid\n elif non_linearity == 'tahn':\n self.non_linearity = torch.tanh\n elif non_linearity == 'none':\n self.non_linearity = None\n self.lin_part = torch.nn.Linear(\n in_features, out_features, bias=False) # = H^{k-1}W\n self.lins = torch.nn.ModuleList()\n\n for _ in range(self.order_squared):\n self.lins.append(torch.nn.Linear(\n in_features, out_features, bias=False)) # W_l^m\n\n self.dropout = torch.nn.Dropout(dropout_p)\n\n self.bias = nn.Parameter(torch.Tensor(\n out_features)) if bias else None # b\n\n torch.nn.init.xavier_normal_(self.lin_part.weight)\n for i in range(self.order_squared):\n torch.nn.init.xavier_normal_(self.lins[i].weight)\n if bias:\n torch.nn.init.constant_(self.bias, 0)\n\n def forward(self,\n x: torch.Tensor,\n edge_index: torch.Tensor,\n edge_attr: torch.Tensor) -> torch.Tensor:\n \"\"\"Apply the layer to a graph\n\n Args:\n x (torch.Tensor): Graph nodes features\n edge_index (torch.Tensor): Graph edge index\n edge_attr (torch.Tensor): Graph edge attributes\n\n Returns:\n torch.Tensor: The new nodes features\n \"\"\"\n x_part = self.lin_part(x)\n out = self.propagate(edge_index=edge_index, x=x,\n edge_attr=edge_attr, aggr='add')\n out += x_part\n if self.bias is not None:\n out = out + self.bias\n if self.non_linearity is not None:\n out = self.non_linearity(out)\n out = self.dropout(out)\n return out\n\n def message(self,\n x_j: torch.Tensor,\n edge_attr: torch.Tensor) -> torch.Tensor:\n \"\"\"Create a message from neighbors\n\n Args:\n x_j (torch.Tensor): Neighbor features\n edge_attr (torch.Tensor): Edge 
attributes\n\n Returns:\n torch.Tensor: The message\n \"\"\"\n edge_ponderation = []\n for i in range(self.order_squared):\n edge_ponderation.append(edge_attr[:, i].view(-1, 1) * x_j)\n return torch.stack(edge_ponderation)\n\n def update(self, aggr_out: torch.Tensor) -> torch.Tensor:\n \"\"\"Update the node features\n\n Args:\n aggr_out (torch.Tensor): Neighbors message\n\n Returns:\n torch.Tensor: New node features\n \"\"\"\n conv_sh = []\n for i in range(self.order_squared):\n conv_sh.append(self.lins[i](aggr_out[i]))\n return torch.stack(conv_sh).sum(axis=0)\n\n def __repr__(self) -> str:\n \"\"\"Returns a str representing the layer\n\n Returns:\n str: A str representing the layer\n \"\"\"\n return \"Weight:\\t{}\\nWeights:\\t{}\".format(self.lin_part, self.lins)\n","repo_name":"KevinCrp/SGPocket","sub_path":"SGPocket/networks/spherical_convolution.py","file_name":"spherical_convolution.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"33909984024","text":"#!/usr/bin/env python3\n\nimport sys\n\nout_file = sys.argv[1]\nin_files = sys.argv[2:]\n\ni = 0\nwhole = \"\"\nwhile i < len(in_files):\n indiv = in_files[i] + \"\\n\"\n whole = whole + indiv\n i = i + 1\nwith open(out_file, \"w\") as f_out:\n f_out.write(whole)\n","repo_name":"thomashazekamp/CA116","sub_path":"write-args.py","file_name":"write-args.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32087113593","text":"import socket\r\nfrom time import sleep\r\n# Create a UDP socket\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n\r\n# Server application IP address and port\r\nserver_address = '127.0.0.1'\r\nserver_port = 10001\r\n\r\n# Buffer size\r\nbuffer_size = 1024\r\n\r\n# Message sent to server\r\nmessage = 'Hi server!'\r\n\r\ntry:\r\n # Send data to server\r\n client_socket.sendto(message.encode(), (server_address, server_port))\r\n print('Sent to server: ', message)\r\n\r\n # Receive response from server\r\n print('Waiting for response...')\r\n data, server = client_socket.recvfrom(buffer_size)\r\n print('Received message from server at:', server)\r\n\r\nfinally:\r\n client_socket.close()\r\n print('Socket closed')\r\nsleep(10)\r\n","repo_name":"walkerdustin/distributed-systems-game","sub_path":"sampleCode/simpleclient.py","file_name":"simpleclient.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"12"} +{"seq_id":"15733395869","text":"dia_inicial, data_inicial = input().split()\nhora_inicial, minuto_inicial, segundo_inicial = map(int, input().split(' : '))\n\ndia_final, data_final = input().split()\nhora_final, minuto_final, segundo_final = map(int, input().split(' : '))\n\ndata_inicial = int(data_inicial)\ndata_final = int(data_final)\n\ndia_de_jogo = data_final - data_inicial\n\nif hora_inicial > hora_final:\n hora_de_jogo = (24 - hora_inicial) + hora_final\n dia_de_jogo -= 1\nelif hora_inicial <= hora_final:\n hora_de_jogo = hora_final - hora_inicial\n\nif minuto_inicial > minuto_final:\n minuto_do_jogo = (60 - minuto_inicial) + minuto_final\n hora_de_jogo -= 1\n if hora_de_jogo < 0:\n hora_de_jogo += 24\n dia_de_jogo -= 1\nelif minuto_inicial <= minuto_final:\n minuto_do_jogo = minuto_final - minuto_inicial\n\nif segundo_inicial > segundo_final:\n segundo_do_jogo = (60 - segundo_inicial) + segundo_final\n 
minuto_do_jogo -= 1\n    if minuto_do_jogo < 0:\n        minuto_do_jogo += 60\n        hora_de_jogo -= 1\nelif segundo_inicial <= segundo_final:\n    segundo_do_jogo = segundo_final - segundo_inicial\n\nprint(f'{dia_de_jogo} dia(s)')\nprint(f'{hora_de_jogo} hora(s)')\nprint(f'{minuto_do_jogo} minuto(s)')\nprint(f'{segundo_do_jogo} segundo(s)')\n","repo_name":"saulojustiniano1/ads-exercicios","sub_path":"python/beecrowd/3_estrutura-controle-desisao/1061_tempo-de-um-evento.py","file_name":"1061_tempo-de-um-evento.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26939080364","text":"import csv\nimport xlsxwriter\n\nwith open('Chinook_Track.csv') as csv_file:\n    csv_reader = csv.reader(csv_file, delimiter=',')\n    line_count = 0\n    # create workbook and add worksheet\n    workbook = xlsxwriter.Workbook('test.xlsx')\n    worksheet = workbook.add_worksheet()\n    row = 0\n    artists = []\n    for line in csv_reader:\n        if line[5] != \"NULL\":\n            artists.append(line[5])\n        line_count += 1\n\n    print(f'Processed {line_count} lines.')\n\n    # Remove duplicates and write the xlsx file\n    for i in list(set(artists)):\n        worksheet.write(row, 0, i)\n        row += 1\n\n    # close file\n    workbook.close()\n\n\n\n","repo_name":"Seren4/365Talents_test","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73918878102","text":"from typing import List\n\n\ndef sort_colors(colors: List[int]):\n    '''Dutch national flag problem - an improvement idea for quicksort partitioning'''\n    # do the partitioning in three parts: smaller, equal, larger\n    # red = 0 white = 1 blue = 2\n    # must end up sorted in red, white, blue order -> split into three parts around white\n    # does each index point to where each color starts? no\n    red, white, blue = 0, 0, len(colors) # blue starts just outside the array bounds\n\n    # comparison is complete when white == blue\n    while white < blue:\n        if colors[white] < 1:\n            colors[red], colors[white] = colors[white], colors[red]\n            white += 1\n            red += 1\n        elif colors[white] > 1:\n            blue -= 1\n            colors[white], colors[blue] = colors[blue], colors[white]\n        else:\n            white += 1\n    print(colors, red, blue)\n\n    # on exit, red is one past the last index holding a value < 1 (that slot may hold 1 or 2), and blue points to the first index holding a value > 1\n    return colors\n\n\nprint(sort_colors([2, 0, 2, 2, 1, 0]))\nprint(sort_colors([2, 0, 2, 2, 0]))\n","repo_name":"i960107/algorithm","sub_path":"programmers/정렬_색정렬.py","file_name":"정렬_색정렬.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21776685226","text":"from flask import request, jsonify\nfrom marshmallow import Schema, fields\nfrom functools import reduce\n\nfrom . 
import app, db\nfrom app.models import Company, Person, people_friends_table\n\n\nclass CompanySchema(Schema):\n class Meta:\n fields = (\"id\", \"name\")\n\n\nclass PersonSchema(Schema):\n company = fields.Nested(CompanySchema, only=[\"id\"])\n\n class Meta:\n fields = (\n \"id\",\n \"username\",\n \"guid\",\n \"has_died\",\n \"balance\",\n \"picture\",\n \"age\",\n \"eye_colour\",\n \"name\",\n \"gender\",\n \"company_id\",\n \"phone\",\n \"address\",\n \"about\",\n \"registered\",\n \"tags\",\n \"greeting\",\n \"fruits\",\n \"vegetables\",\n )\n\n\n@app.errorhandler(404)\ndef not_found(Error=None):\n response = jsonify({\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n })\n response.status_code = 404\n return response\n\n\n@app.errorhandler(500)\ndef bad_request(Error=None):\n response = jsonify({\n 'status': 500,\n 'message': 'Bad Request: ' + request.url,\n })\n response.status_code = 500\n return response\n\n\n@app.route('/companies', methods=['GET'])\ndef list_companies():\n query = Company.query\n\n # Filter by name\n name = request.args.get(\"name\")\n if name:\n query = query.filter(Company.name == name)\n\n return jsonify(CompanySchema(many=True).dump(query.all()))\n\n\n@app.route('/companies/', methods=['GET'])\ndef get_company(company_id):\n company = Company.query.get(company_id)\n if company is None:\n return not_found()\n return jsonify(CompanySchema().dump(company))\n\n\n@app.route('/people', methods=['GET'])\ndef list_people():\n query = Person.query\n\n # Filter by id\n id = request.args.get(\"id\")\n if id:\n ids = [int(i) for i in id.split(\",\")]\n query = query.filter(Person.id.in_(ids))\n # Filter by username\n username = request.args.get(\"username\")\n if username:\n query = query.filter(Person.username == username)\n # Filter by company id\n company_id = request.args.get(\"company_id\")\n if company_id:\n query = query.filter(Person.company_id == company_id)\n # Respect includes\n includes = request.args.get(\"includes\")\n only = None\n if includes:\n only = [x for x in PersonSchema.Meta.fields if x in set(includes.split(\",\"))]\n\n return jsonify(PersonSchema(many=True, only=only).dump(query.all()))\n\n\n@app.route('/people/', methods=['GET'])\ndef get_person(people_id):\n person = Person.query.get(people_id)\n if person is None:\n return not_found()\n\n # Respect includes\n includes = request.args.get(\"includes\")\n only = None\n if includes:\n only = [x for x in PersonSchema.Meta.fields if x in set(includes.split(\",\"))]\n\n return jsonify(PersonSchema(only=only).dump(person))\n\n\n@app.route('/special_friends', methods=['GET'])\ndef list_special_friends():\n # Filter by id\n id = request.args.get(\"person_id\")\n if id is None:\n return bad_request()\n ids = [int(i) for i in id.split(\",\")]\n\n # Query these common friends with `special` filter, ie. 
brown eyes and still alive\n people_friends = db.session.query(people_friends_table)\\\n .join(Person, people_friends_table.c.friend_id == Person.id) \\\n .filter(people_friends_table.c.person_id.in_(ids)) \\\n .filter(Person.eye_colour == 'brown') \\\n .filter(Person.has_died == False) \\\n .all()\n\n # Get common element from list of people friends\n friends = {id: [] for id in ids}\n for person_id, friend_id in people_friends:\n friends[person_id].append(friend_id)\n common_friends_ids = list(reduce(lambda i, j: i & j, (set(x) for x in friends.values())))\n\n return jsonify(common_friends_ids)\n","repo_name":"stellalie/techtest-api","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"72748691541","text":"import pandas as pd\nfrom sklearn.model_selection import LeaveOneOut, cross_val_predict\n\nfrom src.approaches.apcs.apcs import APCSSampler\n\n\nclass AccImprovementSampler(APCSSampler):\n\n prev_acc = []\n curr_acc = []\n\n def __init__(self, *args, **kwargs):\n self.k_folds = kwargs.pop(\"k_folds\") if \"k_folds\" in kwargs else 5\n super().__init__(*args, **kwargs)\n\n def acs(self, partition):\n \"\"\"Selects the best class to sample, using the accuracy improvement method.\n The accuracy improvement method works by noting which class has the most increase\n in accuracy from the previous round.\n\n Args:\n partition (Series): The partition of the dataset.\n\n Returns:\n int: The selected pseudo-class.\n \"\"\"\n\n improvement = [\n max(\n 0,\n self.curr_acc[int(partition.name)][i]\n - self.prev_acc[int(partition.name)][i],\n )\n for i in range(self.n_classes)\n ]\n\n lbl = improvement.index(max(improvement))\n return lbl\n\n def update(self):\n \"\"\"Updates the predicted labels, for this round.\"\"\"\n if len(self.dataset.complete.index) > 3:\n self.prev_acc = self.curr_acc\n acc = []\n for partition in self.pseudo_classes:\n cv = (\n LeaveOneOut()\n if partition[self.dataset.complete.index].value_counts().min()\n < self.k_folds\n else self.k_folds\n )\n predictions = pd.Series(\n cross_val_predict(\n self.clf,\n self.dataset.complete[self.dataset.get_cf_names()],\n partition[self.dataset.complete.index],\n cv=cv,\n ),\n index=self.dataset.complete.index,\n )\n scores = []\n for p_class in sorted(partition.unique()):\n score = len(\n predictions[predictions == p_class]\n & partition[partition == p_class]\n ) / len(partition[partition == p_class])\n scores.append(score)\n acc.append(pd.Series(scores, index=sorted(partition.unique())))\n self.curr_acc = acc\n\n def initial_sample(self):\n return super().initial_sample(n=2)\n\n def to_string(self):\n return \"accuracy-improvement\"\n","repo_name":"thomastkok/active-selection-of-classification-features","sub_path":"src/approaches/apcs/acc_improvement.py","file_name":"acc_improvement.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70928264982","text":"import json\n\nfrom flask import make_response\n\nfrom SwitchTracer.universal.exceptions import ContentTypeErrors\n\n\nclass UniBase(object):\n\n ENCODING = \"utf-8\"\n CONTENT_TYPE = None\n\n\nclass ParserBase(UniBase):\n\n def __init__(self, request):\n self.request = request\n self.kwargs = dict()\n\n def __call__(self, mod=\"parse\", **kwargs):\n self.kwargs = kwargs\n if self.request.headers.get(\"Content-Type\") != 
self.CONTENT_TYPE:\n            raise ContentTypeErrors(\"Wrong content type from data\")\n        return self.parse(self.request.get_data())\n\n    def parse(self, data):\n        raise NotImplementedError\n\n\nclass JsonParser(ParserBase):\n\n    CONTENT_TYPE = \"application/json\"\n\n    def parse(self, data: bytes):\n        return json.loads(data.decode(self.kwargs.get(\"encoding\", self.ENCODING)))\n\n\nclass RenderBase(UniBase):\n\n    def __init__(self, **data):\n        self.data = data\n        self.kwargs = dict()\n\n    def __call__(self, **kwargs):\n        self.kwargs = kwargs\n        return self.render()\n\n    def render(self):\n        raise NotImplementedError\n\n\nclass JsonRender(RenderBase):\n\n    CONTENT_TYPE = \"application/json\"\n\n    def render(self):\n        try:\n            data = json.dumps(self.data)\n        except Exception as e:\n            raise ContentTypeErrors(e)\n        resp = make_response(data)\n        resp.status = self.kwargs.get(\"status\", \"200 OK\")\n        resp.headers[\"Content-Type\"] = self.CONTENT_TYPE\n        return resp\n","repo_name":"IzayoiRin/VirtualVeyonST","sub_path":"SwitchTracer/cores/contrib/flaskmiddlewares/parser_render.py","file_name":"parser_render.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"33820355995","text":"\nlines = []\nwith open('1.in', 'r') as fp:\n    lines = list(map(int, fp.readlines()))\n\n# print(len(lines))\ncount = 0\nprev = lines[0]\nfor line in lines:\n    if line > prev:\n        count += 1\n    prev = line\n\nprint(count)\n","repo_name":"Zeyu-Li/advent-of-code-2021","sub_path":"1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"24901730853","text":"def leiaint(inteiro):\n    while True:\n        try:\n            return int(input(inteiro))\n        except KeyboardInterrupt:\n            print('The user chose not to enter the data...')\n            return 0\n        except (ValueError, TypeError):\n            print('Please enter a valid integer.')\n\n\ndef leiafloat(flutuante):\n    while True:\n        try:\n            return float(input(flutuante))\n        except KeyboardInterrupt:\n            print('The user chose not to enter the data...')\n            return 0\n        except (ValueError, TypeError):\n            print('Please enter a valid real number.')\n\n\nn = leiaint('Enter an integer: ')\ny = leiafloat('Enter a real number: ')\n\nprint(f'You just entered the integer {n} and the real number {y}')\n","repo_name":"fernandorssa/CeV_Python_Exercises","sub_path":"Desafio 113.py","file_name":"Desafio 113.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"8612395673","text":"from arches.app.datatypes.base import BaseDataType\nfrom arches.app.models import models\nfrom django.core.exceptions import ValidationError\n\nwkt_point_widget = models.Widget.objects.get(name='wkt-point-widget')\n\ndetails = {\n    'datatype': 'wkt-point',\n    'iconclass': 'fa fa-file-code-o',\n    'modulename': 'datatypes.py',\n    'classname': 'WKTPointDataType',\n    'defaultwidget': wkt_point_widget,\n    'defaultconfig': None,\n    'configcomponent': None,\n    'configname': None,\n    'isgeometric': False\n    }\n\nclass WKTPointDataType(BaseDataType):\n    def validate(self, value, source=None):\n        \"\"\"\n        Confirm your datatype meets validation criteria\n\n        \"\"\"\n        errors = []\n        try:\n            value.upper()\n        except:\n            errors.append({'type': 'ERROR', 'message': 'datatype: {0} value: {1} {2} - {3}'.format(self.datatype_model.datatype, value, source, 'this is not a string')})\n        return errors\n\n    def 
append_to_document(self, document, nodevalue):\n \"\"\"\n Appends a value to a given elastic search document property\n\n \"\"\"\n document['strings'].append(nodevalue)\n\n def transform_export_values(self, value):\n \"\"\"\n Transform a value for export\n\n \"\"\"\n return value.encode('utf8')\n","repo_name":"cvast/cvast-arches","sub_path":"cvast_arches/cvast_arches/datatypes/wkt_point.py","file_name":"wkt_point.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"39156027402","text":"#!/usr/bin/env python3\n\nfrom glob import glob\nfrom helpers import only_blacklists_changed\n\n\ndef test_blacklist_integrity():\n bl_files = glob('bad_*.txt') + glob('blacklisted_*.txt') + \\\n ['watched_keywords.txt']\n seen = dict()\n errors = []\n for bl_file in bl_files:\n with open(bl_file, 'r') as lines:\n for lineno, line in enumerate(lines, 1):\n if line.endswith('\\r\\n'):\n errors.append('{0}:{1}:DOS line ending'.format(bl_file, lineno))\n if not line.endswith('\\n'):\n errors.append('{0}:{1}:No newline'.format(bl_file, lineno))\n if line == '\\n':\n errors.append('{0}:{1}:Empty line'.format(bl_file, lineno))\n if bl_file == 'watched_keywords.txt':\n line = line.split('\\t')[2]\n if line in seen:\n errors.append('{0}:{1}:Duplicate entry {2} (also {3})'.format(\n bl_file, lineno, line.rstrip('\\n'), seen[line]))\n seen[line] = '{0}:{1}'.format(bl_file, lineno)\n\n if len(errors) == 1:\n raise ValueError(errors[0])\n elif len(errors) > 1:\n raise ValueError(\"\\n\\t\".join([\"Multiple errors has occurred.\"] + errors))\n\n\ndef test_blacklist_pull_diff():\n only_blacklists_diff = \"\"\"watched_keywords.txt\n bad_keywords.txt\n blacklisted_websites.txt\"\"\"\n assert only_blacklists_changed(only_blacklists_diff)\n mixed_files_diff = \"\"\"helpers.py\n test/test_blacklists.py\n blacklisted_usernames.txt\"\"\"\n assert not only_blacklists_changed(mixed_files_diff)\n","repo_name":"Undo1/SmellDetector","sub_path":"test/test_blacklists.py","file_name":"test_blacklists.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"73036336020","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution:\r\n def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\r\n if not head:\r\n return head\r\n slow=head\r\n fast=head.next #fast=slow*2\r\n while(fast and fast!=slow and fast.next):\r\n slow=slow.next\r\n fast=fast.next.next #fast=slow*2\r\n \r\n if fast==slow: #hasCycle \r\n while(slow.next!=head):\r\n slow=slow.next\r\n head=head.next\r\n return head\r\n else:\r\n return None\r\n \r\n\r\n# Problem link\r\n# https://leetcode.com/problems/linked-list-cycle-ii/\r\n\r\n# source\r\n# https://leetcode.com/mayura_8499","repo_name":"BalaMithran/100daysofcode","sub_path":"Day-6 Find the starting point of the Loop of LinkedList.py","file_name":"Day-6 Find the starting point of the Loop of LinkedList.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32279540139","text":"\"\"\"\nPython Web Development Techdegree\nProject 1 - Number Guessing Game\n--------------------------------\n\nFor this first project we will be using Workspaces. 
\n\nNOTE: If you strongly prefer to work locally on your own computer, you can totally do that by clicking: File -> Download Workspace in the file menu after you fork the snapshot of this workspace.\n\n\"\"\"\n\nimport random\n\n\ndef start_game():\n print(\"/////////////////////////////////////\\n************************************\\nWelcome to the number guessing game!\\n************************************\\n////////////////////////////////////\")\n magic_number = random.randint(1,50)\n attempts = 0\n while True:\n try:\n guess = int(input(\"Guess a number between 1 and 50: \"))\n attempts = attempts + 1\n if guess > magic_number:\n print(\"It's lower.\")\n continue\n elif guess < magic_number:\n print(\"It's higher.\")\n continue\n elif guess == magic_number:\n print(\"Congratulations! You guessed the number!\\nIt took you {} tries!\\nGoodbye and thank you for playing! See you next time!\\n**********\\n********\\n******\\n****\\n**\".format(attempts))\n break\n except ValueError:\n print(\"Whoops! Please enter a numerical value between 1 and 50!\")\n\nstart_game()","repo_name":"curtisnouchi/number_guessing_game","sub_path":"guessing_game.py","file_name":"guessing_game.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18584997614","text":"from typing import Any, List, Optional\nimport requests\nimport logging\nimport json\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass GitLabWikis(dict):\n '''\n Gitlab handler\n\n Params:\n Title: Title of the wiki page: \\\n Example: \"QW.QCC01\"\n gitlabUrl: URL to the gitlab website: \\\n Example: \"http://gitlab.seismo.nrcan.gc.ca\"\n projectId: The project Id of the repository: \\\n Example: 10\n token: The private token of the repository: \\\n Example: \"gJ-TxBSSMYBrsxhh9jze\"\n webserver: The direct link to the web server that includes the\\\n attachments we are uploading to the wiki. 
\\\n Example: \\\n \"https://3.96.234.48:18010/json/QW/ONE01/2022-04-21-2022-05-01/\"\n '''\n\n def __init__(\n self,\n title: str,\n gitlabUrl: str,\n projectId: int,\n token: str,\n webserver: str\n ):\n self.gitlabUrl = f\"{gitlabUrl}/api/v4/projects/{projectId}/wikis\"\n self.token = token\n self.webserver = webserver\n self.title = title\n self.list_of_attachment_references: List[Any] = []\n self.validation_json: dict = {}\n\n def _post_wiki_api(self,\n content: Optional[str] = None,\n ):\n request_url = f'{self.gitlabUrl}'\n headers = {'PRIVATE-TOKEN': self.token}\n data = {\"title\": self.title,\n \"content\": content}\n try:\n req = requests.post(\n request_url,\n headers=headers,\n data=data,\n )\n req.raise_for_status()\n except requests.exceptions.HTTPError:\n self._put_wiki_api(title=self.title,\n content=content)\n\n def _put_wiki_api(\n self,\n title: str,\n content: Optional[str] = None,\n ):\n request_url = f\"{self.gitlabUrl}/{title}\"\n headers = {'PRIVATE-TOKEN': self.token}\n data = {\"title\": title,\n \"content\": content}\n try:\n req = requests.put(\n request_url,\n headers=headers,\n data=data,\n )\n req.raise_for_status()\n except requests.exceptions.HTTPError as err:\n logging.error(err.response.content)\n raise err\n\n def _get_api(self,\n path_to_attachments: Optional[str] = None) -> \\\n requests.Response:\n request_url = self.webserver if path_to_attachments is None else\\\n f\"{self.webserver}{path_to_attachments}\"\n try:\n request_result = requests.get(\n request_url, verify=False\n )\n logging.info(f\"Getting file: {request_url}\")\n request_result.raise_for_status()\n except requests.exceptions.HTTPError as err:\n logging.error(err.response.content)\n raise err\n return request_result\n\n def _upload_attachments_wiki_api(self, attachments: List):\n request_url = f'{self.gitlabUrl}/attachments'\n headers = {'PRIVATE-TOKEN': self.token}\n list_of_attachment_references: List[dict] = []\n for attachment in attachments:\n logging.info(f'Adding attachment {attachment[\"filename\"]}')\n try:\n req = requests.post(\n request_url,\n headers=headers,\n files={\n 'file': (\n attachment[\"filename\"],\n attachment[\"content\"]\n )\n })\n req.raise_for_status()\n request_as_json = json.loads(req.content.decode('utf-8'))\n list_of_attachment_references.append(request_as_json)\n except requests.exceptions.HTTPError as err:\n logging.error(err)\n self.list_of_attachment_references = list_of_attachment_references\n\n def _download_documents(self):\n # Download the documents from the webserver\n list_of_document_references = list(\n map(lambda attachment: {\"filename\": attachment, \"content\":\n self._get_api(attachment).content}, self.list_of_documents))\n\n validation_doc = list(filter(\n lambda document: \"validation_results\" in document[\"filename\"],\n list_of_document_references))\n if len(validation_doc) != 0:\n validation_json = json.loads(\n validation_doc[0][\"content\"].decode('utf-8'))\n self.validation_json = validation_json\n # Upload documents to Git Lab as attachments\n self._upload_attachments_wiki_api(\n attachments=list_of_document_references)\n\n def _get_list_of_documents(\n self,\n ):\n # Getting the list of documents from the webserver provided\n request_result = self._get_api()\n array_of_documents = list(\n map(lambda document: document[\"name\"], request_result.json()))\n filtered_array_of_documents = list(filter(lambda document_name: '.png'\n in document_name or\n '.json' in document_name or\n '.csv' in document_name,\n array_of_documents))\n 
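# Keep only .png, .json, and .csv artifacts from the webserver listing; everything else is skipped before it is uploaded to the wiki.\n        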
self.list_of_documents = filtered_array_of_documents\n\n def setup_wiki(self):\n self._get_list_of_documents()\n self._download_documents()\n","repo_name":"hasanissa25/EEW-Station-Validation","sub_path":"stationverification/utilities/GitLabWikis.py","file_name":"GitLabWikis.py","file_ext":"py","file_size_in_byte":5601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36512233536","text":"import pygame\r\nfrom enum import Enum\r\nfrom collections import namedtuple\r\n\r\nCONFIG = {\r\n 'colors': \r\n {\r\n 'white': (255, 255, 255),\r\n 'black': (0, 0, 0),\r\n 'green': (0, 128, 0),\r\n 'red': (100, 0, 0)\r\n },\r\n 'game':\r\n {\r\n 'width': 640, \r\n 'height': 480,\r\n 'block_size': 20,\r\n 'speed': 40\r\n },\r\n \r\n}\r\n\r\n\r\nPoint = namedtuple('Point', 'x, y')\r\n\r\nclass Direction(Enum):\r\n RIGHT = 1\r\n LEFT = 2\r\n UP = 3\r\n DOWN = 4\r\n \r\npygame.init()\r\nfont = pygame.font.Font(\"assets/Now-Regular.otf\", 25)\r\n","repo_name":"brightonm/COBRA","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"6686293717","text":"import logging\n\nimport torch\n\nimport modulus # noqa: F401 for docs\n\nTensor = torch.Tensor\nlogger = logging.getLogger(\"__name__\")\n\n\ndef check_sequence(\n tensor: Tensor, start_index: int, step_size: int, seq_length: int, axis: int = 0\n) -> bool:\n \"\"\"Checks if tensor has correct sequence. The tensor is expected to have a dimension that represents the sequence. Indexing this dimension should give a tensor of constant ints with the correct sequence number.\n\n Parameters\n ----------\n tensor : Tensor\n tensor to check sequence on.\n start_index : int\n expected value of first tensor in sequence\n step_size : int\n step size in sequence\n seq_length : int\n expected sequence length\n axis : int\n axis of tensor to check sequence on\n\n Returns\n -------\n bool\n Test passed\n \"\"\"\n\n # convert tensors to int list\n tensor_tags = [\n int(tensor.select(axis, i).flatten()[0]) for i in range(tensor.shape[axis])\n ]\n\n # correct seq\n correct_seq = [step_size * i + start_index for i in range(seq_length)]\n\n # check if seq matches epected\n if correct_seq != tensor_tags:\n logger.warning(\"Sequence does not match expected\")\n logger.warning(f\"Expected Sequence: {correct_seq}\")\n logger.warning(f\"Sequence order: {tensor_tags}\")\n return False\n return True\n","repo_name":"NVIDIA/modulus","sub_path":"test/datapipes/common/sequence.py","file_name":"sequence.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":416,"dataset":"github-code","pt":"12"} +{"seq_id":"74840296980","text":"class Anatomy:\r\n def __init__(self):\r\n self.skeleton = self.Skeleton()\r\n self.respiration = self.Respiration()\r\n self.digestion = self.Digestion()\r\n self.urinary = self.Urinary()\r\n self.endocrine = self.Endocrine()\r\n self.reproduction = self.Reproduction()\r\n self.circulation = self.Circulation()\r\n self.lymphatic = self.Lymphatic()\r\n self.nervous = self.Nervous()\r\n self.muscular = self.Muscular()\r\n self.integumentary = self.Integumentary()\r\n\r\n\r\n class Skeleton:\r\n def __init__(self):\r\n self.structure = self.Skeletal_Structure()\r\n self.functions = self.Skeletal_Functions()\r\n\r\n class Skeletal_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n # Skull\r\n 
self.frontal_bone = None\r\n                self.parietal_bone = None\r\n                self.temporal_bone = None\r\n                self.occipital_bone = None\r\n                self.mandible = None\r\n\r\n                # Spine\r\n                self.cervical_vertebrae = None\r\n                self.thoracic_vertebrae = None\r\n                self.lumbar_vertebrae = None\r\n                self.spine_sacrum = None\r\n                self.spine_coccyx = None\r\n\r\n                # Rib_Cage\r\n                self.true_ribs = None\r\n                self.false_ribs = None\r\n                self.floating_ribs = None\r\n\r\n                # Sternum\r\n                self.manubrium = None\r\n                self.body = None\r\n                self.xiphoid_process = None\r\n\r\n                # Upper_Limb\r\n                self.clavicle = None\r\n                self.scapula = None\r\n                self.humerus = None\r\n                self.radius = None\r\n                self.ulna = None\r\n                self.carpals = None\r\n                self.metacarpals = None\r\n                self.upper_phalangus = None\r\n\r\n                # Lower_Limb\r\n                self.femur = None\r\n                self.patella = None\r\n                self.tibia = None\r\n                self.fibula = None\r\n                self.tarsals = None\r\n                self.metatarsals = None\r\n                self.lower_phalangus = None\r\n\r\n                # Pelvic_Girdle\r\n                self.hip_bone = None\r\n                self.pelvic_sacrum = None\r\n                self.pelvic_coccyx = None\r\n\r\n            def skull(self, frontal_bone, parietal_bone, temporal_bone, occipital_bone, mandible):\r\n                self.frontal_bone = frontal_bone\r\n                self.parietal_bone = parietal_bone\r\n                self.temporal_bone = temporal_bone\r\n                self.occipital_bone = occipital_bone\r\n                self.mandible = mandible\r\n\r\n            def spine(self, cervical_vertebrae, thoracic_vertebrae, lumbar_vertebrae, spine_sacrum, spine_coccyx):\r\n                self.cervical_vertebrae = cervical_vertebrae\r\n                self.thoracic_vertebrae = thoracic_vertebrae\r\n                self.lumbar_vertebrae = lumbar_vertebrae\r\n                self.spine_sacrum = spine_sacrum\r\n                self.spine_coccyx = spine_coccyx\r\n\r\n            def rib_cage(self, true_ribs, false_ribs, floating_ribs):\r\n                self.true_ribs = true_ribs\r\n                self.false_ribs = false_ribs\r\n                self.floating_ribs = floating_ribs\r\n\r\n            def sternum(self, manubrium, body, xiphoid_process):\r\n                self.manubrium = manubrium\r\n                self.body = body\r\n                self.xiphoid_process = xiphoid_process\r\n\r\n            def upper_limb(self, clavicle, scapula, humerus, radius, ulna, carpals, metacarpals, upper_phalangus):\r\n                self.clavicle = clavicle\r\n                self.scapula = scapula\r\n                self.humerus = humerus\r\n                self.radius = radius\r\n                self.ulna = ulna\r\n                self.carpals = carpals\r\n                self.metacarpals = metacarpals\r\n                self.upper_phalangus = upper_phalangus\r\n\r\n            def lower_limb(self, femur, patella, tibia, fibula, tarsals, metatarsals, lower_phalangus):\r\n                self.femur = femur\r\n                self.patella = patella\r\n                self.tibia = tibia\r\n                self.fibula = fibula\r\n                self.tarsals = tarsals\r\n                self.metatarsals = metatarsals\r\n                self.lower_phalangus = lower_phalangus\r\n\r\n            def pelvic_girdle(self, hip_bone, pelvic_sacrum, pelvic_coccyx):\r\n                self.hip_bone = hip_bone\r\n                self.pelvic_sacrum = pelvic_sacrum\r\n                self.pelvic_coccyx = pelvic_coccyx\r\n\r\n        class Skeletal_Functions:\r\n            def __init__(self):\r\n                self.functions = None\r\n\r\n                self.osteogenesis = None\r\n\r\n                self.collagen_production = None\r\n                self.mineralization = None\r\n\r\n                self.osteoclast_activity = None\r\n                self.osteoblast_activity = None\r\n\r\n                self.synovial_fluid_production = None\r\n                self.cartilage_maintenance = None\r\n                self.ligament_strength = None\r\n                self.tendon_elasticity = None\r\n\r\n                self.endochondral_ossification = None\r\n                self.intramembranous = None\r\n                self.ossification = None\r\n\r\n                self.fracture_healing = None\r\n                self.cartilage_regeneration = None\r\n\r\n                self.calcium_metabolism = None\r\n                self.vitaminD_receptor = None\r\n                self.hormonal_regulation = None\r\n\r\n                self.collagen_crosslinking = None\r\n                
self.mineral_composition = None\r\n\r\n def bone_formation(self, osteogenesis):\r\n self.osteogenesis = osteogenesis\r\n\r\n def bone_structure(self, collagen_production, mineralization):\r\n self.collagen_production = collagen_production\r\n self.mineralization = mineralization\r\n\r\n def bone_remodeling(self, osteoclast_activity, osteoblast_activity):\r\n self.osteoclast_activity = osteoclast_activity\r\n self.osteoblast_activity = osteoblast_activity\r\n\r\n def joint_functions(self, synovial_fluid_production, cartilage_maintenance, ligament_strength,\r\n tendon_elasticity):\r\n self.synovial_fluid_production = synovial_fluid_production\r\n self.cartilage_maintenance = cartilage_maintenance\r\n self.ligament_strength = ligament_strength\r\n self.tendon_elasticity = tendon_elasticity\r\n\r\n def skeletal_growth(self, endochondral_ossification, intramembranous, ossification):\r\n self.endochondral_ossification = endochondral_ossification\r\n self.intramembranous = intramembranous\r\n self.ossification = ossification\r\n\r\n def bone_repair(self, fracture_healing, cartilage_regeneration):\r\n self.fracture_healing = fracture_healing\r\n self.cartilage_regeneration = cartilage_regeneration\r\n\r\n def bone_density(self, calcium_metabolism, vitaminD_receptor, hormonal_regulation):\r\n self.calcium_metabolism = calcium_metabolism\r\n self.vitaminD_receptor = vitaminD_receptor\r\n self.hormonal_regulation = hormonal_regulation\r\n\r\n def bone_strength(self, collagen_crosslinking, mineral_composition):\r\n self.collagen_crosslinking = collagen_crosslinking\r\n self.mineral_composition = mineral_composition\r\n\r\n class Respiration:\r\n def __init__(self):\r\n self.structure = self.Respiratory_Structure()\r\n self.functions = self.Respiratory_Functions()\r\n\r\n class Respiratory_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.external_nares = None\r\n self.nasal_septum = None\r\n self.nasal_conchae = None\r\n self.nasal_mucosa = None\r\n self.olfactory_epithelium = None\r\n\r\n self.nasopharynx = None\r\n self.oropharynx = None\r\n self.laryngopharnx = None\r\n\r\n self.epiglottis = None\r\n self.vocal_cord = None\r\n\r\n self.trachea = None\r\n\r\n self.main_bronchi = None\r\n self.bronchioles = None\r\n self.alveolar_ducts = None\r\n self.alveoli = None\r\n\r\n self.lobes = None\r\n self.pleura = None\r\n self.bronchopulmonary_segments = None\r\n\r\n self.diaphragm = None\r\n\r\n def nasal_cavity(self, external_nares, nasal_septum, nasal_conchae, nasal_mucosa, olfactory_epithelium):\r\n self.external_nares = external_nares\r\n self.nasal_septum = nasal_septum\r\n self.nasal_conchae = nasal_conchae\r\n self.nasal_mucosa = nasal_mucosa\r\n self.olfactory_epithelium = olfactory_epithelium\r\n\r\n def pharynx(self, nasopharynx, oropharynx, laryngopharnx):\r\n self.nasopharynx = nasopharynx\r\n self.oropharynx = oropharynx\r\n self.laryngopharnx = laryngopharnx\r\n\r\n def larynx(self, epiglottis, vocal_cord):\r\n self.epiglottis = epiglottis\r\n self.vocal_cord = vocal_cord\r\n\r\n def bronchial_tree(self, main_bronchi, bronchioles, alveolar_ducts, alveoli):\r\n self.main_bronchi = main_bronchi\r\n self.bronchioles = bronchioles\r\n self.alveolar_ducts = alveolar_ducts\r\n self.alveoli = alveoli\r\n\r\n def lungs(self, lobes, pleura, bronchopulmonary_segments):\r\n self.lobes = lobes\r\n self.pleura = pleura\r\n self.bronchopulmonary_segments = bronchopulmonary_segments\r\n\r\n class Respiratory_Functions:\r\n def __init__(self):\r\n self.functions = None\r\n\r\n 
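# Breathing\r\n                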
self.diaphragmatic_contraction = None\r\n                self.intercostal_muscle_action = None\r\n\r\n                self.pulmonary_ventilation = None\r\n                self.alveolar_capillary_diffusion = None\r\n\r\n                self.hemoglobin_function = None\r\n\r\n                self.bicarbonate_ion_formation = None\r\n                self.red_blood_cell_carbonic_anhydrase = None\r\n\r\n                self.brainstem_respiratory_centers = None\r\n                self.chemoreceptor_sensitivity = None\r\n\r\n                self.mucociliary_escalator = None\r\n                self.cough_mechanism = None\r\n\r\n                self.elastic_fibers = None\r\n                self.surfactant_production = None\r\n\r\n                self.mucosal_immunity = None\r\n                self.alveolar_macrophages = None\r\n                self.ciliary_escalator = None\r\n                self.mucus_production = None\r\n\r\n                self.cough_reflex = None\r\n                self.sneezing_reflex = None\r\n\r\n            def breathing(self, diaphragmatic_contraction, intercostal_muscle_action):\r\n                self.diaphragmatic_contraction = diaphragmatic_contraction\r\n                self.intercostal_muscle_action = intercostal_muscle_action\r\n\r\n            def gas_exchange(self, pulmonary_ventilation, alveolar_capillary_diffusion):\r\n                self.pulmonary_ventilation = pulmonary_ventilation\r\n                self.alveolar_capillary_diffusion = alveolar_capillary_diffusion\r\n\r\n            def oxygen_transportation(self, hemoglobin_function):\r\n                self.hemoglobin_function = hemoglobin_function\r\n\r\n            def carbon_dioxide_transportation(self, bicarbonate_ion_formation, red_blood_cell_carbonic_anhydrase):\r\n                self.bicarbonate_ion_formation = bicarbonate_ion_formation\r\n                self.red_blood_cell_carbonic_anhydrase = red_blood_cell_carbonic_anhydrase\r\n\r\n            def respiratory_control(self, brainstem_respiratory_centers, chemoreceptor_sensitivity):\r\n                self.brainstem_respiratory_centers = brainstem_respiratory_centers\r\n                self.chemoreceptor_sensitivity = chemoreceptor_sensitivity\r\n\r\n            def airway_clearance(self, mucociliary_escalator, cough_mechanism):\r\n                self.mucociliary_escalator = mucociliary_escalator\r\n                self.cough_mechanism = cough_mechanism\r\n\r\n            def lung_compliance(self, elastic_fibers, surfactant_production):\r\n                self.elastic_fibers = elastic_fibers\r\n                self.surfactant_production = surfactant_production\r\n\r\n            def pulmonary_defence(self, mucosal_immunity, alveolar_macrophages, ciliary_escalator, mucus_production):\r\n                self.mucosal_immunity = mucosal_immunity\r\n                self.alveolar_macrophages = alveolar_macrophages\r\n                self.ciliary_escalator = ciliary_escalator\r\n                self.mucus_production = mucus_production\r\n\r\n            def respiratory_reflexes(self, cough_reflex, sneezing_reflex):\r\n                self.cough_reflex = cough_reflex\r\n                self.sneezing_reflex = sneezing_reflex\r\n\r\n    class Digestion:\r\n        def __init__(self):\r\n            self.structure = self.Digestive_Structure()\r\n            self.functions = self.Digestive_Functions()\r\n\r\n        class Digestive_Structure:\r\n            def __init__(self):\r\n                self.system = None\r\n\r\n                self.teeth = None\r\n                self.tongue = None\r\n                self.salivary_glands = None\r\n\r\n                self.pharynx = None\r\n                self.esophagus = None\r\n                self.stomach = None\r\n                self.liver = None\r\n                self.gallbladder = None\r\n                self.pancreases = None\r\n\r\n                self.cardiac_region = None\r\n                self.fundas = None\r\n                self.body = None\r\n                self.pylorus = None\r\n\r\n                self.dudenum = None\r\n                self.jejunum = None\r\n                self.ileum = None\r\n\r\n                self.cecum = None\r\n                self.ascending_colon = None\r\n                self.descending_colon = None\r\n                self.transverse_colon = None\r\n                self.sigmoid_colon = None\r\n                self.rectum = None\r\n                self.anus = None\r\n\r\n            def oral_cavity(self, teeth, tongue, salivary_glands):\r\n                self.teeth = teeth\r\n                self.tongue = tongue\r\n                self.salivary_glands = 
salivary_glands\r\n\r\n def stomach(self, cardiac_region, fundas, body, pylorus):\r\n self.cardiac_region = cardiac_region\r\n self.fundas = fundas\r\n self.body = body\r\n self.pylorus = pylorus\r\n\r\n def small_intestine(self, dudenum, jejunum, ileum):\r\n self.dudenum = dudenum\r\n self.jejunum = jejunum\r\n self.ileum = ileum\r\n\r\n def large_intestine(self, cecum, ascending_colon, descending_colon, transverse_colon, sigmoid_colon, rectum,\r\n anus):\r\n self.cecum = cecum\r\n self.ascending_colon = ascending_colon\r\n self.descending_colon = descending_colon\r\n self.transverse_colon = transverse_colon\r\n self.sigmoid_colon = sigmoid_colon\r\n self.rectum = rectum\r\n self.anus = anus\r\n\r\n class Digestive_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.enzymatic_digestion = None\r\n self.mechanic_digestion = None\r\n\r\n self.nutrient_absorption = None\r\n self.water_absorption = None\r\n self.bacterial_fermentation = None\r\n\r\n self.gastric_acid_secretion = None\r\n self.enzyme_secretion = None\r\n self.bicarbonate_secretion = None\r\n\r\n self.peristalsis = None\r\n self.segmentation = None\r\n\r\n self.gastric_emptying = None\r\n self.gastric_acid_regulation = None\r\n\r\n self.detoxification = None\r\n self.bile_production = None\r\n\r\n self.bile_storage = None\r\n self.bile_release = None\r\n\r\n def digestion(self, enzymatic_digestion, mechanic_digestion):\r\n self.enzymatic_digestion = enzymatic_digestion\r\n self.mechanic_digestion = mechanic_digestion\r\n\r\n def intestinal_functions(self, nutrient_absorption, water_absorption, bacterial_fermentation):\r\n self.nutrient_absorption = nutrient_absorption\r\n self.water_absorption = water_absorption\r\n self.bacterial_fermentation = bacterial_fermentation\r\n\r\n def motility(self, peristalsis, segmentation):\r\n self.peristalsis = peristalsis\r\n self.segmentation = segmentation\r\n\r\n def gastric_functions(self, gastric_emptying, gastric_acid_regulation):\r\n self.gastric_emptying = gastric_emptying\r\n self.gastric_acid_regulation = gastric_acid_regulation\r\n\r\n def liver_functions(self, detoxification, bile_production):\r\n self.detoxification = detoxification\r\n self.bile_production = bile_production\r\n\r\n def pancreatic_functions(self, gastric_acid_secretion, enzyme_secretion, bicarbonate_secretion):\r\n self.gastric_acid_secretion = gastric_acid_secretion\r\n self.enzyme_secretion = enzyme_secretion\r\n self.bicarbonate_secretion = bicarbonate_secretion\r\n\r\n def gallbladder_functions(self, bile_storage, bile_release):\r\n self.bile_storage = bile_storage\r\n self.bile_release = bile_release\r\n\r\n class Urinary:\r\n def __init__(self):\r\n self.structure = self.Urinary_Structure()\r\n self.functions = self.Urinary_Functions()\r\n\r\n class Urinary_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.kidney = None\r\n self.ureters = None\r\n self.urinary_bladder = None\r\n self.urethra = None\r\n\r\n class Urinary_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.glomerular_filtration = None\r\n self.tubular_reabsorption = None\r\n self.tubular_secretion = None\r\n\r\n self.nephron_function = None\r\n self.renal_blood_flow = None\r\n self.fluid_balance = None\r\n\r\n self.ureter_peristalsis = None\r\n self.bladder_contraction = None\r\n self.urination = None\r\n\r\n self.sodium_balance = None\r\n self.potassium_balance = None\r\n self.calcium_balance = None\r\n self.phosphate_balance = None\r\n\r\n self.renal_tubular_acidosis = None\r\n 
self.metabolic_alkalosis = None\r\n self.respiratory_acidosis = None\r\n\r\n self.mucus_secretion = None\r\n self.urine_flow = None\r\n self.antimicrobial_peptides = None\r\n\r\n def filtration(self, glomerular_filtration, tubular_reabsorption, tubular_secretion):\r\n self.glomerular_filtration = glomerular_filtration\r\n self.tubular_reabsorption = tubular_reabsorption\r\n self.tubular_secretion = tubular_secretion\r\n\r\n def urine_formation(self, nephron_function, renal_blood_flow, fluid_balance):\r\n self.nephron_function = nephron_function\r\n self.renal_blood_flow = renal_blood_flow\r\n self.fluid_balance = fluid_balance\r\n\r\n def urinary_transport(self, ureter_peristalsis, bladder_contraction, urination):\r\n self.ureter_peristalsis = ureter_peristalsis\r\n self.bladder_contraction = bladder_contraction\r\n self.urination = urination\r\n\r\n def electrolyte_balance(self, sodium_balance, potassium_balance, calcium_balance, phosphate_balance):\r\n self.sodium_balance = sodium_balance\r\n self.potassium_balance = potassium_balance\r\n self.calcium_balance = calcium_balance\r\n self.phosphate_balance = phosphate_balance\r\n\r\n def acid_base_balance(self, renal_tubular_acidosis, metabolic_alkalosis, respiratory_acidosis):\r\n self.renal_tubular_acidosis = renal_tubular_acidosis\r\n self.metabolic_alkalosis = metabolic_alkalosis\r\n self.respiratory_acidosis = respiratory_acidosis\r\n\r\n def urinary_tract_defence(self, mucus_secretion, urine_flow, antimicrobial_peptides):\r\n self.mucus_secretion = mucus_secretion\r\n self.urine_flow = urine_flow\r\n self.antimicrobial_peptides = antimicrobial_peptides\r\n\r\n class Endocrine:\r\n def __init__(self):\r\n self.structure = self.Endocrine_Structure()\r\n self.functions = self.Endocrine_Functions()\r\n\r\n class Endocrine_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.pituitary_glands = None\r\n self.thyroid_glands = None\r\n self.parathyroid_glands = None\r\n self.adrenal_glands = None\r\n self.pancreases = None\r\n self.ovaries = None\r\n self.testes = None\r\n self.pineal_gland = None\r\n self.thymus = None\r\n\r\n class Endocrine_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.thyroid_hormone = None\r\n self.adrenal_hormone = None\r\n self.insulin = None\r\n self.growth_hormone = None\r\n self.prolactin = None\r\n self.follicle_stimulating_hormone = None\r\n self.luteinizing_hormone = None\r\n\r\n self.hypothalamic_pituitary_axis = None\r\n self.feedback_mechanism = None\r\n\r\n self.thyroid_glands = None\r\n self.adrenal_glands = None\r\n self.pituitary_glands = None\r\n self.pineal_glands = None\r\n self.pancreases = None\r\n\r\n self.thyroid_hormone_receptor = None\r\n self.insulin_receptor = None\r\n self.growth_hormone_receptor = None\r\n self.estrogen_receptor = None\r\n self.testosterone_receptor = None\r\n\r\n self.cAMP_signaling_pathway = None\r\n self.MAPK_signaling_pathway = None\r\n self.ERK_signaling_pathway = None\r\n self.JAK_signaling_pathway = None\r\n self.STAT_signaling_pathway = None\r\n self.PI3K_signaling_pathway = None\r\n self.Akt_signaling_pathway = None\r\n\r\n def hormone_production(self, thyroid_hormone, adrenal_hormone, insulin, growth_hormone, prolactin,\r\n follicle_stimulating_hormone, luteinizing_hormone):\r\n self.thyroid_hormone = thyroid_hormone\r\n self.adrenal_hormone = adrenal_hormone\r\n self.insulin = insulin\r\n self.growth_hormone = growth_hormone\r\n self.prolactin = prolactin\r\n self.follicle_stimulating_hormone = 
follicle_stimulating_hormone\r\n self.luteinizing_hormone = luteinizing_hormone\r\n\r\n def hormone_regulation(self, hypothalamic_pituitary_axis, feedback_mechanism):\r\n self.hypothalamic_pituitary_axis = hypothalamic_pituitary_axis\r\n self.feedback_mechanism = feedback_mechanism\r\n\r\n def endocrine_glands(self, thyroid_glands, adrenal_glands, pituitary_glands, pineal_glands, pancreases):\r\n self.thyroid_glands = thyroid_glands\r\n self.adrenal_glands = adrenal_glands\r\n self.pituitary_glands = pituitary_glands\r\n self.pineal_glands = pineal_glands\r\n self.pancreases = pancreases\r\n\r\n def hormone_receptors(self, thyroid_hormone_receptor, insulin_receptor, growth_hormone_receptor,\r\n estrogen_receptor, testosterone_receptor):\r\n self.thyroid_hormone_receptor = thyroid_hormone_receptor\r\n self.insulin_receptor = insulin_receptor\r\n self.growth_hormone_receptor = growth_hormone_receptor\r\n self.estrogen_receptor = estrogen_receptor\r\n self.testosterone_receptor = testosterone_receptor\r\n\r\n class Reproduction:\r\n def __init__(self):\r\n self.functions = self.Reproductive_functions()\r\n self.structure = self.Reproductive_Structure()\r\n\r\n class Reproductive_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.testes = None\r\n self.epididymis = None\r\n self.vas_deferens = None\r\n self.seminal_vasicles = None\r\n self.prostate_glands = None\r\n self.bulbourethral_glands = None\r\n self.penis = None\r\n self.scrotum = None\r\n\r\n self.ovaries = None\r\n self.fallopian_tubes = None\r\n self.uterus = None\r\n self.cervix = None\r\n self.vagina = None\r\n self.vulva = None\r\n\r\n def male_reproductive_organ(self, testes, epididymis, vas_deferens, seminal_vasicles, prostate_glands,\r\n bulbourethral_glands, penis, scrotum):\r\n self.testes = testes\r\n self.epididymis = epididymis\r\n self.vas_deferens = vas_deferens\r\n self.seminal_vasicles = seminal_vasicles\r\n self.prostate_glands = prostate_glands\r\n self.bulbourethral_glands = bulbourethral_glands\r\n self.penis = penis\r\n self.scrotum = scrotum\r\n\r\n def female_reproductive_organ(self, ovaries, fallopian_tubes, uterus, cervix, vagina, vulva):\r\n self.ovaries = ovaries\r\n self.fallopian_tubes = fallopian_tubes\r\n self.uterus = uterus\r\n self.cervix = cervix\r\n self.vagina = vagina\r\n self.vulva = vulva\r\n\r\n class Reproductive_functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.spermatogenesis = None\r\n self.oogenesis = None\r\n\r\n self.testosterone_production = None\r\n self.estrogen_production = None\r\n self.progesterone_production = None\r\n self.follicle_stimulating_hormone_production = None\r\n self.luteinizing_hormone_production = None\r\n\r\n self.sperm_oocyte_interaction = None\r\n\r\n self.blastocyte_formation = None\r\n self.gastrulation = None\r\n self.organogenesis = None\r\n\r\n self.implantation = None\r\n self.placenta_development = None\r\n self.embryonic_development = None\r\n self.fetal_development = None\r\n\r\n self.uterine_contraction = None\r\n self.cervical_dilation = None\r\n self.fetus_expulsion = None\r\n\r\n self.follicular_phase = None\r\n self.ovulation = None\r\n self.luteal_phase = None\r\n self.menstruation = None\r\n\r\n self.hormonal_change = None\r\n self.ovarian_function_decline = None\r\n\r\n self.sperm_production = None\r\n self.egg_production = None\r\n self.sperm_maturation = None\r\n\r\n def gametogenesis(self, spermatogenesis, oogenesis):\r\n self.spermatogenesis = spermatogenesis\r\n self.oogenesis = oogenesis\r\n\r\n def 
hormone_production(self, testosterone_production, estrogen_production, progesterone_production,\r\n follicle_stimulating_hormone_production, luteinizing_hormone_production):\r\n self.testosterone_production = testosterone_production\r\n self.estrogen_production = estrogen_production\r\n self.progesterone_production = progesterone_production\r\n self.follicle_stimulating_hormone_production = follicle_stimulating_hormone_production\r\n self.luteinizing_hormone_production = luteinizing_hormone_production\r\n\r\n def fertilization(self, sperm_oocyte_interaction):\r\n self.sperm_oocyte_interaction = sperm_oocyte_interaction\r\n\r\n def embryonic_development(self, blastocyte_formation, gastrulation, organogenesis):\r\n self.blastocyte_formation = blastocyte_formation\r\n self.gastrulation = gastrulation\r\n self.organogenesis = organogenesis\r\n\r\n def pregrency(self, implantation, placenta_development, embryonic_development, fetal_development):\r\n self.implantation = implantation\r\n self.placenta_development = placenta_development\r\n self.embryonic_development = embryonic_development\r\n self.fetal_development = fetal_development\r\n\r\n def labor_delivery(self, uterine_contraction, cervical_dilation, fetus_expulsion):\r\n self.uterine_contraction = uterine_contraction\r\n self.cervical_dilation = cervical_dilation\r\n self.fetus_expulsion = fetus_expulsion\r\n\r\n def menstruation_cycle(self, follicular_phase, ovulation, luteal_phase, menstruation):\r\n self.follicular_phase = follicular_phase\r\n self.ovulation = ovulation\r\n self.luteal_phase = luteal_phase\r\n self.menstruation = menstruation\r\n\r\n def menopause(self, hormonal_change, ovarian_function_decline):\r\n self.hormonal_change = hormonal_change\r\n self.ovarian_function_decline = ovarian_function_decline\r\n\r\n def gametes_production(self, sperm_production, sperm_maturation, egg_production):\r\n self.sperm_production = sperm_production\r\n self.egg_production = egg_production\r\n self.sperm_maturation = sperm_maturation\r\n\r\n class Circulation:\r\n def __init__(self):\r\n self.structure = self.Circulatory_Structure()\r\n self.functions = self.Circulatory_Functions()\r\n\r\n class Circulatory_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.heart = None\r\n\r\n self.aorta = None\r\n self.pulmonary_arteries = None\r\n self.coronary_arteries = None\r\n\r\n self.superior_vena_cava = None\r\n self.inferior_vena_cava = None\r\n self.pulmonary_veins = None\r\n self.coronary_sinus = None\r\n\r\n self.capillaries = None\r\n\r\n self.red_blood_cells = None\r\n self.white_blood_cells = None\r\n self.platelets = None\r\n\r\n def arteries(self, aorta, pulmonary_arteries, coronary_arteries):\r\n self.aorta = aorta\r\n self.pulmonary_arteries = pulmonary_arteries\r\n self.coronary_arteries = coronary_arteries\r\n\r\n def veins(self, superior_vena_cava, inferior_vena_cava, pulmonary_veins, coronary_sinus):\r\n self.superior_vena_cava = superior_vena_cava\r\n self.inferior_vena_cava = inferior_vena_cava\r\n self.pulmonary_veins = pulmonary_veins\r\n self.coronary_sinus = coronary_sinus\r\n\r\n def blood(self, red_blood_cells, white_blood_cells, platelets):\r\n self.red_blood_cells = red_blood_cells\r\n self.white_blood_cells = white_blood_cells\r\n self.platelets = platelets\r\n\r\n class Circulatory_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.cardiac_muscle = None\r\n self.conduction_system = None\r\n\r\n self.arteries = None\r\n self.veins = None\r\n self.capillaries = None\r\n\r\n 
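# Blood cell production\r\n                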
self.erythropoiesis = None\r\n self.leukopoiesis = None\r\n self.thrombopoiesis = None\r\n\r\n self.red_blood_cells = None\r\n self.white_blood_cells = None\r\n self.platelets = None\r\n\r\n self.plasma = None\r\n self.proteins = None\r\n self.electrolytes = None\r\n\r\n self.coagulation_factor = None\r\n self.fibrinogen = None\r\n self.platelet_aggregation = None\r\n\r\n self.renin_angiotensin_system = None\r\n self.baroreceptor_reflex = None\r\n\r\n self.oxygen_transport = None\r\n self.hemoglobin = None\r\n\r\n self.insulin = None\r\n self.glucagon = None\r\n\r\n def heart_functions(self, cardiac_muscle, conduction_system):\r\n self.cardiac_muscle = cardiac_muscle\r\n self.conduction_system = conduction_system\r\n\r\n def blood_vessels_structure(self, arteries, veins, capillaries):\r\n self.arteries = arteries\r\n self.veins = veins\r\n self.capillaries = capillaries\r\n\r\n def blood_cell_production(self, erythropoiesis, leukopoiesis, thrombopoiesis):\r\n self.erythropoiesis = erythropoiesis\r\n self.leukopoiesis = leukopoiesis\r\n self.thrombopoiesis = thrombopoiesis\r\n\r\n def blood_cell_types(self, red_blood_cells, white_blood_cells, platelets):\r\n self.red_blood_cells = red_blood_cells\r\n self.white_blood_cells = white_blood_cells\r\n self.platelets = platelets\r\n\r\n def blood_composition(self, plasma, proteins, electrolytes):\r\n self.plasma = plasma\r\n self.proteins = proteins\r\n self.electrolytes = electrolytes\r\n\r\n def blood_clotting(self, coagulation_factor, fibrinogen, platelet_aggregation):\r\n self.coagulation_factor = coagulation_factor\r\n self.fibrinogen = fibrinogen\r\n self.platelet_aggregation = platelet_aggregation\r\n\r\n def blood_pressure_regulation(self, renin_angiotensin_system, baroreceptor_reflex):\r\n self.renin_angiotensin_system = renin_angiotensin_system\r\n self.baroreceptor_reflex = baroreceptor_reflex\r\n\r\n def blood_oxygenation(self, oxygen_transport, hemoglobin):\r\n self.oxygen_transport = oxygen_transport\r\n self.hemoglobin = hemoglobin\r\n\r\n def blood_sugar_regulation(self, insulin, glucagon):\r\n self.insulin = insulin\r\n self.glucagon = glucagon\r\n\r\n class Lymphatic:\r\n def __init__(self):\r\n self.structure = self.Lymphatic_Structure()\r\n self.functions = self.Lymphatic_Functions()\r\n\r\n class Lymphatic_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.lymphatic_vessels = None\r\n self.lymph_nodes = None\r\n self.tonsils = None\r\n self.spleen = None\r\n self.thymus = None\r\n self.bone_marrow = None\r\n\r\n class Lymphatic_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.b_cell_development = None\r\n self.t_cell_development = None\r\n self.natural_killer_cell_development = None\r\n\r\n self.cortex = None\r\n self.medulla = None\r\n self.sinuses = None\r\n\r\n self.lymph_flow = None\r\n self.lymphatic_drainage = None\r\n\r\n self.antigen_recognition = None\r\n self.antibody_production = None\r\n self.cell_mediated_immunity = None\r\n self.inflammation = None\r\n\r\n self.lymph_node_filtration = None\r\n self.spleen_filtration = None\r\n\r\n self.chemotaxis = None\r\n self.homing = None\r\n\r\n self.interstitial_fluid_drainage = None\r\n self.fat_absorption = None\r\n\r\n self.thymus = None\r\n self.tonsils = None\r\n self.spleen = None\r\n\r\n def lymphocyte_production(self, b_cell_development, t_cell_development, natural_killer_cell_development):\r\n self.b_cell_development = b_cell_development\r\n self.t_cell_development = t_cell_development\r\n 
self.natural_killer_cell_development = natural_killer_cell_development\r\n\r\n def lymph_node(self, cortex, medulla, sinuses):\r\n self.cortex = cortex\r\n self.medulla = medulla\r\n self.sinuses = sinuses\r\n\r\n def lymphatic_vessels(self, lymph_flow, lymphatic_drainage):\r\n self.lymph_flow = lymph_flow\r\n self.lymphatic_drainage = lymphatic_drainage\r\n\r\n def immune_response(self, antigen_recognition, antibody_production, cell_mediated_immunity, inflammation):\r\n self.antigen_recognition = antigen_recognition\r\n self.antibody_production = antibody_production\r\n self.cell_mediated_immunity = cell_mediated_immunity\r\n self.inflammation = inflammation\r\n\r\n def lymphatic_filtration(self, lymph_node_filtration, spleen_filtration):\r\n self.lymph_node_filtration = lymph_node_filtration\r\n self.spleen_filtration = spleen_filtration\r\n\r\n def immune_cell_migration(self, chemotaxis, homing):\r\n self.chemotaxis = chemotaxis\r\n self.homing = homing\r\n\r\n def lymphatic_transport(self, interstitial_fluid_drainage, fat_absorption):\r\n self.interstitial_fluid_drainage = interstitial_fluid_drainage\r\n self.fat_absorption = fat_absorption\r\n\r\n def lymphoid_organs(self, thymus, tonsils, spleen):\r\n self.thymus = thymus\r\n self.tonsils = tonsils\r\n self.spleen = spleen\r\n\r\n class Nervous:\r\n def __init__(self):\r\n self.structure = self.Nervous_Structure()\r\n self.functions = self.Nervous_Functions()\r\n\r\n class Nervous_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.brain = None\r\n self.spinal_cord = None\r\n\r\n self.cranial_nerves = None\r\n self.spinal_nerves = None\r\n self.autonomic_nerves = None\r\n\r\n self.photoreceptors = None\r\n self.thermoreceptors = None\r\n self.mechanoreceptors = None\r\n self.chemoreceptors = None\r\n self.nociceptors = None\r\n\r\n self.motor_neurons = None\r\n self.sensory_neurons = None\r\n self.interneurons = None\r\n\r\n self.dopamine = None\r\n self.serotonin = None\r\n self.acetylcholine = None\r\n self.gaba = None\r\n self.glutomate = None\r\n\r\n def central_nervous_system(self, brain, spinal_cord):\r\n self.brain = brain\r\n self.spinal_cord = spinal_cord\r\n\r\n def peripheral_nervous_system(self, cranial_nerves, spinal_nerves, autonomic_nerves):\r\n self.cranial_nerves = cranial_nerves\r\n self.spinal_nerves = spinal_nerves\r\n self.autonomic_nerves = autonomic_nerves\r\n\r\n def sensory_receptors(self, photoreceptors, thermoreceptors, mechanoreceptors, chemoreceptors, nociceptors):\r\n self.photoreceptors = photoreceptors\r\n self.thermoreceptors = thermoreceptors\r\n self.mechanoreceptors = mechanoreceptors\r\n self.chemoreceptors = chemoreceptors\r\n self.nociceptors = nociceptors\r\n\r\n def neurons(self, motor_neurons, sensory_neurons, interneurons):\r\n self.motor_neurons = motor_neurons\r\n self.sensory_neurons = sensory_neurons\r\n self.interneurons = interneurons\r\n\r\n def neurotransmitters(self, dopamine, serotonin, acetylcholine, gaba, glutomate):\r\n self.dopamine = dopamine\r\n self.serotonin = serotonin\r\n self.acetylcholine = acetylcholine\r\n self.gaba = gaba\r\n self.glutomate = glutomate\r\n\r\n class Nervous_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.neurogenesis = None\r\n self.axon_guidance = None\r\n self.synaptogenesis = None\r\n\r\n self.dopamine = None\r\n self.serotonin = None\r\n self.acetylcholine = None\r\n self.gaba = None\r\n self.glutomate = None\r\n\r\n self.action_potential = None\r\n self.synaptic_vesicle_release = None\r\n 
self.neurotransmitter_binding = None\r\n\r\n self.neural_connectivity = None\r\n self.synaptic_plasticity = None\r\n self.neural_network_formation = None\r\n\r\n self.vision = None\r\n self.hearing = None\r\n self.touch = None\r\n self.taste = None\r\n self.smell = None\r\n\r\n self.voluntary_movements = None\r\n self.reflexes = None\r\n self.balance = None\r\n self.coordination = None\r\n\r\n self.memory = None\r\n self.attention = None\r\n self.learning = None\r\n self.language = None\r\n self.problem_solving = None\r\n\r\n self.alzheimer_disease = None\r\n self.parkinson_disease = None\r\n self.multiple_sclerosis = None\r\n self.epilepsy = None\r\n\r\n def neuron_development(self, neurogenesis, axon_guidance, synaptogenesis):\r\n self.neurogenesis = neurogenesis\r\n self.axon_guidance = axon_guidance\r\n self.synaptogenesis = synaptogenesis\r\n\r\n def neurotransmitters(self, dopamine, serotonin, acetylcholine, gaba, glutomate):\r\n self.dopamine = dopamine\r\n self.serotonin = serotonin\r\n self.acetylcholine = acetylcholine\r\n self.gaba = gaba\r\n self.glutomate = glutomate\r\n\r\n def synaptic_transmission(self, action_potential, synaptic_vesicle_release, neurotransmitter_binding):\r\n self.action_potential = action_potential\r\n self.synaptic_vesicle_release = synaptic_vesicle_release\r\n self.neurotransmitter_binding = neurotransmitter_binding\r\n\r\n def neural_circuit_formation(self, neural_connectivity, synaptic_plasticity, neural_network_formation):\r\n self.neural_connectivity = neural_connectivity\r\n self.synaptic_plasticity = synaptic_plasticity\r\n self.neural_network_formation = neural_network_formation\r\n\r\n def sensory_processing(self, vision, hearing, touch, taste, smell):\r\n self.vision = vision\r\n self.hearing = hearing\r\n self.touch = touch\r\n self.taste = taste\r\n self.smell = smell\r\n\r\n def motor_control(self, voluntary_movements, reflexes, balance, coordination):\r\n self.voluntary_movements = voluntary_movements\r\n self.reflexes = reflexes\r\n self.balance = balance\r\n self.coordination = coordination\r\n\r\n def cognitive_functions(self, memory, attention, learning, language, problem_solving):\r\n self.memory = memory\r\n self.attention = attention\r\n self.learning = learning\r\n self.language = language\r\n self.problem_solving = problem_solving\r\n\r\n def neurological_disorders(self, alzheimer_disease, parkinson_disease, multiple_sclerosis, epilepsy):\r\n self.alzheimer_disease = alzheimer_disease\r\n self.parkinson_disease = parkinson_disease\r\n self.multiple_sclerosis = multiple_sclerosis\r\n self.epilepsy = epilepsy\r\n\r\n class Muscular:\r\n def __init__(self):\r\n self.structure = self.Muscular_Structure()\r\n self.functions = self.Muscular_Functions()\r\n\r\n class Muscular_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.head_muscle = None\r\n self.neck_muscle = None\r\n self.upper_limb_muscle = None\r\n self.lower_limb_muscle = None\r\n self.trunk_muscle = None\r\n\r\n self.digestive_muscle = None\r\n self.respiratory_muscle = None\r\n self.blood_vessels_muscle = None\r\n self.reproductive_muscle = None\r\n\r\n self.cardiac_muscle = None\r\n\r\n def skeletal_muscle(self, head_muscle, neck_muscle, upper_limb_muscle, lower_limb_muscle, trunk_muscle):\r\n self.head_muscle = head_muscle\r\n self.neck_muscle = neck_muscle\r\n self.upper_limb_muscle = upper_limb_muscle\r\n self.lower_limb_muscle = lower_limb_muscle\r\n self.trunk_muscle = trunk_muscle\r\n\r\n def smooth_muscle(self, digestive_muscle, 
respiratory_muscle, blood_vessels_muscle, reproductive_muscle):\r\n self.digestive_muscle = digestive_muscle\r\n self.respiratory_muscle = respiratory_muscle\r\n self.blood_vessels_muscle = blood_vessels_muscle\r\n self.reproductive_muscle = reproductive_muscle\r\n\r\n class Muscular_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.myogenesis = None\r\n self.muscle_fiber_formation = None\r\n\r\n self.actin = None\r\n self.myosin = None\r\n self.troponin = None\r\n self.tropomyosin = None\r\n\r\n self.atp_synthesis = None\r\n self.glycolysis = None\r\n self.oxidative_phosphorylation = None\r\n\r\n self.type_1_fibers = None\r\n self.type_2_fibers = None\r\n\r\n self.satellite_cell = None\r\n self.myostatin = None\r\n self.insulin = None\r\n\r\n self.muscle_motor_recruitment = None\r\n self.muscle_synergies = None\r\n\r\n self.muscular_dystrophy = None\r\n self.myasthenia_gravis = None\r\n self.rhabdomyolysis = None\r\n\r\n def muscle_development(self, myogenesis, muscle_fiber_formation):\r\n self.myogenesis = myogenesis\r\n self.muscle_fiber_formation = muscle_fiber_formation\r\n\r\n def muscle_contraction(self, actin, myosin, troponin, tropomyosin):\r\n self.actin = actin\r\n self.myosin = myosin\r\n self.troponin = troponin\r\n self.tropomyosin = tropomyosin\r\n\r\n def energy_metabolism(self, atp_synthesis, glycolysis, oxidative_phosphorylation):\r\n self.atp_synthesis = atp_synthesis\r\n self.glycolysis = glycolysis\r\n self.oxidative_phosphorylation = oxidative_phosphorylation\r\n\r\n def muscle_fiber(self, type_1_fibers, type_2_fibers):\r\n self.type_1_fibers = type_1_fibers\r\n self.type_2_fibers = type_2_fibers\r\n\r\n def muscle_growth(self, satellite_cell, myostatin, insulin):\r\n self.satellite_cell = satellite_cell\r\n self.myostatin = myostatin\r\n self.insulin = insulin\r\n\r\n def muscle_coordination(self, muscle_motor_recruitment, muscle_synergies):\r\n self.muscle_motor_recruitment = muscle_motor_recruitment\r\n self.muscle_synergies = muscle_synergies\r\n\r\n def muscle_disorder(self, muscular_dystrophy, myasthenia_gravis, rhabdomyolysis):\r\n self.muscular_dystrophy = muscular_dystrophy\r\n self.myasthenia_gravis = myasthenia_gravis\r\n self.rhabdomyolysis = rhabdomyolysis\r\n\r\n\r\n class Integumentary:\r\n def __init__(self):\r\n self.structure = self.Integumentary_Structure()\r\n self.functions = self.Integumentary_Functions()\r\n\r\n class Integumentary_Structure:\r\n def __init__(self):\r\n self.system = None\r\n\r\n self.dermis = None\r\n self.epidermis = None\r\n self.hypodermis = None\r\n\r\n self.hair_shaft = None\r\n self.hair_follicle = None\r\n\r\n self.nail_plate = None\r\n self.nail_bed = None\r\n self.nail_matrix = None\r\n\r\n self.eccrine_sweat_glands = None\r\n self.epocrine_sweat_glands = None\r\n\r\n self.sebaceous_glands = None\r\n\r\n def skin(self, dermis, epidermis, hypodermis):\r\n self.dermis = dermis\r\n self.epidermis = epidermis\r\n self.hypodermis = hypodermis\r\n\r\n def hairs(self, hair_shaft, hair_follicle):\r\n self.hair_shaft = hair_shaft\r\n self.hair_follicle = hair_follicle\r\n\r\n def nails(self, nail_plate, nail_bed, nail_matrix):\r\n self.nail_plate = nail_plate\r\n self.nail_bed = nail_bed\r\n self.nail_matrix = nail_matrix\r\n\r\n def sweat_glands(self, eccrine_sweat_glands, epocrine_sweat_glands):\r\n self.eccrine_sweat_glands = eccrine_sweat_glands\r\n self.epocrine_sweat_glands = epocrine_sweat_glands\r\n\r\n class Integumentary_Functions:\r\n def __init__(self):\r\n self.system = None\r\n\r\n 
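# Skin development\r\n                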
self.epidermal_differentiation = None\r\n                self.hair_follicle_development = None\r\n\r\n                self.epidermal_barrier_formation = None\r\n                self.stratum_corneum_maintenance = None\r\n\r\n                self.melanogenesis = None\r\n                self.pigment_transport = None\r\n                self.pigment_distribution = None\r\n\r\n                self.sweat_production = None\r\n                self.sweat_secretion = None\r\n                self.thermoregulation = None\r\n\r\n                self.sebum_production = None\r\n                self.sebum_secretion = None\r\n                self.skin_moisture_regulation = None\r\n\r\n                self.inflammatory_response = None\r\n                self.granulation_tissue_formation = None\r\n                self.epithelialization = None\r\n\r\n                self.hair_follicle_cycling = None\r\n                self.keratin_formation = None\r\n                self.keratin_differentiation = None\r\n\r\n                self.nail_matrix_function = None\r\n                self.nail_plate_formation = None\r\n\r\n                self.eczema = None\r\n                self.acne = None\r\n                self.psoriasis = None\r\n\r\n            def skin_development(self, epidermal_differentiation, hair_follicle_development):\r\n                self.epidermal_differentiation = epidermal_differentiation\r\n                self.hair_follicle_development = hair_follicle_development\r\n\r\n            def skin_barrier(self, epidermal_barrier_formation, stratum_corneum_maintenance):\r\n                self.epidermal_barrier_formation = epidermal_barrier_formation\r\n                self.stratum_corneum_maintenance = stratum_corneum_maintenance\r\n\r\n            def pigmentation(self, melanogenesis, pigment_transport, pigment_distribution):\r\n                self.melanogenesis = melanogenesis\r\n                self.pigment_transport = pigment_transport\r\n                self.pigment_distribution = pigment_distribution\r\n\r\n            def sweat_glands(self, sweat_production, sweat_secretion, thermoregulation):\r\n                self.sweat_production = sweat_production\r\n                self.sweat_secretion = sweat_secretion\r\n                self.thermoregulation = thermoregulation\r\n\r\n            def sebaceous_glands(self, sebum_production, sebum_secretion, skin_moisture_regulation):\r\n                self.sebum_production = sebum_production\r\n                self.sebum_secretion = sebum_secretion\r\n                self.skin_moisture_regulation = skin_moisture_regulation\r\n\r\n            def wound_healing(self, inflammatory_response, granulation_tissue_formation, epithelialization):\r\n                self.inflammatory_response = inflammatory_response\r\n                self.granulation_tissue_formation = granulation_tissue_formation\r\n                self.epithelialization = epithelialization\r\n\r\n            def hair_growth(self, hair_follicle_cycling, keratin_formation, keratin_differentiation):\r\n                self.hair_follicle_cycling = hair_follicle_cycling\r\n                self.keratin_formation = keratin_formation\r\n                self.keratin_differentiation = keratin_differentiation\r\n\r\n            def nail_growth(self, nail_matrix_function, nail_plate_formation):\r\n                self.nail_matrix_function = nail_matrix_function\r\n                self.nail_plate_formation = nail_plate_formation\r\n\r\n            def skin_disorders(self, eczema, acne, psoriasis):\r\n                self.eczema = eczema\r\n                self.acne = acne\r\n                self.psoriasis = psoriasis\r\n\r\n","repo_name":"uswamaryam12/TrueDNA","sub_path":"TrueDNA v3/Anatomy.py","file_name":"Anatomy.py","file_ext":"py","file_size_in_byte":54244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
{"seq_id":"28866887867","text":"'''\nGiven a sorted array with one number missing, output that number (the values form a contiguous range, but it does not necessarily start at 1)\n Follow-up 1: if several numbers are missing, output all of the missing numbers\n Follow-up 2: what if we only need to know how many numbers are missing?\n\nGiven an unsorted array with one number missing, output that number (a contiguous range, but not necessarily 1-N)\n\n\nGiven an array with the numbers 1~N, where N is the length of the array, unsorted, with one number missing, output that number\n\n\nGiven an array with the numbers 1~N, where N is the length of the array, sorted, with one number missing, output that number\n\n\nSee LC 268\nGiven an array nums containing n numbers from [0, n], find the one number in [0, n] that does not appear in the array.\n\nLC 41, First Missing Positive\n\nMissing numbers, non-contiguous and unsorted\n'''\n\n'''\nConditions to clarify:\n1. the range of the integers\n2. whether the array is sorted\n3. 
whether the array can be empty\n\n\nif mid + 1 == arr[mid], the missing number is in [mid + 1, r], so move l to mid + 1\nif mid + 1 != arr[mid], the missing number is in [l, mid], so move r to mid - 1\n'''\n\n\n# '''\n# LC 268\n# Given an array nums containing n numbers from [0, n], find the one number in [0, n] that does not appear in the array.\n# '''\n\n\n'''\nGiven an array nums containing n - 1 of the numbers in [1, n], find the one number in [1, n] that does not appear in the array.\n\nn == nums.length + 1\n1 <= n <= 10**4\n1 <= nums[i] <= n\nAll the numbers of nums are unique.\n'''\n\n'''\nApproach 1\n1. put nums into a set\n2. traverse the integers from 1 to N and check whether each one is in the set\nTime complexity: O(2N)\nSpace complexity: O(N)\n'''\ndef missingNumber(nums):\n    '''return the missing number\n    Args:\n        nums(list)\n    Return:\n        int: the missing number\n    '''\n    nums_set = set(nums)\n\n    for i in range(1, len(nums) + 2):\n        if i not in nums_set:\n            return i\n\n'''\nApproach 2\nBitwise XOR\nTime complexity: O(N)\nSpace complexity: O(1)\n'''\ndef missingNumber(nums):\n    res = len(nums) + 1\n\n    for i, each in enumerate(nums):\n        res ^= (i + 1)\n        res ^= each\n    \n    return res\n\n'''\nApproach 3\nGauss summation\nnote: in a fixed-width integer language the sum could overflow\nTime complexity: O(N)\nSpace complexity: O(1)\n'''\ndef missingNumber(nums):\n    n = len(nums) + 1\n    return (n + 1) * n // 2 - sum(nums)\n\n\n'''\nApproach 4\nsort + binary search\nif unsorted:\nTime complexity: O(NlogN + logN)\nSpace complexity: O(1)\n\nif already sorted:\nTime complexity: O(logN)\nSpace complexity: O(1)\n'''\ndef missingNumber(nums):\n    nums.sort()\n    l = 0\n    r = len(nums) - 1\n\n    while l <= r:\n        mid = l + (r - l) // 2\n        if nums[mid] == mid + 1:\n            l = mid + 1\n        else:\n            r = mid - 1\n    \n    return l + 1\n\n\n# print(missingNumber([1,2,4,5,6]))\n# print(missingNumber([1]))\n# print(missingNumber([2]))\n# print(missingNumber([1, 2, 3, 4]))\n\ndef missingNumber(nums, N):\n    nums_set = set(nums)\n\n    res = []\n    for i in range(1, N + 1):\n        if i not in nums_set:\n            res.append(i)\n    return res \n\n# print(missingNumber([1, 1, 2, 2, 4, 5, 6], 7))\n\n\n\ndef missingNumber(nums):\n    nums.sort()\n    if (nums[-1] - nums[0] + 1) == len(nums) and nums[0] == 1:\n        return -1\n\n    l = 0\n    r = len(nums) - 1\n\n    while l <= r:\n        mid = l + (r - l) // 2\n        if nums[mid] == mid + 1:\n            l = mid + 1\n        else:\n            r = mid - 1\n    \n    return l + 1\n\n# print(missingNumber([2, 3]))\n# print(missingNumber([1, 2, 4]))\n# print(missingNumber([1, 2, 3]))\n\n\n'''\ninput: sorted arr\nthe value of the integers in arr: 1 ~ N\nunique\nN = len(arr) + 1\n\n[1, 2, 3] -> missing 4 \n[1, 2, 4] -> missing 3\n\nbinary search\ntime complexity: O(logN)\nspace complexity: O(1)\n\n'''\n\n\ndef find_missing_number(arr):\n    ''' return the missing number \n    Args:\n        arr(list): sorted array \n    Return:\n        int(integer)\n    '''\n\n    # corner case\n    if arr == []:\n        return 1\n    \n    # use binary search to find missing number\n    l = 0\n    r = len(arr) - 1\n\n    while l <= r:\n        mid = l + (r - l) // 2\n        if arr[mid] == mid + 1:\n            l = mid + 1\n        else:\n            r = mid - 1\n    return l + 1\n\n# arr = []\n# print(find_missing_number(arr))  # expected 1\n\n# arr = [1, 2, 4]\n# print(find_missing_number(arr))  # expected 3\n\n# arr = [1, 2, 3]\n# print(find_missing_number(arr))  # expected 4\n\n# arr = [2, 3]\n# print(find_missing_number(arr))  # expected 1\n\n\n\n\ndef find_missing_number(arr):\n    ''' return the missing number \n    Args:\n        arr(list): unsorted array \n    Return:\n        int(integer)\n    '''\n\n    # corner case\n    if arr == []:\n        return 1\n    \n    res = len(arr) + 1\n    for i, each in enumerate(arr, 1):\n        res ^= i \n        res ^= each \n    return res\n    \n\n# arr = []\n# print(find_missing_number(arr))  # expected 1\n\n# arr = [1, 2, 4]\n# print(find_missing_number(arr))  # expected 3\n\n# arr = [1, 2, 3]\n# print(find_missing_number(arr))  # expected 4\n\n# arr = [2, 3]\n# print(find_missing_number(arr))  # expected 1\n\n
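# (Sketches for the two follow-ups raised in the problem statement at the top of\n# this file; the function names below are illustrative additions, not part of the\n# original solution set.)\n\n# Follow-up 2: if only the COUNT of missing numbers in [1, N] is needed,\n# no search is required at all.\ndef count_missing_numbers(arr, N):\n    return N - len(set(arr))\n\n# General case: sorted, unique, contiguous range that need not start at 1.\n# Invariant: to the left of the gap, arr[i] == arr[0] + i.\ndef find_missing_general(arr):\n    l, r = 0, len(arr) - 1\n    while l <= r:\n        mid = l + (r - l) // 2\n        if arr[mid] == arr[0] + mid:\n            l = mid + 1\n        else:\n            r = mid - 1\n    return arr[0] + l\n\n# print(find_missing_general([5, 6, 8]))  # expected 7\n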
\n'''\nMultiple missing numbers\n\ninput: sorted arr, N\n1~N\n\n\n[1, 2, 4], N = 6 --> [3, 5, 6]\n\n[1, 2, 4], N = ? --> [3...?]\n\n\n'''\n\ndef find_missing_number(arr, N):\n    # corner case\n    if arr == []:\n        return [each for each in range(1, N + 1)]\n\n    res = []\n    j = 0\n    n = len(arr) \n    for i in range(1, N + 1):\n        if j < n and arr[j] == i:\n            j += 1\n            continue \n        \n        res.append(i)\n    return res \n\narr = []\nprint(find_missing_number(arr, 2))  # expected [1, 2]\n\narr = [1, 2, 4]\nprint(find_missing_number(arr, 4))  # expected [3]\n\narr = [1, 2, 3]\nprint(find_missing_number(arr, 4))  # expected [4]\n\narr = [2, 3]\nprint(find_missing_number(arr, 4))  # expected [1, 4]\n","repo_name":"Leahxuliu/Data-Structure-And-Algorithm","sub_path":"Python/巨硬/AA.缺失的数.py","file_name":"AA.缺失的数.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"}
{"seq_id":"22990988971","text":"import pandas as pd\nimport os\nfrom datetime import datetime, timedelta\nfrom FXaddsep import AddSeparator\n\nurl_download = \"/ptax_internet/consultaBoletim.do?method=gerarCSVFechamentoMoedaNoPeriodo&ChkMoeda=61&DATAINI=08/02/2022&DATAFIM=07/03/2022\"\nurl_base = url_download.split('ChkMoeda')[0]\ncurrency = input('Digite o código da moeda (USD: 61 ou EUR: 222): ')\nif currency == '222':\n    curr_name = 'Euro'\nelif currency == '61':\n    curr_name = 'Dolar'\n\n\n\nif os.path.isfile(f'./Cotacoes{curr_name}2022.csv'):\n    try:\n        cur_csv = r'C:\\\Users\\\miche\\\diretorio\\\WScrapingFXBacen\\\Cotacoes' + curr_name + '2022.csv'\n        readfile_df = pd.read_csv(cur_csv, header= None, sep= ';')\n        \"\"\"Convert the dates to datetime before manipulating them\"\"\"\n        last_date = readfile_df[0].tail(1)\n        #date = str(last_date.iloc[-1])[:-6] + '/' + str(last_date.iloc[-1])[-6:-4] + '/' + str(last_date.iloc[-1])[-4:]\n        dt_inicio = f\"{str(last_date.iloc[-1])[:-6]}/{str(last_date.iloc[-1])[-6:-4]}/{str(last_date.iloc[-1])[-4:]}\"\n        dt_inicio = datetime.strptime(dt_inicio, '%d/%m/%Y') + timedelta(1)\n        dt_inicio = str(dt_inicio)[:-9]\n        dt_inicio = f\"{dt_inicio[8:]}/{dt_inicio[5:7]}/{dt_inicio[:4]}\"\n        dt_final = datetime.strftime(datetime.now() - timedelta(1), '%d/%m/%Y')\n        cur_csvnew = f'https://ptax.bcb.gov.br{url_base}ChkMoeda={currency}&DATAINI={dt_inicio}&DATAFIM={dt_final}'\n        readnewfile_df = pd.read_csv(cur_csvnew, header= None, sep= ';')\n        readnewfile_df.to_csv(f'./Cotacoes{curr_name}2022.csv', mode= 'a', sep= ';', index= False, header= False)\n    except:\n        print('A base já está atualizada.')\nelse:\n    dt_inicio = input('Digite a data inicial (dd/mm/aaaa): ')\n    dt_final = input('Digite a data final (dd/mm/aaaa): ')\n    cur_csvnew = f'https://ptax.bcb.gov.br{url_base}ChkMoeda={currency}&DATAINI={dt_inicio}&DATAFIM={dt_final}'\n    print(cur_csvnew)\n    readnewfile_df = pd.read_csv(cur_csvnew, header= None, sep= ';', names= ['Data', 'Codigo da moeda', 'Nao sei', 'Moeda', 'Taxa compra', 'Taxa venda', 'Paridade dolar compra', 'Paridade dolar venda'])\n    readnewfile_df.to_csv(f'./Cotacoes{curr_name}2022.csv', sep= ';', index= False, header= True)\n\nAddSeparator(curr_name).addSep()","repo_name":"michelvasconcelos/WScrapingFXBacen","sub_path":"WsFXBacen.py","file_name":"WsFXBacen.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
{"seq_id":"37532521025","text":"import numpy as np\nimport sys\nimport os as os\n\nfrom os.path import isfile, join, dirname, abspath\nimport matplotlib as mpl\nmpl.use(\"pgf\")\npgf_with_custom_preamble = {\n    \"text.usetex\": 
True, # use inline math for ticks\n    \"font.family\": \"serif\",\n    \"font.serif\": [],\n    #\"pgf.rcfonts\": False,   # don't setup fonts from rc parameters\n}\nmpl.rcParams.update(pgf_with_custom_preamble)\nfrom matplotlib import pyplot as plt\n\n\ndef getNumber(s):\n    pos_ = s.find('_n')+2\n    posEnd = s.find('_L')\n    print(s[pos_:posEnd])\n    return float(s[pos_:posEnd])\n\ncurrent_dic = os.getcwd()\nsub_dic = abspath(join(current_dic, '..', 'results'))\nfiles = [f for f in os.listdir(sub_dic) if \"txt\" in f]\nfiles = sorted(files,key=getNumber)\n\nprint(files)\n\nlabels = [\"$n=5$\", \"$n=10$\", \"$n=20$\", \"$n=40$\",\"$n=50$\"]\n\n\ndef saveplot(fig, filename):\n    file_str = join(sub_dic, 'figures', filename)\n    fig.savefig(file_str + \".pgf\",bbox_inches='tight')\n    fig.savefig(file_str + \".png\",bbox_inches='tight')\n\n\n\ndef print_all():\n    fig = plt.figure(1,(13/2.54,13/2.54))\n    ax = fig.add_subplot(1,1,1)\n\n    for i,filename in enumerate(files):\n        filename = join(sub_dic,filename)\n        print(filename)\n        M, Im = np.genfromtxt(filename,delimiter = \",\", unpack = True)\n        print(Im)\n        ax.plot(M, Im, 'o', ms = 4,label= labels[i])\n        \"\"\"\n        if i == 1 or i == 2:\n            ax.plot(M[0:-1:2],Im[0:-1:2],'o',ms = 4,label=filenames[i])\n        elif i == 0:\n            ax.plot(M[:1808],Im[:1808],'o',ms = 4,label=filenames[i])\n        else:\n            ax.plot(M,Im,'o',ms = 4,label=filenames[i])\n        \"\"\"\n    ax.xaxis.set_ticks_position(\"bottom\")\n    ax.yaxis.set_ticks_position(\"left\")\n    ax.set_xlabel(\"Number of configurations $M$\")\n    ax.set_ylabel(r\"Average distance between particles $\\\langle d \\\rangle$\")\n    #ax.set_ylim(6.65,7.15)\n    ax.legend(loc=\"best\", frameon=False, labelspacing=0.05)\n\n    fname = \"all_plots\"\n    print(fname)\n    saveplot(fig, fname)\n\ndef print_2():\n    filename = join(\"vary_n/\",files[3])\n    fig = plt.figure(1,(7.5/2.54,7.5/2.54))\n    ax = fig.add_subplot(1,1,1)\n    M, Im = np.genfromtxt(filename,delimiter = \"\\\t\", unpack = True)\n    print(M[1808])\n    ax.plot(M[:1808],Im[:1808],'o',ms = 4)\n    ax.xaxis.set_ticks_position(\"bottom\")\n    ax.yaxis.set_ticks_position(\"left\")\n    ax.set_xlabel(\"Number of configurations $M$\")\n    ax.set_ylabel(r\"Average distance between particles $\\\langle d \\\rangle$\")\n    ax.set_ylim(6.65,7.15)\n    ax.legend(loc=\"best\", frameon=False, labelspacing=0.05)\n    fname = files[3]\n    print(filename)\n    print(fname)\n\n    saveplot(fig, fname)\n\nif __name__ == '__main__':\n    print_all()\n    #print_5_20()\n\n    #print_2()\n","repo_name":"giuliaschneider/Computational-Physics","sub_path":"05-Monte-Carlo-Integral/python_scripts/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"}
{"seq_id":"34398151776","text":"from odoo.addons.iap.tools import iap_tools\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import AccessError, UserError\n\nimport logging\nimport time\n\n\n_logger = logging.getLogger(__name__)\n\nCLIENT_OCR_VERSION = 130\n\n# list of result id that can be sent by iap-extract\nSUCCESS = 0\nNOT_READY = 1\nERROR_INTERNAL = 2\nERROR_NOT_ENOUGH_CREDIT = 3\nERROR_DOCUMENT_NOT_FOUND = 4\nERROR_NO_DOCUMENT_NAME = 5\nERROR_UNSUPPORTED_IMAGE_FORMAT = 6\nERROR_FILE_NAMES_NOT_MATCHING = 7\nERROR_NO_CONNECTION = 8\nERROR_SERVER_IN_MAINTENANCE = 9\n\nERROR_MESSAGES = {\n    ERROR_INTERNAL: _(\"An error occurred\"),\n    ERROR_DOCUMENT_NOT_FOUND: _(\"The document could not be found\"),\n    ERROR_NO_DOCUMENT_NAME: _(\"No document name provided\"),\n    ERROR_UNSUPPORTED_IMAGE_FORMAT: _(\"Unsupported image format\"),\n    
ERROR_FILE_NAMES_NOT_MATCHING: _(\"You must send the same quantity of documents and file names\"),\n ERROR_NO_CONNECTION: _(\"Server not available. Please retry later\"),\n ERROR_SERVER_IN_MAINTENANCE: _(\"Server is currently under maintenance. Please retry later\"),\n}\n\n\nclass HrExpenseExtractionWords(models.Model):\n _name = \"hr.expense.extract.words\"\n _description = \"Extracted words from expense scan\"\n\n expense_id = fields.Many2one(\"hr.expense\", string=\"Expense\")\n word_text = fields.Char()\n word_page = fields.Integer()\n\n\nclass HrExpense(models.Model):\n _inherit = ['hr.expense']\n _order = \"state_processed desc, date desc, id desc\"\n\n @api.depends('extract_status_code')\n def _compute_error_message(self):\n for record in self:\n if record.extract_status_code != SUCCESS and record.extract_status_code != NOT_READY:\n record.extract_error_message = ERROR_MESSAGES.get(record.extract_status_code, ERROR_MESSAGES[ERROR_INTERNAL])\n else:\n record.extract_error_message = ''\n\n def _compute_can_show_send_resend(self, record):\n can_show = True\n if not self.env.company.expense_extract_show_ocr_option_selection or self.env.company.expense_extract_show_ocr_option_selection == 'no_send':\n can_show = False\n if record.state != 'draft':\n can_show = False\n if record.message_main_attachment_id is None or len(record.message_main_attachment_id) == 0:\n can_show = False\n return can_show\n\n @api.depends('state', 'extract_state', 'message_main_attachment_id')\n def _compute_show_resend_button(self):\n for record in self:\n record.extract_can_show_resend_button = self._compute_can_show_send_resend(record)\n if record.extract_state not in ['error_status', 'not_enough_credit']:\n record.extract_can_show_resend_button = False\n\n @api.depends('state', 'extract_state', 'message_main_attachment_id')\n def _compute_show_send_button(self):\n for record in self:\n record.extract_can_show_send_button = self._compute_can_show_send_resend(record)\n if record.extract_state not in ['no_extract_requested']:\n record.extract_can_show_send_button = False\n\n @api.depends('extract_state')\n def _compute_state_processed(self):\n for record in self:\n record.state_processed = record.extract_state in ['waiting_extraction', 'waiting_upload']\n\n extract_state = fields.Selection([('no_extract_requested', 'No extract requested'),\n ('not_enough_credit', 'Not enough credit'),\n ('error_status', 'An error occurred'),\n ('waiting_upload', 'Waiting upload'),\n ('waiting_extraction', 'Waiting extraction'),\n ('extract_not_ready', 'waiting extraction, but it is not ready'),\n ('waiting_validation', 'Waiting validation'),\n ('to_validate', 'To validate'),\n ('done', 'Completed flow')],\n 'Extract state', default='no_extract_requested', required=True, copy=False)\n extract_status_code = fields.Integer(\"Status code\", copy=False)\n extract_error_message = fields.Text(\"Error message\", compute=_compute_error_message)\n extract_remote_id = fields.Integer(\"Id of the request to IAP-OCR\", default=\"-1\", copy=False, readonly=True)\n extract_word_ids = fields.One2many(\"hr.expense.extract.words\", inverse_name=\"expense_id\", copy=False)\n extract_can_show_resend_button = fields.Boolean(\"Can show the ocr resend button\", compute=_compute_show_resend_button)\n extract_can_show_send_button = fields.Boolean(\"Can show the ocr send button\", compute=_compute_show_send_button)\n # We want to see the records that are just processed by OCR at the top of the list\n state_processed = fields.Boolean(string='Status 
regarding OCR status', compute=_compute_state_processed, store=True)\n\n def attach_document(self, **kwargs):\n \"\"\"when an attachment is uploaded, send the attachment to iap-extract if this is the first attachment\"\"\"\n self._autosend_for_digitization()\n\n def _autosend_for_digitization(self):\n if self.env.company.expense_extract_show_ocr_option_selection == 'auto_send':\n self.filtered('extract_can_show_send_button').action_manual_send_for_digitization()\n\n def _message_set_main_attachment_id(self, attachment_ids):\n super(HrExpense, self)._message_set_main_attachment_id(attachment_ids)\n self._autosend_for_digitization()\n\n def get_validation(self, field):\n\n text_to_send = {}\n if field == \"total\":\n text_to_send[\"content\"] = self.unit_amount\n elif field == \"date\":\n text_to_send[\"content\"] = str(self.date) if self.date else False\n elif field == \"description\":\n text_to_send[\"content\"] = self.name\n elif field == \"currency\":\n text_to_send[\"content\"] = self.currency_id.name\n elif field == \"bill_reference\":\n text_to_send[\"content\"] = self.reference\n return text_to_send\n\n @api.model\n def _cron_validate(self):\n \"\"\"Send user corrected values to the ocr\"\"\"\n exp_to_validate = self.search([('extract_state', '=', 'to_validate')])\n documents = {\n record.extract_remote_id: {\n 'total': record.get_validation('total'),\n 'date': record.get_validation('date'),\n 'description': record.get_validation('description'),\n 'currency': record.get_validation('currency'),\n 'bill_reference': record.get_validation('bill_reference')\n } for record in exp_to_validate\n }\n params = {\n 'documents': documents,\n 'version': CLIENT_OCR_VERSION,\n }\n endpoint = self.env['ir.config_parameter'].sudo().get_param(\n 'hr_expense_extract_endpoint', 'https://iap-extract.odoo.com') + '/api/extract/expense/1/validate_batch'\n try:\n iap_tools.iap_jsonrpc(endpoint, params=params)\n exp_to_validate.extract_state = 'done'\n except AccessError:\n pass\n\n def action_submit_expenses(self, **kwargs):\n res = super(HrExpense, self).action_submit_expenses(**kwargs)\n self.extract_state = 'to_validate'\n self.env.ref('hr_expense_extract.ir_cron_ocr_validate')._trigger()\n return res\n\n @api.model\n def check_all_status(self):\n for record in self.search([('state', '=', 'draft'), ('extract_state', 'in', ['waiting_extraction', 'extract_not_ready'])]):\n try:\n record._check_status()\n except:\n pass\n\n def check_status(self):\n \"\"\"contact iap to get the actual status of the ocr requests\"\"\"\n if any(rec.extract_state == 'waiting_upload' for rec in self):\n _logger.info(\"Manual trigger of the parse cron\")\n try:\n self.env.ref('hr_expense_extract.ir_cron_ocr_parse')._try_lock()\n self.env.ref('hr_expense_extract.ir_cron_ocr_parse').sudo().method_direct_trigger()\n except UserError:\n _logger.warning(\"Lock acquiring failed, cron is already running\")\n return\n\n records_to_update = self.filtered(lambda exp: exp.extract_state in ['waiting_extraction', 'extract_not_ready'])\n \n for record in records_to_update:\n record._check_status()\n\n limit = max(0, 20 - len(records_to_update))\n if limit > 0:\n records_to_preupdate = self.search([('extract_state', 'in', ['waiting_extraction', 'extract_not_ready']), ('id', 'not in', records_to_update.ids), ('state', '=', 'draft')], limit=limit)\n for record in records_to_preupdate:\n try:\n record._check_status()\n except:\n pass\n\n def _check_status(self):\n self.ensure_one()\n endpoint = self.env['ir.config_parameter'].sudo().get_param(\n 
'hr_expense_extract_endpoint', 'https://iap-extract.odoo.com') + '/api/extract/expense/1/get_result'\n params = {\n 'version': CLIENT_OCR_VERSION,\n 'document_id': self.extract_remote_id\n }\n result = iap_tools.iap_jsonrpc(endpoint, params=params)\n self.extract_status_code = result['status_code']\n if result['status_code'] == SUCCESS:\n self.extract_state = \"waiting_validation\"\n ocr_results = result['results'][0]\n self.extract_word_ids.unlink()\n\n description_ocr = ocr_results['description']['selected_value']['content'] if 'description' in ocr_results else \"\"\n total_ocr = ocr_results['total']['selected_value']['content'] if 'total' in ocr_results else \"\"\n date_ocr = ocr_results['date']['selected_value']['content'] if 'date' in ocr_results else \"\"\n currency_ocr = ocr_results['currency']['selected_value']['content'] if 'currency' in ocr_results else \"\"\n bill_reference_ocr = ocr_results['bill_reference']['selected_value']['content'] if 'bill_reference' in ocr_results else \"\"\n\n self.state = 'draft'\n if not self.name or self.name == self.message_main_attachment_id.name.split('.')[0]:\n self.name = description_ocr\n self.predicted_category = description_ocr\n predicted_product_id = self._predict_product(description_ocr, category=True)\n if predicted_product_id:\n self.product_id = predicted_product_id if predicted_product_id else self.product_id\n self.total_amount = total_ocr\n\n context_create_date = fields.Date.context_today(self, self.create_date)\n if not self.date or self.date == context_create_date:\n self.date = date_ocr\n\n if not self.total_amount:\n self.total_amount = total_ocr\n\n if not self.reference:\n self.reference = bill_reference_ocr\n\n if self.user_has_groups('base.group_multi_currency') and (not self.currency_id or self.currency_id == self.env.company.currency_id):\n currency = self.env[\"res.currency\"].search([\n '|', '|',\n ('currency_unit_label', 'ilike', currency_ocr),\n ('name', 'ilike', currency_ocr),\n ('symbol', 'ilike', currency_ocr)],\n limit=1\n )\n if currency:\n self.currency_id = currency\n\n elif result['status_code'] == NOT_READY:\n self.extract_state = 'extract_not_ready'\n else:\n self.extract_state = 'error_status'\n\n def action_manual_send_for_digitization(self):\n for rec in self:\n rec.env['iap.account']._send_iap_bus_notification(\n service_name='invoice_ocr',\n title=_(\"Expense is being Digitized\"))\n self.extract_state = 'waiting_upload'\n self.env.ref('hr_expense_extract.ir_cron_ocr_parse')._trigger()\n\n def action_send_for_digitization(self):\n if any(expense.state != 'draft' or expense.sheet_id for expense in self):\n raise UserError(_(\"You cannot send a expense that is not in draft state!\"))\n\n self.action_manual_send_for_digitization()\n\n if len(self) == 1:\n return {\n 'name': _('Generated Expense'),\n 'view_mode': 'form',\n 'res_model': 'hr.expense',\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'res_id': self[0].id,\n }\n else:\n return {\n 'name': _('Expenses sent'),\n 'type': 'ir.actions.act_window',\n 'view_mode': 'tree,form',\n 'res_model': 'hr.expense',\n 'target': 'current',\n 'domain': [('id', 'in', [expense.id for expense in self])],\n\n }\n\n @api.model\n def _cron_parse(self):\n for rec in self.search([('extract_state', '=', 'waiting_upload')]):\n rec.retry_ocr()\n rec.env.cr.commit()\n\n def retry_ocr(self):\n \"\"\"Retry to contact iap to submit the first attachment in the chatter\"\"\"\n self.ensure_one()\n if not self.env.company.expense_extract_show_ocr_option_selection or 
self.env.company.expense_extract_show_ocr_option_selection == 'no_send':\n            return False\n        attachments = self.message_main_attachment_id\n        if (\n            attachments.exists() and\n            self.extract_state in ['no_extract_requested', 'waiting_upload', 'not_enough_credit', 'error_status']\n        ):\n            account_token = self.env['iap.account'].get('invoice_ocr')\n            endpoint = self.env['ir.config_parameter'].sudo().get_param(\n                'hr_expense_extract_endpoint', 'https://iap-extract.odoo.com') + '/api/extract/expense/1/parse'\n\n            # This call contacts IAP to create the account if this is the first request. It allows IAP to grant free credits if the database is eligible.\n            self.env['iap.account'].get_credits('invoice_ocr')\n\n            user_infos = {\n                'user_company_VAT': self.company_id.vat,\n                'user_company_name': self.company_id.name,\n                'user_company_country_code': self.company_id.country_id.code,\n                'user_lang': self.env.user.lang,\n                'user_email': self.env.user.email,\n            }\n            baseurl = self.get_base_url()\n            webhook_url = f\"{baseurl}/hr_expense_extract/request_done\"\n            params = {\n                'account_token': account_token.account_token,\n                'version': CLIENT_OCR_VERSION,\n                'dbuuid': self.env['ir.config_parameter'].sudo().get_param('database.uuid'),\n                'documents': [x.datas.decode('utf-8') for x in attachments],\n                'user_infos': user_infos,\n                'webhook_url': webhook_url,\n            }\n            try:\n                result = iap_tools.iap_jsonrpc(endpoint, params=params)\n                self.extract_status_code = result['status_code']\n                if result['status_code'] == SUCCESS:\n                    self.extract_state = 'waiting_extraction'\n                    self.extract_remote_id = result['document_id']\n                    if 'isMobile' in self.env.context and self.env.context['isMobile']:\n                        for record in self:\n                            timer = 0\n                            while record.extract_state != 'waiting_validation' and timer < 10:\n                                timer += 1\n                                time.sleep(1)\n                                record._check_status()\n                elif result['status_code'] == ERROR_NOT_ENOUGH_CREDIT:\n                    self.extract_state = 'not_enough_credit'\n                else:\n                    self.extract_state = 'error_status'\n                    _logger.warning('There was an issue while doing the OCR operation on this file. Error: -1')\n\n            except AccessError:\n                self.extract_state = 'error_status'\n                self.extract_status_code = ERROR_NO_CONNECTION\n\n    def buy_credits(self):\n        url = self.env['iap.account'].get_credits_url(base_url='', service_name='invoice_ocr')\n        return {\n            'type': 'ir.actions.act_url',\n            'url': url,\n        }\n\n    @api.model\n    def get_empty_list_help(self, help):\n        expenses = self.search_count(\n            [\n                ('employee_id', 'in', self.env.user.employee_ids.ids),\n                ('state', 'in', ['draft', 'reported', 'approved', 'done', 'refused'])\n            ])\n        if self.env.user.has_group('hr_expense.group_hr_expense_manager') and (not isinstance(help, str) or \"o_view_nocontent_empty_folder\" not in help):\n            action_id = self.env.ref('hr_expense_extract.action_expense_sample_receipt').id\n            html_to_return = \"\"\"\n<p>\n    Drag and drop files to create expenses\n</p>\n<p>\n    Or\n</p>\n<p>\n    Did you try the mobile app?\n</p>\n<p>\n    Snap pictures of your receipts and let Odoo\n    automatically create expenses for you.\n</p>\n<p>\n    <img alt=\"Apple App Store\"/>\n    <img alt=\"Google Play Store\"/>\n</p>\n\"\"\"\n            if not expenses:\n                html_to_return += \"\"\"\n%(mail_alias)s\n<p>\n    <a type=\"action\" name=\"%(action_id)s\">Try Sample Receipt</a>\n</p>\n
\"\"\" % {'action_id': action_id, 'mail_alias': self._get_empty_list_mail_alias()}\n return html_to_return\n return super().get_empty_list_help(help)\n\n\nclass HrExpenseSheet(models.Model):\n _inherit = ['hr.expense.sheet']\n\n def action_register_payment(self):\n samples = self.mapped('expense_line_ids.sample')\n if samples.count(True):\n action = self.env['ir.actions.actions']._for_xml_id('hr_expense_extract.action_expense_sample_register')\n action['context'] = {'active_id': self.id}\n return action\n\n return super().action_register_payment()\n","repo_name":"dinar-it/odoo_16_enter","sub_path":"hr_expense_extract/models/hr_expense.py","file_name":"hr_expense.py","file_ext":"py","file_size_in_byte":18816,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"32175133079","text":"class Solution:\n \"\"\"\n @param S: A set of numbers.\n @return: A list of lists. All valid subsets.\n \"\"\"\n def subsetsWithDup(self, S):\n def subsetsHelper(lst=[], pos=0):\n result.append(lst[:])\n for i in xrange(pos, len(S)):\n if i != pos and S[i] == S[i-1]: #!!! i != pos, otherwise [1,1] will only out put [[], [1]] instead of [[], [1], [1,1]]\n continue\n lst.append(S[i])\n subsetsHelper(lst[:], i+1)\n lst.pop()\n # write your code here\n if S is None:\n return []\n S.sort()\n result = []\n subsetsHelper()\n return result\n\n # helper function for subsetsWithDup(S)\n '''def subsetsHelper(self, S, result, lst=[], pos=0):\n result.append(lst[:])###!!!!!!!!!!!!!!!!!!!!!!!!! append list by value not reference!!!!!!!!!!!!\n tmp = None\n for i in xrange(pos, len(S)):\n if tmp == S[i]:\n continue\n lst.append(S[i])\n self.subsetsHelper(S, result, lst, i+1)##### \"i+1\" not pos+1\n tmp = lst.pop()\n '''\n","repo_name":"pddpp/alogrithm-practice","sub_path":"Subsets-II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"24090397258","text":"import paho.mqtt.client as mqtt\nfrom variables import *\n\ndef on_message(client, userdata, message):\n print(\"Python: \", message.topic, \" - \", str(message.payload.decode(\"utf-8\")))\n actual_state = str(message.payload.decode(\"utf-8\"))\n client.unsubscribe(coffee_topic)\n send_state = \"\"\n if(actual_state == \"off\"): \n send_state = \"on\"\n else:\n send_state = \"off\"\n client.publish(coffee_topic, send_state, 0, True)\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(coffee_topic)\n\ndef on_publish(client, userdata, mid):\n client.disconnect()\n\ndef mqtt_client_connect():\n print(\"connected to: \", broker_url)\n client.connect(broker_url)\n client.loop_forever()\n \nclient = mqtt.Client(\"client_name\")\nclient.on_connect = on_connect\nclient.on_message = on_message \nclient.on_publish = on_publish \n\nmqtt_client_connect()\n","repo_name":"hussanhijazi/mqtt-coffee-python","sub_path":"on_of_coffee.py","file_name":"on_of_coffee.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1480936589","text":"class Solution(object):\n def solve(self,board,point):\n if point==81:\n return True\n r=point/9\n c=point%9\n if board[r][c]!='.':\n point+=1\n if self.solve(board,point):\n return True\n else:\n return False\n else:\n for i in range(1,10):\n t=str(i)\n if t not in self.m[(r/3)*3+c/3] and t not in 
self.line[r] and t not in self.v[c]:\n board[r][c]=t\n self.line[r].append(t)\n self.v[c].append(t)\n self.m[(r/3)*3+c/3].append(t)\n if self.solve(board,point+1):\n return True\n else:\n board[r][c]='.'\n self.line[r].remove(t)\n self.v[c].remove(t)\n self.m[(r/3)*3+c/3].remove(t)\n return False\n \n def solveSudoku(self, board):\n self.line=[[] for i in range(9)]\n self.v=[[] for i in range(9)]\n self.m=[[] for i in range(9)]\n for i in range(9):\n for j in range(9):\n if board[i][j]!='.':\n self.line[i].append(board[i][j])\n self.v[j].append(board[i][j])\n self.m[(i/3)*3+j/3].append(board[i][j])\n self.solve(board,0)\n","repo_name":"liupengs/LeetCode","sub_path":"Sudoku Solver.py","file_name":"Sudoku Solver.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"7368835491","text":"import glob\r\nfrom keras.models import load_model\r\nfrom numpy import genfromtxt\r\nimport numpy\r\nimport cv2\r\nimport os\r\nimport h5py\r\nfrom decimal import Decimal\r\nimport matplotlib.pyplot as plt\r\n\r\n#Global variables\r\ncwd = os.getcwd()\r\nimg_cols, img_rows = 128, 128\r\n\r\ndef get_im(path):\r\n #print(\"Path: \", path)\r\n img = cv2.imread(path)\r\n\r\n resized = cv2.resize(img, (img_cols, img_rows))\r\n return resized\r\n\r\ndef load_images_predict(model, path):\r\n res = ''\r\n files = glob.glob(path)\r\n\r\n crt = 0\r\n for fl in files:\r\n if \"res\" in fl:\r\n continue\r\n\r\n #Load the image\r\n fl = cwd + \"/\" + fl\r\n img = get_im(fl)\r\n\r\n #Predict a result\r\n img = numpy.array(img).astype('float32')\r\n img /= 255\r\n img = numpy.reshape(img, (1, img_cols, img_rows, 3))\r\n\r\n prediction = model.predict(x = img)\r\n prediction = prediction[0][0]\r\n\r\n #print(path + \": \" + str(prediction) )\r\n #image = Image.open(path)\r\n #image.show()\r\n\r\n #Scatter the result\r\n #plt.scatter(x=prediction, y = 0)\r\n\r\n #Save the result\r\n res += str(fl) + \" => prediction: \" + str( prediction ) + \"\\n\"\r\n\r\n #crt += 1\r\n #if crt >= 550:\r\n # break\r\n\r\n plt.show()\r\n\r\n return res\r\n\r\ndef save_string_to_file(string, path):\r\n f = open(path, 'w')\r\n f.write(string)\r\n f.close()\r\n\r\ndef main():\r\n # Load the model\r\n model = load_model('12Captcha/12v4CNNLSTMModel-TrainCNNWeights-14-143922.4646.hdf5')\r\n\r\n # Load each image folder and predict\r\n predictionsGoodMushrooms = load_images_predict(model, os.path.join('mushrooms', 'GoodMushrooms', '*.jpg'))\r\n save_string_to_file(predictionsGoodMushrooms, './GoodMushroomsPredictions.txt')\r\n\r\n # Load each image folder and predict\r\n predictionsBadMushrooms = load_images_predict(model, os.path.join('mushrooms', 'BadMushrooms', '*.jpg'))\r\n save_string_to_file(predictionsBadMushrooms, './BadMushroomsPredictions.txt')\r\n \r\n#Execute the main function \r\nmain()","repo_name":"msorins/AI-MODELS","sub_path":"Keras/07LoadAndUseModel.py","file_name":"07LoadAndUseModel.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"19643776149","text":"import cv2\nimport numpy as np\nfrom numpy.typing import NDArray\nimport os\n\n\ndef make_combined_clip(frames) -> NDArray:\n \"\"\"Make a combined video from the source and target list of bgr frames.\"\"\"\n src_image = frames[0][0]\n tgt_image = frames[1][0]\n tgt_new_height = src_image.shape[0]\n tgt_aspect_ratio = tgt_image.shape[1] / tgt_image.shape[0] # width / height\n 
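# Scale the target frames to the source clip's height while preserving the\n    # target's own aspect ratio, so the two clips can be concatenated side by side.\n    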
tgt_new_width = int(tgt_new_height * tgt_aspect_ratio)\n combined_clip = []\n for src, tgt in frames:\n combined_frame = np.concatenate([src, cv2.resize(tgt, (tgt_new_width, tgt_new_height))], axis=1)\n combined_clip.append(combined_frame)\n combined_clip = np.stack(combined_clip)\n\n # Cannot have odd shape in height or width.\n if combined_clip.shape[1] % 2 == 1:\n combined_clip = combined_clip[:, 1:, :, :]\n if combined_clip.shape[2] % 2 == 1:\n combined_clip = combined_clip[:, :, 1:, :]\n\n combined_clip = np.ascontiguousarray(np.stack(combined_clip, axis=0)) # TxHxWxC\n return combined_clip\n\n\n# This is currently NOT parallelized.\ndef save_images_to_video(images: np.ndarray, outfile: str, fps=6):\n if os.path.exists(outfile):\n os.remove(outfile)\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n images = [images[i] for i in range(images.shape[0])]\n height, width = images[0].shape[:2]\n writer = cv2.VideoWriter(outfile, fourcc, fps, (width,height), True)\n for frame in images:\n writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n writer.release()\n\n","repo_name":"mikepieper/assyst-common","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70006479063","text":"from flask import jsonify\n\nfrom server import db\nfrom ..models.dish import (\n Dish,\n DishSchema,\n CreateDishSchema,\n UpdateDishSchema\n)\n\n\ndef all_dishes():\n dish_schema = DishSchema(many=True)\n dishes = Dish.query.all()\n response = dish_schema.dump(dishes)\n return jsonify(response), 200\n\n\ndef get_a_dish(dish_id):\n dish = Dish.query.get(dish_id)\n if dish:\n dish_schema = DishSchema()\n response = dish_schema.dump(dish), 200\n else:\n response = jsonify('Dish not found'), 404\n return response\n\n\ndef create_dish(data):\n create_dish_schema = CreateDishSchema()\n errors = create_dish_schema.validate(data)\n if errors:\n return jsonify(errors), 400\n dish = Dish.query.filter_by(name=data['name']).first()\n if not dish:\n dish = Dish(\n name=data['name'],\n description=data['description'],\n country=data['country'],\n category=data['category']\n )\n _save_dish(dish)\n dish_schema = DishSchema()\n response = dish_schema.dump(dish), 201\n else:\n response = jsonify('Dish already exists'), 409\n return response\n\n\ndef update_dish(data, dish_id):\n update_dish_schema = UpdateDishSchema()\n errors = update_dish_schema.validate(data)\n if errors:\n return jsonify(errors), 400\n dish = Dish.query.get(dish_id)\n if dish:\n check_for_update = ['description', 'country', 'category']\n for field in check_for_update:\n if data.get(field):\n setattr(dish, field, data[field])\n _save_dish(dish)\n response = jsonify('Dish sucessfully updated'), 200\n else:\n response = jsonify('Dish not found'), 404\n return response\n\n\ndef delete_a_dish(dish_id):\n dish = Dish.query.get(dish_id)\n if dish:\n db.session.delete(dish)\n db.session.commit()\n response = jsonify('Dish deleted'), 200\n else:\n response = jsonify('Dish not found'), 404\n return response\n\n\ndef _save_dish(dish):\n db.session.add(dish)\n db.session.commit()\n","repo_name":"CarlosEspinoTimon/containerized-environment","sub_path":"backend/server/services/dish_service.py","file_name":"dish_service.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"212031394","text":"# -- coding:utf-8 
--\n\"\"\"\n\nCompute the per-character accuracy.\n\"\"\"\nfrom keras import backend as K\nimport os\nimport shutil\nfrom imp import reload\nimport tensorflow as tf\nimport cv2\nfrom PIL import Image\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.utils import multi_gpu_model\n\nfrom chinese_ocr.train.train import random_uniform_num, get_session, get_model\nfrom chinese_ocr.densenet_common import densenet\nfrom chinese_ocr.densenet_common.densenet_model import data_generator\nfrom chinese_ocr.train.synthtext_config import SynthtextConfig\nfrom chinese_ocr.densenet_common.dataset_format import DataSetSynthtext\nfrom predict_tf_tool import DensenetOcr\n\nreload(densenet)\nK.set_learning_phase(0)\n\ndataset_path = \"/media/chenhao/study/code/other/out\"\n\nclass_id_file = \"char_7476.txt\"\ntrain_label_name = \"label_train.txt\"\nval_label_name = \"label_val.txt\"\ntest_label_name = \"label_test.txt\"\ndataset_format = 0\nsub_img_folder = \"default\"\n\ndata_test = DataSetSynthtext()\ndata_test.load_data(class_id_file, dataset_path, test_label_name, subset=sub_img_folder)\n# label_list = data.load_instance(33)\n# print(label_list)\ndata_test.prepare()\n\nnclass = data_test.num_classes\nchar_set_line = data_test.char_set_line\nprint(\"class num:\", nclass)\nprint(\"char_set_line:\", char_set_line)\n#\n# input = Input(shape=(32, None, 1), name='the_input')\n# # input = Input(tensor=resize_image)\n# y_pred= densenet.dense_cnn(input, nclass)\n# basemodel = Model(inputs=input, outputs=y_pred)\n#\n# modelPath = '/media/chenhao/study/code/work/github/chinese_ocr/chinese_ocr/models/weights_densenet-12-0.98.h5'\n# # basemodel = multi_gpu_model(basemodel, gpus=8)\n# print(modelPath)\n# w = basemodel.get_weights()[2]\n# print(w)\n# print(\"loading weights..............\")\n# basemodel.load_weights(modelPath)\n# w = basemodel.get_weights()[2]\n# print(w)\n\nconfig = SynthtextConfig()\nconfig.display()\n''''''\ntest_gen = data_generator(data_test, config=config)\n\nprint(\"test num_images\", data_test.num_images)\n# predict\n\nocr_predict_7476 = DensenetOcr(\"./train/char_7476.txt\", model_name=\"7476_model\")\n\n\ndef eva_batch():\n    aaa = next(test_gen)\n    # print(aaa)\n    input_tuple = aaa[0]\n    # print(input_tuple[\"the_input\"].shape)\n    batch_lines_img = input_tuple[\"the_input\"]\n    y_true = input_tuple[\"the_labels\"]\n    img_paths = input_tuple[\"img_paths\"]\n\n    batch_img_result = ocr_predict_7476.run_func(batch_lines_img)\n    print(batch_img_result.shape)\n\n    accs = []\n    for i in range(config.BATCH_SIZE):\n        one_line = batch_img_result[i, :, :]\n        line_label = y_true[i, :]\n        line_label = np.array(line_label).astype(int)\n        one_line = np.expand_dims(one_line, axis=0)\n        line_result = ocr_predict_7476.decode_to_id(one_line)\n\n        print(\"y_pres \", line_result)\n        print(\"line_label \", line_label)\n        print(\"img_path \", img_paths[i])\n        print(\"id_to_char \", ocr_predict_7476.id_to_char(line_result))\n\n        \"\"\"\n        y_pres  [1, 360, 21, 5, 5, 175, 16, 36, 26, 258, 264]\n        line_label  [ 1 360  21   5 175  16  36  26 258 264]\n        img_path  train/images/20459843_2752426851.jpg\n        的近20名大学生保安\n        id_to_char  的近200名大学生保安\n        duplicates found [1, 360, 21, 5, 5, 175, 16, 36, 26, 258, 264]\n        acc 0.4\n        \"\"\"\n        # if len(line_result) > len(line_label):\n        #     aaa = line_result.copy()\n        #     # if remove_duplicates(aaa) == len(line_label):\n        #     print(\"--------------> duplicates found\", aaa)\n        #     # line_result = aaa[:len(line_label)]\n        #     line_result = line_result[:len(line_label)]\n        pre_arr = np.array(line_result)\n        result = calculate_char_equal(line_label, pre_arr)\n        totalRight = np.sum(result)\n        acc = totalRight / len(result)\n        print('acc', acc)\n        accs.append(acc)\n    print(\"batch mean acc\", np.mean(accs))\n    return accs\n\n
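# Example of the padding/truncation alignment performed by calculate_char_equal\n# below (hypothetical id sequences, not taken from a real run):\n# label [7, 3, 9], prediction [7, 3]    -> padded to [7, 3, -1] -> [True, True, False]\n# label [7, 3],    prediction [7, 3, 9] -> truncated to [7, 3]  -> [True, True]\n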
\ndef calculate_char_equal(line_label, predict_arr):\n    label_len = len(line_label)\n    predict_len = predict_arr.shape[0]\n    print(\"predict_len \", predict_len)\n    # if the prediction is shorter than the label, pad it with a filler value\n    if predict_len < label_len:\n        print(\"predict shorter than true label\")\n        supplement = label_len - predict_len\n        p_ = -1  # pad with -1\n        for i in range(supplement):\n            predict_arr = np.append(predict_arr, p_)  # append p_ directly to predict_arr\n    # if the prediction is longer, simply truncate it\n    elif predict_len > label_len:\n        print(\"predict longer than true label\")\n        predict_arr = predict_arr[:label_len]\n    else:\n        pass\n    return np.equal(line_label, predict_arr)\n\n\naccs = []\n# epoch = data_test.num_images // config.BATCH_SIZE\nepoch = 50\nfor i in range(epoch):\n    print(\"---------------- eval epoch ----------\", i)\n    acc = eva_batch()\n    accs += acc\n\nprint(accs)\nprec = np.mean(accs)\nprint(\"total mean acc \", prec)\n","repo_name":"bing1zhi2/chinese_ocr","sub_path":"chinese_ocr/7476_model_eval.py","file_name":"7476_model_eval.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"12"}
{"seq_id":"36365539843","text":"\"\"\"Pytest configuration.\"\"\"\nimport pytest\n\n\ndef pytest_addoption(parser):\n    parser.addoption(\n        \"--slow\",\n        action=\"store_true\",\n        default=False,\n        help=\"Run every test, even slow ones.\",\n    )\n\n\ndef pytest_configure(config):\n    config.addinivalue_line(\"markers\", \"slow: Mark a test or option as slow to run\")\n\n\ndef pytest_collection_modifyitems(config, items):\n    if config.getoption(\"--slow\"):\n        # --slow given in cli: do not skip slow tests\n        return\n    skip_slow = pytest.mark.skip(reason=\"skipped, --slow not selected\")\n    for item in items:\n        if \"slow\" in item.keywords:\n            item.add_marker(skip_slow)\n","repo_name":"tequilahub/tequila","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":334,"dataset":"github-code","pt":"12"}
{"seq_id":"34360432596","text":"from odoo import fields, models, _\nfrom odoo.exceptions import AccessError\n\n\nclass Digest(models.Model):\n    _inherit = 'digest.digest'\n\n    kpi_account_bank_cash = fields.Boolean('Bank & Cash Moves')\n    kpi_account_bank_cash_value = fields.Monetary(compute='_compute_kpi_account_total_bank_cash_value')\n\n    def _compute_kpi_account_total_bank_cash_value(self):\n        if not self.env.user.has_group('account.group_account_user'):\n            raise AccessError(_(\"Do not have access, skip this data for user's digest email\"))\n        for record in self:\n            start, end, company = record._get_kpi_compute_parameters()\n            account_moves = self.env['account.move']._read_group([\n                ('date', '>=', start),\n                ('date', '<', end),\n                ('journal_id.type', 'in', ['cash', 'bank']),\n                ('company_id', '=', company.id)], ['journal_id', 'amount_total'], ['journal_id'])\n            record.kpi_account_bank_cash_value = sum([account_move['amount_total'] for account_move in account_moves])\n\n    def _compute_kpis_actions(self, company, user):\n        res = super(Digest, self)._compute_kpis_actions(company, user)\n        res.update({'kpi_account_bank_cash': 'account.open_account_journal_dashboard_kanban&menu_id=%s' % (self.env.ref('account.menu_finance').id)})\n        return 
res\n","repo_name":"dinar-it/odoo_16_enter","sub_path":"account_accountant/models/digest.py","file_name":"digest.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"40963371328","text":"\"\"\"Dataset class.\"\"\"\nfrom pathlib import Path\nfrom typing import Sequence\n\nimport monai\nimport pandas as pd\nimport numpy as np\nimport torch\nfrom typeguard import typechecked\n\nfrom deepsurfer_train import locations\nfrom deepsurfer_train.data import (\n list_dataset_files,\n MEDIAN_VOLUMES,\n BACKGROUND_MEDIAN_VOLUME,\n)\nfrom deepsurfer_train.enums import (\n DatasetPartition,\n BrainRegions,\n)\nfrom deepsurfer_train.preprocess.transforms import (\n RandomizableIdentityd,\n SynthTransformd,\n VoxynthAugmentd,\n)\n\n\nDEFAULT_EXCLUDED_REGIONS = (\n \"FIFTH_VENTRICLE\",\n \"NON_WM_HYPOINTENSITIES\",\n \"LEFT_VESSEL\",\n \"RIGHT_VESSEL\",\n)\n\n\ndef get_label_mapping(\n excluded_regions: Sequence[BrainRegions] | None = None,\n) -> pd.DataFrame:\n \"\"\"Get a mapping between original label and \"internal\" label.\n\n The \"internal\" set of labels is consecutive, starting at 1, and omits\n any excluded labels. The \"merged internal\" set of labels is similar\n but additionally merges lateralized labels into a single label.\n\n Parameters\n ----------\n excluded_regions: Sequence[deepsurfer_train.enums.BrainRegions] | None\n Regions to exclude from the output labels.\n\n Returns\n -------\n pandas.DataFrame\n Mapping of all original label values to internal labels and\n merged internal labels. Contains the following columns:\n \"name\", \"original_value, \"internal_value\", \"merged_name\",\n \"merged_internal_value\".\n\n \"\"\"\n\n def merge_name(name: str) -> str:\n for lat_str in [\"LEFT_\", \"RIGHT_\"]:\n if name.startswith(lat_str):\n return name.split(lat_str, maxsplit=1)[1]\n return name\n\n # Rows will be appended here\n mapping: list[dict[str, int | str]] = []\n next_label = 1\n next_merged_label = 1\n\n # Dictionary of merged label names to values\n merged_labels: dict[str, int] = {}\n\n background_row = {\n \"name\": \"BACKGROUND\",\n \"original_value\": 0,\n \"internal_value\": 0,\n \"original_volume\": BACKGROUND_MEDIAN_VOLUME,\n \"merged_name\": \"BACKGROUND\",\n \"merged_internal_value\": 0,\n }\n mapping.append(background_row)\n\n excluded_regions = [] if excluded_regions is None else excluded_regions\n for label in BrainRegions:\n row = {\"name\": label.name, \"original_value\": label.value}\n row[\"original_volume\"] = MEDIAN_VOLUMES[label]\n if label in excluded_regions:\n # Map this value to zero\n row[\"internal_value\"] = 0\n row[\"merged_internal_value\"] = 0\n else:\n row[\"internal_value\"] = next_label\n merged_name = merge_name(label.name)\n next_label += 1\n\n row[\"merged_name\"] = merged_name\n if merged_name in merged_labels:\n row[\"merged_internal_value\"] = merged_labels[merged_name]\n else:\n row[\"merged_internal_value\"] = next_merged_label\n merged_labels[merged_name] = next_merged_label\n next_merged_label += 1\n\n mapping.append(row)\n\n mapping_df = pd.DataFrame(mapping)\n\n # Calculate weights for each internal class as inversely proportional\n # to the median volume, rescaled to sum to 1\n mapping_df[\"weight\"] = pd.Series(0.0, index=mapping_df.index)\n weight_sum = 0.0\n for v in range(mapping_df.internal_value.max() + 1):\n source_rows = mapping_df[mapping_df.internal_value == v]\n internal_volume = source_rows.original_volume.sum()\n weight = 
1.0 / internal_volume\n mapping_df.loc[\n mapping_df.internal_value == v,\n \"weight\"\n ] = weight\n weight_sum += weight\n mapping_df[\"weight\"] = mapping_df[\"weight\"] / weight_sum\n\n # Repeat for the merged classes\n mapping_df[\"merged_weight\"] = pd.Series(0.0, index=mapping_df.index)\n weight_sum = 0.0\n for v in range(mapping_df.merged_internal_value.max() + 1):\n source_rows = mapping_df[mapping_df.merged_internal_value == v]\n merged_internal_volume = source_rows.original_volume.sum()\n weight = 1.0 / merged_internal_volume\n mapping_df.loc[\n mapping_df.merged_internal_value == v,\n \"merged_weight\"\n ] = weight\n weight_sum += weight\n mapping_df[\"merged_weight\"] = mapping_df[\"merged_weight\"] / weight_sum\n\n return mapping_df\n\n\n@typechecked\nclass DeepsurferSegmentationDataset(monai.data.CacheDataset):\n \"\"\"Dataset for use with segmentation problems.\"\"\"\n\n def __init__(\n self,\n dataset: str,\n partition: DatasetPartition | str | None,\n processed_version: str | None = None,\n root_dir: Path | str = locations.project_dataset_dir,\n image_file: str = \"mri/brainmask.mgz\",\n mask_file: str = \"mri/aseg.mgz\",\n imsize: Sequence[int] | int = 255,\n excluded_regions: Sequence[BrainRegions | str]\n | None = DEFAULT_EXCLUDED_REGIONS,\n use_spatial_augmentation: bool = False,\n use_intensity_augmentation: bool = False,\n synth_probability: float = 0.0,\n use_gpu: bool = True,\n ):\n \"\"\"Dataset class encapsulate loading and transforming data.\n\n Parameters\n ----------\n dataset: str\n Name of dataset, e.g. \"buckner40\"\n partition: deepsurfer_train.enums.DatasetPartition | str\n Partition of the dataset (or none, which will return\n entire dataset).\n processed_version: str | None\n Version of processing used to provide images and labels.\n If not specified, the latest version of freesurfer for\n which processed images exist will be used.\n root_dir: Path | str\n The root directory of all datasets.\n imsize: Sequence[int] | int\n Image size in pixels.\n excluded_regions: Sequence[deepsurfer_train.enums.BrainRegions | str] | None\n Brain regions to omit from the segmentation masks.\n use_spatial_augmentation: bool\n Whether to augment images and masks with spatial transforms.\n use_intensity_augmentation: bool\n Whether to augment images and masks with intensity transforms.\n synth_probability: float\n Probability of applying the synth transform to the mask.\n use_gpu: bool\n Use the GPU for all preprocessing.\n\n \"\"\"\n partition = DatasetPartition(partition)\n image_key = \"image\"\n mask_key = \"mask\"\n merged_mask_key = \"merged_mask\"\n elements_list = list_dataset_files(\n dataset=dataset,\n filenames={\n image_key: image_file,\n mask_key: mask_file,\n },\n partition=partition,\n processed_version=processed_version,\n root_dir=root_dir,\n )\n if isinstance(imsize, int):\n imsize = [imsize] * 3\n if len(imsize) != 3:\n raise ValueError(\"Imsize must have length 3.\")\n\n if synth_probability < 0.0 or synth_probability > 1.0:\n raise ValueError(\n \"Argument 'synth_probability' must be between 0.0 and 1.0.\"\n )\n\n excluded_regions = [] if excluded_regions is None else excluded_regions\n excluded_regions_ = [\n r if isinstance(r, BrainRegions) else BrainRegions[r]\n for r in excluded_regions\n ]\n self.label_mapping = get_label_mapping(excluded_regions_)\n\n load_keys = [image_key, mask_key]\n all_keys = [image_key, mask_key, merged_mask_key]\n\n transforms: list[monai.transforms.MapTransform] = []\n\n transforms.append(\n 
monai.transforms.LoadImaged(\n keys=load_keys,\n ensure_channel_first=True,\n simple_keys=True,\n reader=\"NibabelReader\",\n image_only=True,\n )\n )\n transforms.extend(\n [\n monai.transforms.MapLabelValued(\n keys=mask_key,\n orig_labels=self.label_mapping.original_value.values.tolist(),\n target_labels=self.label_mapping.internal_value.values.tolist(),\n ),\n monai.transforms.CopyItemsd(\n keys=[mask_key],\n names=[merged_mask_key],\n ),\n monai.transforms.MapLabelValued(\n keys=merged_mask_key,\n orig_labels=self.label_mapping.internal_value.values.tolist(),\n target_labels=self.label_mapping.merged_internal_value.values.tolist(),\n ),\n ]\n )\n\n # Scale intensity from input range\n transforms.append(\n monai.transforms.ScaleIntensityRanged(\n keys=[image_key],\n a_min=0.0,\n a_max=255.0,\n b_min=0.0,\n b_max=1.0,\n clip=True,\n )\n )\n\n if use_gpu:\n device = torch.device(\"cuda:0\")\n # This prevents items being cached on the GPU\n transforms.append(\n RandomizableIdentityd(keys=[image_key, mask_key, \"subject_id\"])\n )\n transforms.append(monai.transforms.ToDeviced(device=device, keys=all_keys))\n else:\n device = torch.device(\"cpu\")\n\n transforms.append(\n monai.transforms.Orientationd(\n keys=all_keys,\n axcodes=\"PLI\",\n lazy=True,\n )\n )\n\n transforms.append(\n monai.transforms.ResizeWithPadOrCropd(\n keys=all_keys,\n spatial_size=imsize,\n lazy=True,\n )\n )\n\n if use_spatial_augmentation:\n transforms.append(\n monai.transforms.RandAffined(\n keys=all_keys,\n prob=0.75,\n rotate_range=np.pi / 8.0,\n shear_range=np.pi / 16.0,\n translate_range=10.0,\n scale_range=0.2,\n mode=[\"bilinear\", \"nearest\", \"nearest\"],\n padding_mode=\"zeros\",\n lazy=True,\n spatial_size=imsize,\n cache_grid=True,\n device=device,\n )\n )\n\n if use_intensity_augmentation:\n if synth_probability > 0.0:\n transforms.append(\n SynthTransformd(\n mask_key=merged_mask_key,\n image_output_keys=[image_key],\n apply_probability=synth_probability,\n )\n )\n\n transforms.append(\n VoxynthAugmentd(\n keys=[image_key],\n mask_key=None,\n bias_field_probability=0.2,\n inversion_probability=0.0,\n smoothing_probability=0.2,\n smoothing_one_axis_probability=0.2,\n background_noise_probability=0.0,\n background_blob_probability=0.0,\n added_noise_probability=0.5,\n added_noise_max_sigma=0.1,\n wave_artifact_probability=0.2,\n line_corruption_probability=0.2,\n gamma_scaling_probability=0.2,\n resized_one_axis_probability=0.2,\n )\n )\n\n transforms.extend(\n [\n monai.transforms.AsDiscreted(\n keys=[mask_key],\n to_onehot=int(self.label_mapping.internal_value.max()) + 1,\n ),\n monai.transforms.AsDiscreted(\n keys=[merged_mask_key],\n to_onehot=int(self.label_mapping.merged_internal_value.max()) + 1,\n ),\n ]\n )\n\n composed_transforms = monai.transforms.Compose(transforms, lazy=True)\n\n super().__init__(elements_list, composed_transforms)\n\n # A transform that can be used to map the data back to the original\n non_omitted_mapping = self.label_mapping[self.label_mapping.internal_value > 0]\n self.inverse_label_map_transform = monai.transforms.MapLabelValue(\n orig_labels=non_omitted_mapping.internal_value.values.tolist(),\n target_labels=non_omitted_mapping.original_value.values.tolist(),\n )\n self.image_key = image_key\n self.mask_key = mask_key\n self.merged_mask_key = merged_mask_key\n\n def get_region_label_enum(self, label: int) -> BrainRegions:\n \"\"\"Get the original label for an internal label pixel value.\n\n Parameters\n ----------\n label: int\n Internal label value (used in 
segmentation masks).\n\n        Returns\n        -------\n        deepsurfer_train.enums.BrainRegions:\n            Original label corresponding to the input label.\n\n        \"\"\"\n        row = self.label_mapping[self.label_mapping.internal_value == label].iloc[0]\n        return BrainRegions(row.original_value)\n\n    def get_region_label(self, label: int) -> str:\n        \"\"\"Get the original label for an internal label pixel value.\n\n        Parameters\n        ----------\n        label: int\n            Internal label value (used in segmentation masks).\n\n        Returns\n        -------\n        str:\n            Original label name corresponding to the input label.\n\n        \"\"\"\n        row = self.label_mapping[self.label_mapping.internal_value == label].iloc[0]\n        # Use item access here: attribute access (row.name) would return the pandas\n        # Series' index label rather than the \"name\" column.\n        return row[\"name\"]\n\n    def get_merged_region_label(self, label: int) -> str:\n        \"\"\"Get the original label for a merged internal label pixel value.\n\n        Parameters\n        ----------\n        label: int\n            Internal label value (used in segmentation masks).\n\n        Returns\n        -------\n        str:\n            Label name corresponding to the merged input label.\n\n        \"\"\"\n        row = self.label_mapping[\n            self.label_mapping.merged_internal_value == label\n        ].iloc[0]\n        return row.merged_name\n\n    @property\n    def labels(self) -> list[str]:\n        \"\"\"List of all labels used, in order, excluding background (0).\"\"\"\n        return self.label_mapping[\n            self.label_mapping.internal_value > 0\n        ].sort_values(\"internal_value\").name.values.tolist()\n\n    @property\n    def merged_labels(self) -> list[str]:\n        \"\"\"List of all merged labels used, in order, excluding background (0).\"\"\"\n        return (\n            self.label_mapping[self.label_mapping.merged_internal_value > 0]\n            .sort_values(\"merged_internal_value\")\n            .name.values.tolist()\n        )\n\n    @property\n    def n_foreground_labels(self) -> int:\n        \"\"\"int: Number of foreground labels in segmentation masks.\"\"\"\n        return int(self.label_mapping.internal_value.max())\n\n    @property\n    def n_merged_foreground_labels(self) -> int:\n        \"\"\"int: Number of merged foreground labels in segmentation masks.\"\"\"\n        return int(self.label_mapping.merged_internal_value.max())\n\n    @property\n    def n_total_labels(self) -> int:\n        \"\"\"int: Number of total labels (inc background) in segmentation masks.\"\"\"\n        return self.n_foreground_labels + 1\n\n    @property\n    def n_total_merged_labels(self) -> int:\n        \"\"\"int: Number of total labels (inc background) in segmentation masks.\"\"\"\n        return self.n_merged_foreground_labels + 1\n\n    @property\n    def weights(self) -> list[float]:\n        \"\"\"Weights for each internal label (including background).\"\"\"\n        return [\n            self.label_mapping[self.label_mapping.internal_value == v].iloc[0].weight\n            for v in range(self.label_mapping.internal_value.max() + 1)\n        ]\n\n    @property\n    def merged_weights(self) -> list[float]:\n        \"\"\"Weights for each merged internal label (including background).\"\"\"\n        return [\n            self.label_mapping[self.label_mapping.merged_internal_value == v].iloc[0].merged_weight\n            for v in range(self.label_mapping.merged_internal_value.max() + 1)\n        ]\n\n    def get_unmerging_indices(self) -> list[int]:\n        \"\"\"Get indices to use to undo the label merging.\n\n        Returns\n        -------\n        list[int]:\n            List of integers that, when applied to the channel dimension of a\n            one-hot encoded array of the merged label set (with lateral\n            structures merged into a single label), creates a one-hot encoded\n            array of the unmerged internal labels. 
This is used to map the\n sagittal model's outputs to match the shape of the other models\n in preparation for merging.\n\n \"\"\"\n df = self.label_mapping[self.label_mapping.internal_value > 0].sort_values(\n \"internal_value\"\n )\n return [0] + df.merged_internal_value.values.tolist()\n","repo_name":"CPBridge/deepsurfer-train","sub_path":"deepsurfer_train/preprocess/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":16982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4340390360","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 26 09:13:40 2019\n\n@author: falcon1\n\"\"\"\nfrom configparser import ConfigParser\nimport numpy as np\nfrom sklearn.utils import shuffle\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom semisupLearner_keras import displayMetrics\nfrom semisupLearner_keras import sup_loss, SemisupLearner\nfrom dataHelper import loadBenchmarkData,padSequences\nfrom nets import semisup_net, sup_net\nfrom keras import optimizers\n\ndef readConfParam(loc):\n conf = ConfigParser()\n conf.read('conf.ini')\n confParam = {}\n confParam['posFile'] = conf.get(loc, 'posFile')\n confParam['negFile'] = conf.get(loc, 'negFile')\n confParam['fpostrain'] = conf.get(loc, 'fpostrain')\n confParam['fnegtrain'] = conf.get(loc, 'fnegtrain')\n confParam['fpostest'] = conf.get(loc, 'fpostest')\n confParam['fnegtest'] = conf.get(loc, 'fnegtest')\n confParam['fposvldt'] = conf.get(loc, 'fposvldt')\n confParam['fnegvldt'] = conf.get(loc, 'fnegvldt')\n \n confParam['batch_size'] = int(conf.get('netparam', 'batch_size'))\n confParam['epochs'] = int(conf.get('netparam', 'epochs'))\n confParam['patience'] = int(conf.get('netparam', 'patience'))\n confParam['learning_rate'] = float(conf.get('netparam', 'learning_rate'))\n \n confParam['rampup_length'] = int(conf.get('semisupparam', 'rampup_length'))\n confParam['rampdown_length'] = int(conf.get('semisupparam', 'rampdown_length'))\n confParam['learning_rate_max'] = float(conf.get('semisupparam', 'learning_rate_max'))\n confParam['scaled_unsup_weight_max'] = int(conf.get('semisupparam', 'scaled_unsup_weight_max'))\n confParam['gammer'] = float(conf.get('semisupparam', 'gammer'))\n confParam['beita'] = float(conf.get('semisupparam', 'beita'))\n \n confParam['hasBenchmarkData'] = bool(int(conf.get('otherparam', 'hasBenchmarkData')))\n confParam['maxlen'] = int(conf.get('otherparam', 'maxlen'))\n \n return confParam\n\ndef supLearn(x_train, y_train, x_test, y_test, modelFile, noteInfo, metricsFile, **confParam):\n # supervised learning\n model = sup_net()\n model.compile(loss=sup_loss, optimizer=optimizers.Adam(lr=confParam['learning_rate']), metrics=['accuracy'])\n model.fit(x_train, y_train, \n batch_size=confParam['batch_size'], \n epochs=confParam['epochs'], \n validation_split=0.1,\n callbacks=[EarlyStopping(patience=confParam['patience']),\n ModelCheckpoint(filepath=modelFile,\n save_weights_only=True,\n save_best_only=False)])\n pred_prob = model.predict(x_test)\n # print predicting metrics\n displayMetrics(y_test, pred_prob, noteInfo, metricsFile) \n \ndef semisupLearn(x_train, y_train, x_test, y_test, modelFile, noteInfo, metricsFile, **confParam):\n # semi-supervised learning\n model = semisup_net() \n ssparam={}\n ssparam['x_train'] = x_train\n ssparam['y_train'] = [y_train, y_train]\n ssparam['batch_size'] = confParam['batch_size']\n ssparam['epochs'] = confParam['epochs']\n ssparam['patience'] = confParam['patience'] \n 
ssparam['rampup_length'] = confParam['rampup_length']\n ssparam['rampdown_length'] = confParam['rampdown_length']\n ssparam['learning_rate_max'] = confParam['learning_rate_max']\n ssparam['scaled_unsup_weight_max'] = confParam['scaled_unsup_weight_max']\n ssparam['gammer'] = confParam['gammer']\n ssparam['beita'] = confParam['beita']\n ssparam['learning_rate'] = confParam['learning_rate']\n ssl = SemisupLearner(modelFile, model, **ssparam)\n # Train net\n ssl.train()\n # predict\n pred_prob = ssl.predict(x_test)\n # print predicting metrics\n displayMetrics(y_test, pred_prob, noteInfo, metricsFile) \n\n### main ...\ndef main(loc):\n #num_upsamp = 0\n is_on_benchmark = False\n is_on_upsamp = True\n is_supervised = False\n is_semisup = True\n confParam = readConfParam(loc)\n print('Generating labels and features...')\n (x_train, y_train), (x_test, y_test)=loadBenchmarkData(confParam['posFile'], confParam['negFile'],\n confParam['fpostrain'], confParam['fnegtrain'],\n confParam['fpostest'], confParam['fnegtest'],\n hasBenchmarkData=confParam['hasBenchmarkData'])\n x_train, y_train = shuffle(x_train, y_train)\n ## learning on benchmark data \n if is_on_benchmark: \n if is_supervised:\n noteInfo = '\\nOn benchmark dataset, supervised learning predicting result'\n metricsFile = '{}_supervised_info.txt'.format(loc)\n modelFile = './modelFile/{}_superModel_benchmark.hdf5'.format(loc)\n supLearn(x_train, y_train, x_test, y_test, modelFile, noteInfo, metricsFile, **confParam) \n if is_semisup:\n noteInfo = '\\nOn benchmark dataset, semi-supervised learning predicting result'\n metricsFile = '{}_semisup_info.txt'.format(loc)\n modelFile = './modelFile/{}_semiSuperModel_benchmark.hdf5'.format(loc)\n semisupLearn(x_train, y_train, x_test, y_test, modelFile, noteInfo, metricsFile, **confParam)\n ## learning on up-sampling \n if is_on_upsamp:\n for mulrate in [1,2,3,4,5,6,7]:\n upsampleFile = './data/{}/fake_pos_{}.fa'.format(loc,mulrate)\n semisup_modelFile = './modelFile/{}_semiSupModel_upsample_{}.hdf5'.format(loc,mulrate)\n sup_modelFile = './modelFile/{}_supModel_upsample_{}.hdf5'.format(loc,mulrate)\n \n x_fake = padSequences(upsampleFile, confParam['maxlen'])\n y_fake = np.zeros((len(x_fake), 2))\n y_fake[:,0] = 1\n x_train_upsamp = np.concatenate((x_train, x_fake))\n y_train_upsamp = np.concatenate((y_train, y_fake))\n x_train_upsamp, y_train_upsamp = shuffle(x_train_upsamp, y_train_upsamp)\n \n if is_supervised:\n noteInfo = '\\ngenerate positive sample {} multiple samples by pssm'.format(mulrate)\n metricsFile = '{}_supervised_info.txt'.format(loc)\n supLearn(x_train_upsamp, y_train_upsamp, x_test, y_test, sup_modelFile, noteInfo, metricsFile, **confParam)\n if is_semisup:\n noteInfo = '\\ngenerate positive sample {} multiple samples by pssm'.format(mulrate)\n metricsFile = '{}_semisup_info.txt'.format(loc)\n semisupLearn(x_train_upsamp, y_train_upsamp, x_test, y_test, semisup_modelFile, noteInfo, metricsFile, **confParam)\n \nmain('cytoplasm')","repo_name":"deepbioinfo/DeepUperSampling","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"4159167924","text":"#Multiple of K\n#7, 77, 777, 7777, 77777, ...\n# Just check whether the remainder ever becomes zero\n# Since 7777..=K*quotient+remainder, once the same remainder appears again we know it will never divide evenly\n# The remainder is always smaller than K, so K trials are enough to decide whether it ever reaches 0 (pigeonhole principle)\n# 7, 77, 777 follows the recurrence: multiply by 10 and add 7,\n# so we can apply the same recurrence to the remainders\n\nK=int(input())\nsevens=7\nfor i in range(1,K+1):\n amari=sevens%K\n if amari==0:\n print(i)\n 
exit()\n else:\n sevens=amari*10+7\n\nprint(-1)","repo_name":"masajoki/atcoder","sub_path":"abc174c.py","file_name":"abc174c.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"17811813611","text":"import os\n\nfrom rdflib import Graph, URIRef\nfrom rdflib.namespace import FOAF\nfrom rdflib.plugins.sparql import prepareQuery, prepareUpdate\n\n\ndef test_prepare_update():\n q = prepareUpdate(\n \"\"\"\\\nPREFIX dc: <http://purl.org/dc/elements/1.1/>\nINSERT DATA\n{ <http://example/book1> dc:title \"A new book\" ;\n dc:creator \"A.N.Other\" .\n } ;\n\"\"\",\n initNs={},\n )\n\n g = Graph()\n g.update(q, initBindings={})\n assert len(g) == 2\n\n\ndef test_prepare_query():\n q = prepareQuery(\n \"SELECT ?name WHERE { ?person foaf:knows/foaf:name ?name . }\",\n initNs={\"foaf\": FOAF},\n )\n\n g = Graph()\n g.parse(\n location=os.path.join(\n os.path.dirname(__file__), \"..\", \"..\", \"examples\", \"foaf.n3\"\n ),\n format=\"n3\",\n )\n\n tim = URIRef(\"http://www.w3.org/People/Berners-Lee/card#i\")\n\n assert len(list(g.query(q, initBindings={\"person\": tim}))) == 50\n","repo_name":"RDFLib/rdflib","sub_path":"test/test_sparql/test_prepare.py","file_name":"test_prepare.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1991,"dataset":"github-code","pt":"12"} +{"seq_id":"10495010919","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# This is an example how NOT TO MAKE a secure application!\n# It is used in a game at Openfest 2021 https://www.openfest.org/2021/en/\n\n\nimport hashlib\nimport re\nimport subprocess\n\nSECRET_CODE = \"XXXX\" # It is replaced in the real application\nURL = \"http://example.com\"\n\n\nmessage = f\"\"\"\n Здравейте! Благодарим ви, че се включвате в играта организирана от\n StorPool Storage за OpenFest 2021!\n\n За да участвате, въведете реален e-mail адрес, на който ще получите\n потвърждение за успешното решаване на задачата.\n\n Не оставяйте нищо на случайността! С малко съобразителност и\n хакерски умения, можете да повишите вашите шансове и да спечелите\n свежа StorPool тениска. Наградата може да получите на нашия щанд\n на OpenFest 2021 или с куриер до ваш адрес.\n\n Изходният код на програмата, с която можете да хакнете играта на\n StorPool, ще намерите тук {URL}\n\n Успех! Нека силата бъде с вас!\n\"\"\"\n\ndef main():\n print(message)\n\n name = None\n email = None\n\n try:\n while True:\n name = input(\"Име: \")\n if re.match(r'[\\w\\- ]+$', name):\n break\n print(\"Невалиден формат.\")\n\n while True:\n email = input(\"e-mail адрес: \")\n if re.match(r'[\\w\\-\\.]+@[a-zA-Z_.-]+$', email):\n break\n print(\"Невалиден формат.\")\n\n except UnicodeDecodeError as e:\n print(\"Bad encoding:\", e)\n return\n except EOFError:\n print(\"Goodbye!\")\n return\n\n m = hashlib.md5()\n m.update(name.encode('utf-8'))\n m.update(email.encode('utf-8'))\n\n h = m.hexdigest()\n if h.startswith('00'):\n print(\"Честито! Вие печелите!\")\n print(f\"Изпратихме Ви информация как да получите наградата си на посочения от Вас адрес {email}\")\n send_confirmation(email, name)\n else:\n print(\"Съжалявам, не печелите. 
Може да опитате пак с друго име \"\n \"или e-mail адрес.\")\n\n\ndef send_confirmation(email, name):\n try:\n subprocess.run([\"./send_message\", email, name])\n except subprocess.CalledProcessError as e:\n print(\"Error sending email\", e)\n\nif __name__ == '__main__':\n main()\n","repo_name":"vmoyankov/misc","sub_path":"of2021/game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74383933782","text":"from scrabble import ScrabbleDict # First we need to import the ScrabbleDict class\n\n\nclass ScrabbleCalculator(ScrabbleDict): # Inheriting from the Parent class\n def __init__(self, word): # Takes a word argument to calculate its score\n super().__init__() # Using the parent classes initialisation\n self.score = 0 # Set the score to 0 to start with\n self.word = word.lower() # using .lower() saves us having to check for capitals and lowercase in the dict\n\n def score_check(self):\n for letters in self.word: # Loops through the word and increments the value with the appropriate score\n if letters in self.one_point:\n self.score += 1\n elif letters in self.two_points:\n self.score += 2\n elif letters in self.three_points:\n self.score += 3\n elif letters in self.four_points:\n self.score += 4\n elif letters in self.five_points:\n self.score += 5\n elif letters in self.eight_points:\n self.score += 8\n elif letters in self.ten_points:\n self.score += 10\n else: # If the character received wasn't found in any of the dictionaries it must not be a letter\n return \"Please only include letters!\" # So give the user some feedback\n return self.score # Return the calculated score\n\n\ntest = ScrabbleCalculator(\"Amazing\") # Testing for an expected input\nprint(test.score_check())\ntest2 = ScrabbleCalculator(\"aglamgl!26262\") # Testing for an unexpected input\nprint(test2.score_check())\n","repo_name":"jatkin-wasti/python_scrabble_OOP","sub_path":"scrabble_calculator.py","file_name":"scrabble_calculator.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35854428998","text":"from dict_from_lists import dict_from_lists\nfrom pytest import fixture\n\n@fixture(params=[\n {\n \"keys\": [\"key1\", \"key2\"],\n \"values\": [1, 2],\n 'dict': {\"key1\": 1, 'key2': 2}\n },\n {\n \"keys\": [\"key1\", \"key2\", \"key3\"],\n \"values\": [1, 2],\n 'dict': {\"key1\": 1, 'key2': 2, 'key3': None}\n },\n {\n \"keys\": [\"key1\", \"key2\"],\n \"values\": [1, 2, 3, 4, 5],\n 'dict': {\"key1\": 1, 'key2': 2}\n },\n])\ndef test_data(request):\n return request.param\n\ndef test_dict_from_lists(test_data):\n assert dict_from_lists(test_data['keys'], test_data['values']) == test_data['dict']\n","repo_name":"ansaev/dict_from_lists","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28768909823","text":"from datetime import datetime, timedelta\nfrom sqlalchemy import func\nfrom sqlalchemy.dialects.postgresql import JSONB\n\nfrom aleph.core import db\nfrom aleph.util import expand_json\nfrom aleph.model.collection import Collection\n\n\nclass CrawlerState(db.Model):\n \"\"\"Report the state of a file being processed.\"\"\"\n\n TIMEOUT = timedelta(minutes=60)\n\n STATUS_OK = 'ok'\n STATUS_FAIL = 'fail'\n\n id = db.Column(db.BigInteger, 
primary_key=True)\n crawler_id = db.Column(db.Unicode(), index=True)\n crawler_run = db.Column(db.Unicode(), nullable=True)\n content_hash = db.Column(db.Unicode(65), nullable=True)\n foreign_id = db.Column(db.Unicode, nullable=True)\n status = db.Column(db.Unicode(10), nullable=False)\n error_type = db.Column(db.Unicode(), nullable=True)\n error_message = db.Column(db.Unicode(), nullable=True)\n error_details = db.Column(db.Unicode(), nullable=True)\n meta = db.Column(JSONB)\n collection_id = db.Column(db.Integer(), db.ForeignKey('collection.id'), index=True)\n collection = db.relationship(Collection, backref=db.backref('crawl_states', cascade='all, delete-orphan')) # noqa\n created_at = db.Column(db.DateTime, default=datetime.utcnow)\n\n @classmethod\n def _from_meta(cls, meta, collection_id):\n obj = cls()\n obj.collection_id = collection_id\n obj.crawler_id = meta.crawler\n obj.crawler_run = meta.crawler_run\n obj.foreign_id = meta.foreign_id\n obj.content_hash = meta.content_hash\n obj.meta = expand_json(meta.to_attr_dict(compute=True))\n db.session.add(obj)\n return obj\n\n @classmethod\n def store_stub(cls, collection_id, crawler_id, crawler_run):\n obj = cls()\n obj.collection_id = collection_id\n obj.crawler_id = crawler_id\n obj.crawler_run = crawler_run\n obj.error_type = 'init'\n obj.status = cls.STATUS_OK\n db.session.add(obj)\n return obj\n\n @classmethod\n def store_ok(cls, meta, collection_id):\n obj = cls._from_meta(meta, collection_id)\n obj.status = cls.STATUS_OK\n return obj\n\n @classmethod\n def store_fail(cls, meta, collection_id, error_type=None,\n error_message=None, error_details=None):\n obj = cls._from_meta(meta, collection_id)\n obj.status = cls.STATUS_FAIL\n obj.error_type = error_type\n obj.error_message = error_message\n obj.error_details = error_details\n return obj\n\n @classmethod\n def crawler_last_run(cls, crawler_id):\n q = db.session.query(cls.crawler_run, cls.created_at)\n q = q.filter(cls.crawler_id == crawler_id)\n q = q.order_by(cls.created_at.desc())\n q = q.limit(1)\n res = q.first()\n if res is None:\n return None, None\n return (res.crawler_run, res.created_at)\n\n @classmethod\n def crawler_stats(cls, crawler_id):\n stats = {}\n last_run_id, last_run_time = cls.crawler_last_run(crawler_id)\n\n # Check if the crawler was active very recently, if so, don't\n # allow the user to execute a new run right now.\n timeout = (datetime.utcnow() - CrawlerState.TIMEOUT)\n stats['running'] = last_run_time > timeout if last_run_time else False\n\n q = db.session.query(func.count(cls.id))\n q = q.filter(cls.crawler_id == crawler_id)\n for section in ['last', 'all']:\n data = {}\n sq = q\n if section == 'last':\n sq = sq.filter(cls.crawler_run == last_run_id)\n okq = sq.filter(cls.status == cls.STATUS_OK)\n data['ok'] = okq.scalar() - 1 if last_run_id else 0\n failq = sq.filter(cls.status == cls.STATUS_FAIL)\n data['fail'] = failq.scalar() if last_run_id else 0\n stats[section] = data\n stats['last']['updated'] = last_run_time\n stats['last']['run_id'] = last_run_id\n return stats\n\n @classmethod\n def all(cls):\n return db.session.query(CrawlerState)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'status': self.status,\n 'crawler_id': self.crawler_id,\n 'crawler_run': self.crawler_run,\n 'content_hash': self.content_hash,\n 'foreign_id': self.foreign_id,\n 'error_type': self.error_type,\n 'error_message': self.error_message,\n 'error_details': self.error_details,\n 'meta': self.meta,\n 'collection_id': self.collection_id,\n 'created_at': 
self.created_at\n }\n\n def __repr__(self):\n return '<CrawlerState(%s,%s)>' % (self.id, self.status)\n\n def __unicode__(self):\n return self.id\n","repo_name":"karna1995/aleph","sub_path":"aleph/model/crawler_state.py","file_name":"crawler_state.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"70911703701","text":"def detectCycle(self, head: ListNode) -> ListNode:\n if head is None or head.next is None: # Safety case\n return None\n walker, runner = head, head\n isCircular = False # Checks basic loop\n while runner.next and runner.next.next: # Floyd Cycle Detection - '01-Optimal.py'\n walker = walker.next\n runner = runner.next.next\n if runner == walker:\n isCircular = True\n break\n if not isCircular: # If no loop, finish\n return None\n firstStep = head # Initiate index search\n while firstStep != walker: # Position searching\n firstStep = firstStep.next # Move at same speed\n walker = walker.next\n return firstStep # When matched, return location","repo_name":"EmperorArthurIX/LeetCodeProblems","sub_path":"02-Optimal.py","file_name":"02-Optimal.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30329400341","text":"#!/usr/bin/env python3\n\nimport os, sys, json, datetime\n\n# read all the events\n# list the ones that have youtube value which is not - and that does NOT have the video directory.\n# list the ones that have no youtube entry or that it is empty\n# Only show events that have already finished.\n\nwith open(os.path.join('html', 'cat.json')) as fh:\n cat = json.load(fh)\n\nnow = datetime.datetime.now()\nnow_str = now.strftime('%Y-%m-%d')\n\nno_videos = ''\nno_youtube = ''\non_vimeo = ''\nfor e in sorted(cat['events'].values(), key=lambda e: e['event_start'], reverse=True):\n #exit(e)\n if e.get('videos_url'):\n continue\n\n youtube = e.get('youtube')\n vimeo = e.get('vimeo')\n\n if e['event_end'] > now_str:\n if youtube:\n exit(\"ERROR. 
There is a youtube entry in a future event {}\".format(e['nickname']))\n continue\n\n if youtube:\n if youtube != '-':\n if not os.path.exists('data/videos/' + e['nickname']):\n no_videos += \"--list {:30} -d {} -e {}\\n\".format( youtube, e['event_start'], e['nickname'])\n elif vimeo:\n on_vimeo += \"vimeo {} {}\\n\".format( e['event_start'], e['nickname'] )\n else:\n no_youtube += \"{} {}\\n\".format( e['event_start'], e['nickname'] )\n\nif no_videos:\n print(\"Has youtube ID but videos were not included\")\n print(no_videos)\n\nif on_vimeo:\n print(\"On vimeo\")\n print(on_vimeo)\n\nif no_youtube:\n print(\"Has no youtube ID\")\n print(no_youtube)\n\n# vim: expandtab\n\n","repo_name":"szabgab/codeandtalk.com","sub_path":"bin/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"12"} +{"seq_id":"26963441086","text":"def produce(a):\r\n b = list(str(a))\r\n b.append(a)\r\n # print(b)\r\n\r\n hap = 0\r\n for i in range(len(b)):\r\n hap += int(b[i])\r\n return hap\r\n # print(hap)\r\n\r\n\r\nlist1 = []\r\nfor i in range(1, 10000):\r\n a = i\r\n list1.append(produce(a))\r\nlist2 = sorted(list1)\r\n# print(list2)\r\n\r\nlist3 = []\r\nfor i in range(1, 10001):\r\n list3.append(i)\r\n\r\nfor i in list3:\r\n if i not in list2:\r\n print(i)","repo_name":"sikweon1996/Baekjoon","sub_path":"6단계 함수/4673 셀프넘버.py","file_name":"4673 셀프넘버.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21011119468","text":"# Television\n# A television simulator\n\nclass Television(object):\n \"\"\"A Television object\"\"\"\n\n def __init__(self, volume = 0, channel = 0):\n self.volume = volume\n self.channel = channel\n\n def vol(self, volume):\n self.volume = volume\n if self.volume < 0:\n self.volume = 0\n elif self.volume > 50:\n self.volume = 50\n print(\"The volume has been changed to\", self.volume)\n\n def chan(self, channel):\n self.channel = channel\n if self.channel < 0:\n self.channel = 0\n elif self.channel > 25:\n self.channel = 25\n print(\"The channel has been changed to\", self.channel)\n\ndef main():\n tele = Television()\n\n choice = None\n while choice != \"0\":\n print(\"\"\"\n Television simulator\n 0 - Quit\n 1 - Change volume\n 2 - Change channel\n \"\"\")\n print(\"Volume:\", tele.volume,)\n print(\"Channel:\", tele.channel, \"\\n\")\n choice = input(\"Choice: \")\n # exit\n if choice == \"0\":\n print(\"Good-bye.\")\n # change the volume\n elif choice == \"1\":\n volume = int(input(\"What do you want the volume to be: \"))\n tele.vol(volume)\n # change the channel\n elif choice == \"2\":\n channel = int(input(\"What do you want the channel to be: \"))\n tele.chan(channel)\n # some unknown choice\n else:\n print(\"\\nSorry, but\", choice, \"isn't a valid choice.\")\n\nmain()\ninput(\"\\n\\nPress the enter key to exit: \")","repo_name":"theglitchmitch/M3-Learning-Python","sub_path":"Chapter_8/Television.py","file_name":"Television.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"21010376940","text":"#!/home/nick/.virtualenvs/twitterbots/bin/python3.5\n\nimport tweepy\nimport sqlite3\nfrom configparser import ConfigParser\n\n\ndef main():\n\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n\n consumer_key = parser.get('Keys',\n 'consumer_key').strip(\"'\")\n 
consumer_secret = parser.get('Secrets',\n 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens',\n 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets',\n 'access_token_secret').strip(\"'\")\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n conn = sqlite3.connect('tweet_dump.db')\n c = conn.cursor()\n\n c.execute('SELECT user_id FROM tdump ORDER BY user_id ASC')\n tweets = [x[0] for x in c.fetchall()]\n\n seen = []\n count = 0\n\n for tweet in tweets:\n\n if tweet not in seen:\n\n get_id = api.get_user(tweet)\n id_ = get_id.id_str\n screen_name = get_id.screen_name\n count += 1\n if count % 1000 == 0:\n print(count)\n\n if tweet != id_:\n\n print(tweet, id_, screen_name)\n\n seen.append(tweet)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nickbonne/twitter_friends_data","sub_path":"id_fix.py","file_name":"id_fix.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27730273925","text":"import argparse\nimport subprocess\nimport sys\nfrom typing import List, Optional\n\nfrom dtags import style\nfrom dtags.commons import (\n dtags_command,\n fix_color_for_windows,\n get_argparser,\n normalize_dir,\n normalize_tag,\n reverse_map,\n)\nfrom dtags.files import load_config_file\n\nUSAGE = \"run DEST [DEST ...] -c ...\"\nDESCRIPTION = f\"\"\"\nExecute a command in one or more directories.\n\nTarget directories are iterated in alphabetical order.\nPaths take precedence over tags on name collisions.\nThe command is run only once per directory in subprocesses.\n\nexamples:\n\n # run \"git status\" in all directories tagged \"work\"\n {style.command(\"run work -c git status\")}\n\n # run \"git status\" in directories ~/foo and ~/bar\n {style.command(\"run ~/foo ~/bar -c git status\")}\n\n # run \"git status\" in directories tagged \"work\" and in ~/foo\n {style.command(\"run work ~/foo -c git status\")}\n\"\"\"\n\n\n@dtags_command\ndef execute(args: Optional[List[str]] = None) -> None:\n parser = get_argparser(prog=\"run\", desc=DESCRIPTION, usage=USAGE)\n parser.add_argument(\n \"destinations\",\n metavar=\"DEST\",\n nargs=\"+\",\n help=\"directory path or tag\",\n )\n parser.add_argument(\n \"-c\",\n \"--cmd\",\n dest=\"command\",\n nargs=argparse.REMAINDER,\n required=True,\n help=\"command to execute\",\n )\n parsed_args = parser.parse_args(sys.argv[1:] if args is None else args)\n\n if not parsed_args.command:\n parser.error(\"the following arguments are required: -c/--cmd\")\n else:\n run_command(parsed_args.destinations, parsed_args.command)\n\n\ndef run_command(destinations: List[str], command: List[str]) -> None:\n config = load_config_file()\n tag_config = config[\"tags\"]\n\n tag_to_dirpaths = reverse_map(tag_config)\n dirpaths = set()\n\n for dest in destinations:\n dirpath = normalize_dir(dest)\n if dirpath is not None:\n dirpaths.add(dirpath)\n else:\n tag = normalize_tag(dest)\n if tag in tag_to_dirpaths:\n for dirpath in tag_to_dirpaths[tag]:\n if dirpath.is_dir():\n dirpaths.add(dirpath)\n\n return_code = 0\n for dirpath in sorted(dirpaths):\n tags = tag_config.get(dirpath, set())\n\n fix_color_for_windows()\n print(f\"\\n{style.mapping(dirpath, tags)}:\")\n try:\n process = subprocess.run(\n command,\n cwd=dirpath,\n stderr=subprocess.STDOUT,\n )\n except FileNotFoundError:\n print(f\"Invalid command: 
{command[0]}\", file=sys.stderr)\n except NotADirectoryError: # pragma no cover\n print(f\"Not a directory: {dirpath.as_posix()}\", file=sys.stderr)\n else:\n if process.returncode != 0:\n return_code = 1\n\n sys.exit(return_code)\n","repo_name":"joowani/dtags","sub_path":"dtags/commands/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":363,"dataset":"github-code","pt":"12"} +{"seq_id":"13594267514","text":"from hashlib import blake2s\nimport codecs\n\nclass prg:\n\n def f(self, inp):\n\n converted = inp[0:int(len(inp)/4)]+ inp[int(len(inp)/2): int(3*len(inp)/4)] + inp[int(len(inp)/4): int(len(inp)/2)] + inp[int(3*len(inp)/4) :]\n return converted\n\n\n def g(self, inp):\n\n return blake2s(codecs.decode(inp, \"hex_codec\")).hexdigest()\n\n \n\n def __init__(self, seed) -> None:\n self.__seed = blake2s(seed.encode()).hexdigest();\n self.__currentHash = self.g(self.f(self.__seed))\n\n \n def random(self):\n self.__currentHash = self.g(self.f(self.__currentHash))\n return self.__currentHash\n\n\nc = prg(seed=\"\")\n\nout = \"\"\n\nfor i in range(100000):\n out = str(bin(int(c.random(), 16)))[2:] + \"\\n\" + out\n\n\nf = open(\"ranodm.txt\", \"w\")\nf.write(out)","repo_name":"Namishk/PsudoRandomKeys","sub_path":"psudoRandomGenerator.py","file_name":"psudoRandomGenerator.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26942642005","text":"#!/bin/env python\n\nfrom enum import Enum, auto\nfrom state import NoQuarterState\n\n\nclass COIN(Enum):\n SOLD_OUT = 0\n NO_QUARTER = auto()\n HAS_QUARTER = auto()\n SOLD = auto()\n\n\nclass GumballMachine:\n def __init__(self, count: int):\n self.__state = NoQuarterState(self)\n self.__count = count\n\n def insert_quarter(self):\n self.__state.insert_quarter()\n","repo_name":"qtsky89/DesignPattern","sub_path":"chapter10_state/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38679851753","text":"#!/usr/bin/env python3\n\nimport fcntl\nimport os\nimport sys\nimport time\nimport tty\nimport termios\n\nclass raw(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.original_stty = termios.tcgetattr(self.stream)\n tty.setcbreak(self.stream)\n def __exit__(self, type, value, traceback):\n termios.tcsetattr(self.stream, termios.TCSANOW, self.original_stty)\n\nclass nonblocking(object):\n def __init__(self, stream):\n self.stream = stream\n self.fd = self.stream.fileno()\n def __enter__(self):\n self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)\n def __exit__(self, *args):\n fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)\n\ndef get_input(timeout=0.5, default=0):\n res = None\n start = time.time()\n with raw(sys.stdin):\n with nonblocking(sys.stdin):\n while res is None:\n try:\n c = sys.stdin.read(1)\n if c.lower() == \"a\":\n res = -1\n elif c.lower() == \"d\":\n res = 1\n elif c.lower() == \"s\":\n res = 0\n except IOError:\n continue\n if time.time() - start > timeout:\n res = default\n else:\n time.sleep(.1)\n return 
res","repo_name":"fdibaldassarre/advent-of-code-2019","sub_path":"13/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70432220503","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.support.select import Select\n\ndriver=webdriver.Chrome(ChromeDriverManager().install())\ndriver.get(\"https://www.jqueryscript.net/demo/jQuery-Plugin-For-Filterable-Bootstrap-Dropdown-Select-Bootstrap-Select/\")\ndriver.implicitly_wait(30)\ndriver.maximize_window()\n\nbutton=driver.find_element_by_id(\"bts-ex-1\")\nbutton.click()\nall_options=button.find_elements_by_class_name(\"items\")\ntry:\n print(driver.find_element_by_xpath(\"//span[class='text]\").text)\nexcept NoSuchElementException as ex:\n print(\"Element is not selected\")\nprint(len(all_options))\n\nfor webelement in all_options:\n if webelement.text==\"Item 2\":\n webelement.click()\n break\nprint(driver.find_element_by_xpath(\"//span[@class='text']\").text)\n\ndriver.quit()","repo_name":"anandsitaram/selenium-python-basics","sub_path":"simpleineractions/Demo02_BootstrapDropDowns.py","file_name":"Demo02_BootstrapDropDowns.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"2673128538","text":"import cv2 as cv\nimport utils\n\n\ndef orb(gray):\n orb = cv.ORB_create(nfeatures=2000, scaleFactor=1.1, nlevels=8, edgeThreshold=11, firstLevel=0, patchSize=11,\n fastThreshold=5)\n msk,_ = utils.mask(gray)\n KP, des = orb.detectAndCompute(gray, msk)\n return KP\n\n\ndef sift(gray):\n sift = cv.SIFT_create(contrastThreshold=0.04, edgeThreshold=10)\n msk,_ = utils.mask(gray)\n KP, des = sift.detectAndCompute(gray, msk)\n return KP\n\n\ndef fast(gray):\n fast = cv.FastFeatureDetector_create(threshold=10, nonmaxSuppression=True, type=cv.FAST_FEATURE_DETECTOR_TYPE_9_16)\n msk,_ = utils.mask(gray)\n KP = fast.detect(gray, msk)\n return KP\n","repo_name":"wtqqtw/auto-chemistry","sub_path":"keyPoints.py","file_name":"keyPoints.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"24147640637","text":"from random import choice\nfrom typing import List, Union\n\nfrom fedot.core.data.data import InputData\nfrom fedot.core.data.data_preprocessing import data_has_categorical_features, data_has_missing_values\nfrom fedot.core.data.multi_modal import MultiModalData\nfrom fedot.core.pipelines.node import PrimaryNode, SecondaryNode\nfrom fedot.core.pipelines.pipeline import Pipeline\nfrom fedot.core.repository.tasks import Task, TaskTypesEnum\nfrom fedot.core.repository.dataset_types import DataTypesEnum\nfrom fedot.core.repository.operation_types_repository import OperationTypesRepository\nfrom fedot.core.log import Log\n\nNOT_FITTED_ERR_MSG = 'Model not fitted yet'\nUNSUITABLE_AVAILABLE_OPERATIONS_MSG = \"Unable to construct an initial assumption from the passed \" \\\n \"available operations, default initial assumption will be used\"\n\n\nclass ApiInitialAssumptions:\n def get_initial_assumption(self,\n data: Union[InputData, MultiModalData],\n task: Task,\n available_operations: List[str] = None,\n logger: Log = None) -> List[Pipeline]:\n\n has_categorical_features = 
data_has_categorical_features(data)\n has_gaps = data_has_missing_values(data)\n\n if isinstance(data, MultiModalData):\n if available_operations:\n logger.message("Available operations are not taken into account when "\n "forming the initial assumption for multi-modal data")\n initial_assumption = self.create_multidata_pipelines(task, data, has_categorical_features, has_gaps)\n elif isinstance(data, InputData):\n if available_operations:\n initial_assumption = \\\n self.create_unidata_pipelines_on_available_operations(task, data, has_categorical_features,\n has_gaps, available_operations,\n logger)\n else:\n initial_assumption = self.create_unidata_pipelines(task, has_categorical_features, has_gaps)\n else:\n raise NotImplementedError(f"Don't handle {type(data)}")\n return initial_assumption\n\n @staticmethod\n def _get_operations_for_the_task(task_type: TaskTypesEnum, data_type: DataTypesEnum, repo: str,\n available_operations: List[str]):\n """ Returns the intersection of the sets of passed available operations and\n operations that are suitable for solving the given problem """\n\n operations_for_the_task = \\\n OperationTypesRepository(repo).suitable_operation(task_type=task_type,\n data_type=data_type)[0]\n operations_to_choose_from = list(set(operations_for_the_task).intersection(available_operations))\n return operations_to_choose_from\n\n @staticmethod\n def _are_only_available_operations(pipeline: Pipeline, available_operations: List[str]):\n """ Checks if the pipeline contains only nodes with passed available operations """\n\n for node in pipeline.nodes:\n if node.operation.operation_type not in available_operations:\n return False\n return True\n\n def _create_unidata_pipeline_on_random_operation(self, task, data, pipeline, available_operations, logger):\n """ Creates pipeline from one model randomly selected from the pool of available operations.\n For time series problem, first node with 'lagged' operation, then the randomly selected model.\n If it is impossible to create a valid pipeline from the given available operations,\n returns the default one """\n\n if task.task_type == TaskTypesEnum.ts_forecasting:\n node_lagged = PrimaryNode('lagged')\n operations_to_choose_from = \\\n self._get_operations_for_the_task(task_type=TaskTypesEnum.regression, data_type=data.data_type,\n repo='model', available_operations=available_operations)\n if not operations_to_choose_from:\n logger.message(UNSUITABLE_AVAILABLE_OPERATIONS_MSG)\n return pipeline\n\n node_final = SecondaryNode(choice(operations_to_choose_from), nodes_from=[node_lagged])\n return Pipeline(node_final)\n\n elif task.task_type == TaskTypesEnum.regression or \\\n task.task_type == TaskTypesEnum.classification:\n operations_to_choose_from = \\\n self._get_operations_for_the_task(task_type=task.task_type, data_type=data.data_type,\n repo='model', available_operations=available_operations)\n if not operations_to_choose_from:\n logger.message(UNSUITABLE_AVAILABLE_OPERATIONS_MSG)\n return pipeline\n\n node = PrimaryNode(choice(operations_to_choose_from))\n return Pipeline(node)\n else:\n raise NotImplementedError(f"Don't have initial pipeline for task type: {task.task_type}")\n\n def create_unidata_pipelines_on_available_operations(self, task: Task, data: InputData,\n has_categorical_features: bool, has_gaps: bool,\n available_operations: List[str],\n logger: Log) -> List[Pipeline]:\n """ Creates a pipeline for Uni-data using only available operations """\n\n pipelines = 
self.create_unidata_pipelines(task, has_categorical_features, has_gaps)\n correct_pipelines = []\n for pipeline in pipelines:\n if self._are_only_available_operations(pipeline, available_operations):\n correct_pipelines.append(pipeline)\n else:\n correct_pipeline = self._create_unidata_pipeline_on_random_operation(task, data,\n pipeline, available_operations,\n logger)\n correct_pipelines.append(correct_pipeline)\n return correct_pipelines\n\n def create_unidata_pipelines(self,\n task: Task,\n has_categorical_features: bool,\n has_gaps: bool) -> List[Pipeline]:\n # TODO refactor as builder\n node_preprocessed = preprocessing_builder(task.task_type, has_gaps, has_categorical_features)\n if task.task_type == TaskTypesEnum.ts_forecasting:\n pipelines = [create_glm_ridge_pipeline(node_preprocessed),\n create_lagged_ridge_pipeline(node_preprocessed),\n create_polyfit_ridge_pipeline(node_preprocessed),\n create_ar_pipeline(node_preprocessed)]\n elif task.task_type == TaskTypesEnum.classification:\n if has_categorical_features:\n pipelines = [create_rf_classifier_pipeline(node_preprocessed),\n create_logit_classifier_pipeline(node_preprocessed)]\n else:\n pipelines = [create_rf_classifier_pipeline(node_preprocessed),\n create_logit_classifier_pipeline(node_preprocessed)]\n elif task.task_type == TaskTypesEnum.regression:\n if has_categorical_features:\n pipelines = [create_rfr_regression_pipeline(node_preprocessed),\n create_ridge_regression_pipeline(node_preprocessed)]\n else:\n pipelines = [create_rfr_regression_pipeline(node_preprocessed),\n create_ridge_regression_pipeline(node_preprocessed)]\n else:\n raise NotImplementedError(f\"Don't have initial pipeline for task type: {task.task_type}\")\n return pipelines\n\n def create_multidata_pipelines(self, task: Task, data: MultiModalData,\n has_categorical_features: bool,\n has_gaps: bool) -> List[Pipeline]:\n if task.task_type == TaskTypesEnum.ts_forecasting:\n node_final = SecondaryNode('ridge', nodes_from=[])\n for data_source_name, values in data.items():\n if data_source_name.startswith('data_source_ts'):\n node_primary = PrimaryNode(data_source_name)\n node_lagged = SecondaryNode('lagged', [node_primary])\n node_last = SecondaryNode('ridge', [node_lagged])\n node_final.nodes_from.append(node_last)\n elif task.task_type == TaskTypesEnum.classification:\n node_final = SecondaryNode('rf', nodes_from=[])\n node_final.nodes_from = self.create_first_multimodal_nodes(data, has_categorical_features, has_gaps)\n elif task.task_type == TaskTypesEnum.regression:\n node_final = SecondaryNode('rfr', nodes_from=[])\n node_final.nodes_from = self.create_first_multimodal_nodes(data, has_categorical_features, has_gaps)\n else:\n raise NotImplementedError(f\"Don't have initial pipeline for task type: {task.task_type}\")\n\n return [Pipeline(node_final)]\n\n def create_first_multimodal_nodes(self, data: MultiModalData,\n has_categorical: bool, has_gaps: bool) -> List[Pipeline]:\n nodes_from = []\n\n for data_source_name, values in data.items():\n node_primary = PrimaryNode(data_source_name)\n node_imputation = SecondaryNode('simple_imputation', [node_primary])\n if has_gaps:\n if data_source_name.startswith('data_source_table') and has_categorical:\n node_encoder = SecondaryNode('one_hot_encoding', [node_imputation])\n node_preprocessing = SecondaryNode('scaling', [node_encoder])\n else:\n node_preprocessing = SecondaryNode('scaling', [node_imputation])\n else:\n if data_source_name.startswith('data_source_table') and has_categorical:\n node_encoder = 
SecondaryNode('one_hot_encoding', [node_primary])\n node_preprocessing = SecondaryNode('scaling', [node_encoder])\n else:\n node_preprocessing = SecondaryNode('scaling', [node_primary])\n node_last = SecondaryNode('ridge', [node_preprocessing])\n nodes_from.append(Pipeline(node_last))\n\n return nodes_from\n\n\ndef preprocessing_builder(task_type: TaskTypesEnum, has_gaps: bool = False, has_categorical_features: bool = False):\n \"\"\"\n Function that accepts special info about data and create preprocessing part of pipeline\n\n :param task_type: type of task\n :param has_gaps: flag is showed is there are gaps in the data\n :param has_categorical_features: flag is showed is there are categorical_features\n :return: node_preprocessing: last node of preprocessing\n \"\"\"\n node_imputation = PrimaryNode('simple_imputation')\n if task_type == TaskTypesEnum.ts_forecasting:\n if has_gaps:\n return node_imputation\n else:\n if has_gaps:\n if has_categorical_features:\n node_encoder = SecondaryNode('one_hot_encoding', nodes_from=[node_imputation])\n node_preprocessing = SecondaryNode('scaling', [node_encoder])\n else:\n node_preprocessing = SecondaryNode('scaling', nodes_from=[node_imputation])\n else:\n if has_categorical_features:\n node_encoder = PrimaryNode('one_hot_encoding')\n node_preprocessing = SecondaryNode('scaling', [node_encoder])\n else:\n node_preprocessing = PrimaryNode('scaling')\n return node_preprocessing\n\n\ndef create_lagged_ridge_pipeline(node_preprocessed=None):\n \"\"\" Pipeline for time series forecasting task \"\"\"\n if node_preprocessed:\n node_lagged = SecondaryNode('lagged', nodes_from=[node_preprocessed])\n else:\n node_lagged = PrimaryNode('lagged')\n node_final = SecondaryNode('ridge', nodes_from=[node_lagged])\n return Pipeline(node_final)\n\n\ndef create_glm_ridge_pipeline(node_preprocessed=None):\n \"\"\" Pipeline for time series forecasting task \"\"\"\n if node_preprocessed:\n node_glm = SecondaryNode('glm', nodes_from=[node_preprocessed])\n node_lagged = SecondaryNode('lagged', nodes_from=[node_preprocessed])\n else:\n node_glm = PrimaryNode('glm')\n node_lagged = PrimaryNode('lagged')\n\n node_ridge = SecondaryNode('ridge', nodes_from=[node_lagged])\n\n node_final = SecondaryNode('ridge', nodes_from=[node_ridge, node_glm])\n return Pipeline(node_final)\n\n\ndef create_polyfit_ridge_pipeline(node_preprocessed=None):\n \"\"\" Pipeline for time series forecasting task \"\"\"\n if node_preprocessed:\n node_polyfit = SecondaryNode('polyfit', nodes_from=[node_preprocessed])\n node_lagged = SecondaryNode('lagged', nodes_from=[node_preprocessed])\n else:\n node_polyfit = PrimaryNode('polyfit')\n node_lagged = PrimaryNode('lagged')\n\n node_ridge = SecondaryNode('ridge', nodes_from=[node_lagged])\n\n node_final = SecondaryNode('ridge', nodes_from=[node_ridge, node_polyfit])\n return Pipeline(node_final)\n\n\ndef create_ar_pipeline(node_preprocessed=None):\n \"\"\" Pipeline for time series forecasting task \"\"\"\n if node_preprocessed:\n node_smoothing = SecondaryNode('smoothing', nodes_from=[node_preprocessed])\n else:\n node_smoothing = PrimaryNode('smoothing')\n node_final = SecondaryNode('ar', nodes_from=[node_smoothing])\n return Pipeline(node_final)\n\n\ndef create_rf_classifier_pipeline(node_preprocessed):\n return Pipeline(SecondaryNode('rf', nodes_from=[node_preprocessed]))\n\n\ndef create_logit_classifier_pipeline(node_preprocessed):\n return Pipeline(SecondaryNode('logit', nodes_from=[node_preprocessed]))\n\n\ndef 
create_rfr_regression_pipeline(node_preprocessed):\n return Pipeline(SecondaryNode('rfr', nodes_from=[node_preprocessed]))\n\n\ndef create_ridge_regression_pipeline(node_preprocessed):\n return Pipeline(SecondaryNode('ridge', nodes_from=[node_preprocessed]))\n","repo_name":"IldarZayrullin/ScW_F_BAMT","sub_path":"FEDOT-fedot_for_bn/fedot/api/api_utils/initial_assumptions.py","file_name":"initial_assumptions.py","file_ext":"py","file_size_in_byte":14476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"69815676820","text":"#coding:utf-8\n\n# f = open('data.txt','w+')\n# f.write('hello world, i am here!')\n# f.close()\n\n# result = f.read() # read没有传参,读取整个文件 ,read没有返回结果,则文件已经读完\n\nf =open(r'd:\\22.jpg','rb')\nnew_f =open(r\"d:\\\\666.jpg\",'wb')\ndata = f.read(1024)\nwhile data:\n new_f.write(data)\n data=f.read(1024)\nnew_f.close()\nf.close()","repo_name":"LhWorld/Machine_learing","sub_path":"test/文件.py","file_name":"文件.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27930601084","text":"from datetime import datetime\n\nfrom utils import config\nfrom utils.basic import Basic\nfrom utils.log import logger\n\n\nclass Visa(Basic):\n\n def __init__(self, driver):\n super().__init__(driver)\n\n def open_page(self, page):\n self.driver.get(page)\n\n def select_centre(self, county, city, category):\n self.wait_for_secs()\n self.click_el(name=\"JurisdictionId\")\n self.click_el(xpath=\"//select[@name='JurisdictionId']/option[contains(text(),'{}')]\".format(county))\n self.wait_for_loading()\n self.click_el(name=\"centerId\")\n self.click_el(xpath=\"//select[@name='centerId']/option[contains(text(),'{}')]\".format(city))\n self.wait_for_secs()\n self.click_el(name=\"category\")\n self.click_el(xpath=\"//select[@name='category']/option[contains(text(),'{}')]\".format(category))\n self.wait_for_secs()\n self.click_el(name='checkDate')\n logger.info(\"select centre finished\")\n\n def go_to_appointment_page(self, phone='', email=''):\n self.open_page(config.OPENED_PAGE)\n # self.select_centre(\"England\", \"Manchester\", \"Normal\")\n # self.enter_phone_and_email(phone, email)\n # self.enter_wrong_code(email, config.PASSWORD)\n # self.enter_code_from_email(email)\n\n def login(self):\n try:\n # self.click_el(xpath=\"//a[text() = 'Log in']\")\n element = self.driver.find_element_by_xpath(\"//a[contains(text(),'Log in')]\")\n element.click()\n self.wait_for_secs()\n self.enter_message(config.EMAIL, name='email')\n self.wait_for_secs()\n self.enter_message(config.PASSWORD, name='password')\n self.wait_for_secs()\n self.click_el(name=\"login\")\n logger.info(\"log in finished\")\n except Exception as e:\n logger.error(e)\n\n def go_to_book_appointment(self):\n unique_suffix = config.OPENED_PAGE.split('/')[-1]\n link = f'book-appointment/{unique_suffix}'\n logger.info(f\"date appointment link = [{link}]\")\n # open a new tab\n self.driver.execute_script(f'window.open(\\\"{link}\\\",\"_blank\");')\n # switch to the new tab\n self.driver.switch_to.window(self.driver.window_handles[-1])\n logger.info(\"go to book appointment finished\")\n\n def check_available_dates(self):\n self.click_el(id=\"VisaTypeId\")\n self.click_el(xpath=\"//select[@id='VisaTypeId']/option[contains(text(),'{}')]\".format(config.CENTER[3]))\n self.wait_for_secs(0)\n\n # check date\n self.click_el(id=\"app_date\")\n available_dates = {}\n next_button_xpath = \"//div[@class 
= 'datepicker-days']//th[@class = 'next' and @style = 'visibility: visible;']\" # next month\n while True:\n nd = self.get_normal_dates()\n if nd:\n available_dates.update(nd)\n if self.driver.find_elements_by_xpath(next_button_xpath):\n self.wait_for_secs(0)\n self.click_el(xpath=next_button_xpath)\n else:\n break\n return available_dates\n\n def get_normal_dates(self):\n normal_dates_xpath = \"//div[@class='datepicker-days']//td[not(contains(@class, 'disabled'))]\" # days in the current month\n result_dates = {}\n dates = []\n if len(self.driver.find_elements_by_xpath(normal_dates_xpath)):\n found_month = self.driver.find_element_by_xpath(\n \"//div[@class='datepicker-days']//th[@class='datepicker-switch']\").text\n for day in self.driver.find_elements_by_xpath(normal_dates_xpath): # need refactor fix\n dates.append(day.text)\n for day in dates:\n found_date = datetime.strptime(day + \" \" + found_month, '%d %B %Y')\n result_dates[found_date.strftime(\"%d/%m/%Y\")] = []\n self.click_el(normal_dates_xpath) # 自动点击\n\n return result_dates\n","repo_name":"vxwong/spain-visa-monitor","sub_path":"visa.py","file_name":"visa.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"12"} +{"seq_id":"8304191442","text":"import unittest\nimport os\nimport tempfile\n\n# internal modules:\nfrom yotta.lib.fsutils import mkDirP, rmRf\nfrom . import cli\n\n\nTest_Module_JSON = '''{\n \"name\": \"git-access-testing\",\n \"version\": \"0.0.2\",\n \"description\": \"Git Access Testing\",\n \"author\": \"autopulated\",\n \"homepage\": \"https://github.com/autopulated/git-access-testing\",\n \"licenses\": [\n {\n \"url\": \"about:blank\",\n \"type\": \"\"\n }\n ],\n \"dependencies\": {\n \"testing-dummy\": \"git@bitbucket.org:autopulated/testing-dummy.git\",\n \"other-testing-dummy\": \"git@bitbucket.org:autopulated/other-testing-dummy.git#0.0.2\"\n }\n}\n'''\n\nclass TestCLIOwners(unittest.TestCase):\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n with open(os.path.join(self.test_dir, 'module.json'), 'w') as f:\n f.write(Test_Module_JSON)\n \n def tearDown(self):\n rmRf(self.test_dir)\n\n # you have have to be authenticated to list owners, so this doesn't work\n # yet...\n #def test_listOwners(self):\n # stdout = self.runCheckCommand(['owners', 'ls'])\n # self.assertTrue(stdout.find('autopulated@gmail.com') != -1)\n\n def runCheckCommand(self, args):\n stdout, stderr, statuscode = cli.run(args, cwd=self.test_dir)\n self.assertEqual(statuscode, 0)\n return stdout or stderr\n\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","repo_name":"bridadan/yotta","sub_path":"yotta/test/cli/owners.py","file_name":"owners.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"33796073458","text":"\"\"\"\nThis module shows how to ITERATE (i.e. loop) through a SEQUENCE:\n -- list\n -- string\n -- tuple\n\nIt shows two ways to do so:\n -- using RANGE\n -- using just IN (no RANGE)\n\nAuthors: David Mutchler, Vibha Alangar, Dave Fisher, Matt Boutell, Mark Hays,\n Mohammed Noureddine, Sana Ebrahimi, Sriram Mohan, and their colleagues.\n\"\"\"\n\nimport rosegraphics as rg\n\n\ndef main():\n \"\"\" Calls the TEST functions in this module. 
\"\"\"\n run_test_sum_abs_of_all()\n run_test_sum_abs_of_all_without_range()\n run_test_fill_from_colors()\n run_test_print_letters()\n\n\n###############################################################################\n# The TEST functions are further down in the file,\n# so that you can focus on the following examples.\n###############################################################################\ndef sum_abs_of_all(sequence):\n \"\"\"\n What comes in:\n -- A sequence of numbers.\n What goes out:\n Returns the sum of the absolute values of the numbers.\n Side effects: None.\n Examples:\n sum_all([5, -1, 10, 4, -33])\n would return 5 + 1 + 10 + 4 + 33, which is 53\n\n sum_all([10, -30, -20]) would return 10 + 30 + 20, which is 60\n Type hints:\n :type sequence: list or tuple (of numbers)\n \"\"\"\n # -------------------------------------------------------------------------\n # EXAMPLE 1. Iterates through a sequence of numbers, summing them.\n # -------------------------------------------------------------------------\n total = 0\n for k in range(len(sequence)):\n total = total + abs(sequence[k])\n\n return total\n\n\ndef sum_abs_of_all_without_range(sequence):\n \"\"\"\n Same specification as sum_abs_of_all above,\n but with a different implementation.\n \"\"\"\n # -------------------------------------------------------------------------\n # EXAMPLE 2. Iterates through a sequence of numbers, summing them.\n # Same as Example 1 above, but uses the \"no range\" form.\n # -------------------------------------------------------------------------\n total = 0\n for number in sequence:\n total = total + abs(number)\n\n return total\n\n # -------------------------------------------------------------------------\n # The above example shows how you can iterate through a sequence\n # WITHOUT using a RANGE expression. This works ONLY\n # ** IF you do NOT need the index variable. **\n #\n # You can ALWAYS use the form in Example 1 that uses RANGE;\n # this form in Example 2 is just \"syntactic sugar.\"\n # Use this form if you like, but:\n # -- Don't let it keep you from understanding the critical\n # concept of an INDEX.\n # -- Be aware of the limitation of this form.\n # -- Don't confuse the two forms!\n # -------------------------------------------------------------------------\n\n\ndef fill_from_colors(window, graphics_object, colors):\n \"\"\"\n What comes in:\n -- An rg.RoseWindow\n -- A rosegraphics object that can be attached to a RoseWindow\n and has a fill color (e.g. an rg.Circle or rg.Rectangle)\n -- A sequence of rosegraphics colors.\n What goes out: Nothing (i.e., None).\n Side effects: \n -- Attaches the given graphics object to the given RoseWindow.\n -- Then iterates through the given sequence of colors, using\n those colors to set the given graphics object's fill color.\n -- At each iteration, renders the window with a brief pause\n after doing so, to create a \"flashing\" display. \n Type hints:\n :type window: rg.RoseWindow\n :type graphics_object: rg._Shape\n :type colors: list or tuple str\n \"\"\"\n # -------------------------------------------------------------------------\n # EXAMPLE 3. 
Iterates through a sequence of colors.\n # -------------------------------------------------------------------------\n graphics_object.attach_to(window)\n\n for k in range(len(colors)):\n graphics_object.fill_color = colors[k]\n window.render(0.25)\n\n\ndef print_letters(string):\n \"\"\"\n Prints the characters in the given string, one character per line.\n \"\"\"\n # -------------------------------------------------------------------------\n # EXAMPLE 4. Iterates through a STRING.\n # -------------------------------------------------------------------------\n for k in range(len(string)):\n print(string[k])\n\n\n###############################################################################\n# Just TEST functions below here.\n###############################################################################\ndef run_test_sum_abs_of_all():\n \"\"\" Tests the sum_abs_of_all function. \"\"\"\n print()\n print(\"--------------------------------------------------\")\n print(\"Testing the sum_abs_of_all function:\")\n print(\"--------------------------------------------------\")\n\n total1 = sum_abs_of_all([8, 13, 7, 5])\n print(\"Returned, expected:\", total1, 33)\n\n total2 = sum_abs_of_all([10, -30, -20])\n print(\"Returned, expected:\", total2, 60)\n\n total3 = sum_abs_of_all([])\n print(\"Returned, expected:\", total3, 0)\n\n\ndef run_test_sum_abs_of_all_without_range():\n \"\"\" Tests the sum_abs_of_all_without_range function. \"\"\"\n print()\n print(\"--------------------------------------------------\")\n print(\"Testing the sum_abs_of_all_without_range function:\")\n print(\"--------------------------------------------------\")\n\n total1 = sum_abs_of_all_without_range([8, 13, 7, 5])\n print(\"Returned, expected:\", total1, 33)\n\n total2 = sum_abs_of_all_without_range([10, -30, -20])\n print(\"Returned, expected:\", total2, 60)\n\n total3 = sum_abs_of_all_without_range([])\n print(\"Returned, expected:\", total3, 0)\n\n\ndef run_test_fill_from_colors():\n \"\"\" Tests the fill_from_colors function. 
\"\"\"\n print(\"--------------------------------------------------\")\n print(\"Testing the fill_from_colors function:\")\n print(\"See the two graphics windows that pop up.\")\n print(\"--------------------------------------------------\")\n\n # -------------------------------------------------------------------------\n # Test 1: Flashes red, white, blue -- 5 times.\n # -------------------------------------------------------------------------\n title = \"Red, white and blue, repeated 5 times!\"\n window = rg.RoseWindow(400, 180, title, canvas_color=\"dark gray\")\n\n circle = rg.Circle(rg.Point(150, 100), 40)\n circle.attach_to(window.initial_canvas)\n\n number_of_cycles = 5\n window.continue_on_mouse_click(\"Click anywhere in here to start\")\n\n for _ in range(number_of_cycles):\n fill_from_colors(window, circle, [\"red\", \"white\", \"blue\"])\n\n window.close_on_mouse_click()\n\n # -------------------------------------------------------------------------\n # Test 2: Flashes through a bunch of colors, looping through the\n # list forwards in a rectangle, then backwards in an ellipse.\n # -------------------------------------------------------------------------\n colors = [\"red\", \"white\", \"blue\", \"chartreuse\", \"chocolate\",\n \"DodgerBlue\", \"LightPink\", \"maroon\", \"yellow\", \"green\",\n \"SteelBlue\", \"black\"]\n\n title = \"Loop through 12 colors, forwards then backwards!\"\n window = rg.RoseWindow(450, 250, title, canvas_color=\"yellow\")\n\n rect_width = 100\n rect_height = 40\n rect_center = rg.Point(125, 100)\n rectangle = rg.Rectangle(rg.Point(rect_center.x - (rect_width / 2),\n rect_center.y - (rect_height / 2)),\n rg.Point(rect_center.x + (rect_width / 2),\n rect_center.y + (rect_height / 2)))\n\n oval_width = 70\n oval_height = 160\n oval_center = rg.Point(300, 100)\n ellipse = rg.Ellipse(rg.Point(oval_center.x - (oval_width / 2),\n oval_center.y - (oval_height / 2)),\n rg.Point(oval_center.x + (oval_width / 2),\n oval_center.y + (oval_height / 2)))\n\n rectangle.attach_to(window)\n ellipse.attach_to(window)\n window.render()\n window.continue_on_mouse_click(\"Click anywhere in here to start\")\n\n # This function call iterates through the colors,\n # filling the rectangle with those colors:\n fill_from_colors(window, rectangle, colors)\n\n # The reverse method reverses its list IN PLACE\n # (i.e., it \"mutates\" its list -- more on that in future sessions).\n colors.reverse()\n\n window.continue_on_mouse_click()\n\n # This function call iterates through the colors,\n # filling the ellipse (oval) with those colors:\n fill_from_colors(window, ellipse, colors)\n\n window.close_on_mouse_click()\n\n\ndef run_test_print_letters():\n \"\"\" Tests the print_letters function. 
\"\"\"\n print()\n print(\"--------------------------------------------------\")\n print(\"Testing the print_letters function:\")\n print(\"--------------------------------------------------\")\n\n print()\n print(\"Test 1: Print the letters in 'Eric Clapton'\")\n print_letters(\"Eric Clapton\")\n\n print()\n print(\"Test 2: Print the letters in 'Layla'\")\n print_letters(\"Layla\")\n\n\n# -----------------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# -----------------------------------------------------------------------------\nmain()\n","repo_name":"CSSE120StartingCode/Sequences","sub_path":"src/m2e_iterating.py","file_name":"m2e_iterating.py","file_ext":"py","file_size_in_byte":9443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"14812764979","text":"from pathlib import Path\r\n\r\n# path = Path(\"emails\")\r\n# print(path.exists()) # check if this path exists\r\n# print(path.mkdir()) # makes directory\r\n# print(path.rmdir()) # delete directory\r\n\r\npath = Path()\r\n# print(path.glob(\"*.py\")) # search for directories \"*.*\" for all files \"*.py\" all py files \"*.xml\" all xml files # IN THE CURRENT DIRECTORY\r\n\r\nfor file in path.glob(\"*\"):\r\n print(file)","repo_name":"dimiturmomchev/Python","sub_path":"Working_with_Directories/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"40272834872","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse, QueryDict\nfrom django.core import serializers\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import authenticate\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm, ProfileUpdateCreate\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom .models import User, Profile, Hobby\n\nfrom datetime import date\nfrom django.utils.timezone import now\n\nimport json\n\n\n@login_required\ndef profile(request):\n\n if request.method == \"POST\":\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(\n request.POST, request.FILES, instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n redirect('profile')\n\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n request.user.save()\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form,\n 'views': request.user.profile.views,\n 'heat': request.user.profile.user_heat.count()\n }\n\n return render(request, 'users/profile.html', context)\n\n\ndef apiLogin(request):\n if request.method == 'POST':\n user = authenticate(username=request.POST.get(\n 'username'), password=request.POST.get('password'))\n if user is not None:\n auth_login(request, user)\n return JsonResponse({\"success\": True, \"redirect\": \"profile\"})\n else:\n return JsonResponse({\"success\": False})\n\n# Get individual profiles by specifying the ID, specifying nothing will return own profile\n\n\n@login_required\ndef apiProfile(request, userid=None):\n try:\n\n # Uploading profile pics required a seprate HTTP endpoint, due to file encoding.\n if request.method == 
\"POST\":\n profile = User.objects.get(id=request.user.id).profile\n image = request.FILES['file-0']\n profile.image = image\n profile.save()\n return JsonResponse({\"success\": True})\n\n # Checks if profile exists increase view\n if request.method == \"HEAD\":\n if userid:\n profile = User.objects.get(id=userid).profile\n # Increase counter every time you make call to get individual profile if not user\n profile.views = profile.views+1\n # Save user changes\n profile.save()\n\n # Get Individual Profiles or Current profile\n if request.method == \"GET\":\n if userid:\n profile = User.objects.get(id=userid).profile\n # Increase counter every time you make call to get individual profile if not user\n profile.views = profile.views+1\n # Save user changes\n profile.save()\n else:\n profile = User.objects.get(id=request.user.id).profile\n\n hobbies = []\n for hobby in profile.hobbies.all():\n hobbies.append(hobby.name)\n\n jsonProduct = {'id': profile.user.id,\n 'image': '/media/' + str(profile.image),\n 'firstname': profile.user.first_name,\n 'lastname': profile.user.last_name,\n 'email': profile.user.email,\n 'dob': profile.dob.strftime('%Y-%m-%d'),\n 'gender': profile.gender,\n 'location': profile.location,\n 'description': profile.description,\n 'adjectives': profile.adjectives,\n 'views': str(profile.views),\n 'heat': profile.user_heat.count()\n }\n\n jsonProduct['hobbies'] = hobbies\n\n jsonData = json.dumps(jsonProduct)\n return HttpResponse(jsonData, content_type=\"application/json\")\n\n # Update your profile\n if request.method == \"PUT\":\n request.PUT = QueryDict(request.body)\n hobbyPks = request.PUT.getlist('hobbies[]')\n hobbies = Hobby.objects.filter(pk__in=hobbyPks)\n\n u_form = UserUpdateForm(request.PUT, instance=request.user)\n p_form = ProfileUpdateForm(\n request.PUT, request.FILES, instance=request.user.profile)\n \n if request.PUT['pw'] == 'true':\n pw_form = PasswordChangeForm(request.user, request.PUT)\n\n if u_form.is_valid() and p_form.is_valid() and (request.PUT['pw'] == 'false' or pw_form.is_valid()):\n u_form.save()\n p_form.save()\n if request.PUT['pw'] != 'false':\n pw_form.save()\n request.user.profile.hobbies.set(hobbies)\n return JsonResponse({\"success\": True, \"redirect\": \"profile/\"})\n else:\n if(request.PUT['pw'] == 'false'):\n return JsonResponse({\"success\": False, \"errors_user\": u_form.errors, \"errors_profile\": p_form.errors})\n else:\n return JsonResponse({\"success\": False, \"errors_user\": u_form.errors, \"errors_profile\": p_form.errors, \"errors_pw\": pw_form.errors})\n else:\n return JsonResponse({\"success\": False})\n except:\n return JsonResponse({\"success\": False})\n\n@login_required\ndef apiProfiles(request):\n if request.method == \"GET\":\n # Query Filter are lazily evaluated\n current = now().date()\n minAge = request.GET.get('minAge')\n maxAge = request.GET.get('maxAge')\n gender = request.GET.get('gender')\n\n # If set will return only the people you are matching with that like you back\n matches = request.GET.get('matches')\n\n if matches:\n res = request.user.profile.user_heat.all() & request.user.profile.heat.all()\n else:\n res = Profile.objects.exclude(user=request.user.id)\n\n if minAge:\n minAge = int(minAge)\n min_date = date(current.year - minAge, current.month, current.day)\n res = res.filter(dob__lte=min_date)\n\n\n if maxAge:\n maxAge = int(maxAge)\n max_date = date(current.year - maxAge, current.month, current.day)\n res = res.filter(dob__gte=max_date)\n\n if gender:\n if gender == \"M\":\n res = 
res.filter(gender='M')\n else:\n res = res.filter(gender='F')\n\n # Manually Making Json File\n # res = serializers.serialize('json', res)\n\n # Hashmap for all the ones that you have favourited\n favorited = []\n for profile in request.user.profile.heat.all():\n favorited.append(profile.id)\n\n userhobbies = []\n for hobby in request.user.profile.hobbies.all():\n userhobbies.append(hobby.name)\n\n jsonData = []\n\n for profile in res:\n\n # Temp fix for dates\n # if(profile.dob):\n # dob = profile.dob.strftime('%Y-%m-%d')\n # else:\n # dob = \"N/A\"\n jsonProduct = {'id': profile.user.id,\n 'image': '/media/' + str(profile.image),\n 'firstname': profile.user.first_name,\n 'lastname': profile.user.last_name,\n 'dob': profile.dob.strftime('%Y-%m-%d'),\n 'gender': profile.gender,\n 'location': profile.location,\n 'description': profile.description,\n 'adjectives': profile.adjectives,\n 'views': str(profile.views)\n }\n\n hobbies = []\n chobbies = 0\n for hobby in profile.hobbies.all():\n hobbies.append(hobby.name)\n if(hobby.name in userhobbies):\n chobbies = chobbies + 1\n\n jsonProduct['hobbies'] = hobbies\n jsonProduct['commonhobbies'] = chobbies\n\n if(profile.id in favorited):\n jsonProduct['heat'] = True\n else:\n jsonProduct['heat'] = False\n\n jsonData.append(jsonProduct)\n\n # return JsonResponse(jsonData, safe=False)\n\n # Sort the json data by common hobbies\n jsonData = sorted(\n jsonData, key=lambda k: k['commonhobbies'], reverse=True)\n jsonData = json.dumps(jsonData)\n return HttpResponse(jsonData, content_type=\"application/json\")\n\n return JsonResponse({\"success\": False})\n\n\ndef apiHobby(request, id):\n if request.method == \"GET\":\n res = Hobby.objects.filter(pk=id)\n res = serializers.serialize('json', res)\n return HttpResponse(res, content_type=\"application/json\")\n\n return JsonResponse({\"success\": False})\n\n\ndef apiHobbies(request):\n if request.method == \"GET\":\n res = Hobby.objects.all()\n res = serializers.serialize('json', res)\n return HttpResponse(res, content_type=\"application/json\")\n\n\ndef apiRegister(request):\n if request.method == \"POST\":\n uform = UserRegisterForm(request.POST)\n pform = ProfileUpdateCreate(request.POST)\n\n if uform.is_valid() and pform.is_valid():\n uform.save()\n user = uform.instance\n pform = ProfileUpdateCreate(request.POST, instance=user.profile)\n pform.save()\n\n # pform.user = uform\n # pform.save()\n # print(uform)\n # print(pform)\n\n # Welcome Email Details\n firstName = user.first_name\n lastName = user.last_name\n email = user.email\n subject = 'Thank you for registering with MatchMaker!'\n fromemail = 'no-reply@neshanthan.com'\n\n context = {\n 'firstName': firstName,\n 'lastName': lastName\n }\n\n # Send Email to user when new heat recieved\n send_mail(\n subject,\n render_to_string('emails/welcome.txt', context),\n fromemail,\n [email],\n fail_silently=False,\n )\n\n return JsonResponse({\"success\": True, \"redirect\": \"login/\"})\n else:\n print(\"not valid\")\n return JsonResponse({\"success\": False, \"errors\": str(uform.errors) + \" \" + str(pform.errors)})\n else:\n return JsonResponse({\"success\": False})\n\n@login_required\ndef apiNotifications(request):\n profile = User.objects.get(id=request.user.id).profile\n # If user then change prevHeats and tell user how many new ones since last time\n newHeats = profile.user_heat.count()-profile.prevHeat\n profile.prevHeat = profile.user_heat.count()\n\n # Newmatches\n newMatches = profile.newMatches\n profile.newMatches = 0\n\n profile.save()\n\n 
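# return the new-heat and new-match counts accumulated since the client's last poll (the reset counters were saved above)\n 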
return JsonResponse({\"newheats\": newHeats, \"newmatches\": newMatches})\n\n\n\n\n@login_required\ndef apiProfileIDHeat(request):\n if request.method == \"POST\":\n # request.PUT = QueryDict(request.body)\n if(request.POST.get(\"username\") != None):\n username = request.POST['username']\n profile = Profile.objects.get(user=username)\n request.user.profile.heat.add(profile)\n \n # Email Details\n firstName = profile.user.first_name\n lastName = profile.user.last_name\n email = profile.user.email\n fromemail = 'no-reply@neshanthan.com'\n\n context = {\n 'firstName': firstName,\n 'lastName': lastName\n }\n \n # If the user being liked likes the person liking him then add new match notification on both\n if request.user.profile in profile.heat.all():\n request.user.profile.newMatches += 1\n profile.newMatches += 1 \n\n # Save Changes\n request.user.profile.save()\n profile.save()\n\n #Send email saying new match has been made to other user\n subject = 'You have matched with someone!'\n send_mail(\n subject,\n render_to_string('emails/matches.txt', context),\n fromemail,\n [email],\n fail_silently=False,\n )\n else:\n #Send new email to other user for new heat recieved\n subject = 'Someone has given you some heat!'\n send_mail(\n subject,\n render_to_string('emails/newheat.txt', context),\n fromemail,\n [email],\n fail_silently=False,\n )\n\n return JsonResponse({\"success\": True})\n else:\n return JsonResponse({\"success\": False})\n\n elif request.method == \"DELETE\":\n delete = QueryDict(request.body)\n if(delete.get(\"username\") != None):\n username = delete['username']\n profile = Profile.objects.get(user=username)\n request.user.profile.heat.remove(profile)\n\n # take away 1 from prevHeat so you will get notfication on new heats\n profile.prevHeat = profile.prevHeat-1\n if profile.prevHeat < 0:\n profile.prevHeat = 0\n \n profile.save()\n\n return JsonResponse({\"success\": True})\n else:\n return JsonResponse({\"success\": False})\n else:\n return JsonResponse({\"success\": False})\n","repo_name":"thomasbunyan/matchmaking-site","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"10773086974","text":"import argparse\nimport os\nimport json\nfrom glob import glob\nimport logging\nfrom datetime import datetime\n\nfrom sf_daq_broker.writer.bsread_writer import write_from_imagebuffer, write_from_databuffer_api3\nfrom sf_daq_broker.utils import get_data_api_request\nfrom sf_daq_broker import config\n\n#logger = logging.getLogger(\"data_api3\")\nlogger = logging.getLogger(\"broker_writer\")\nlogger.setLevel(\"INFO\")\n#logger.setLevel(\"DEBUG\")\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--source\", default=\"image\", type=str, help=\"retrieve from image or data buffer (possible values data_api3, image, epics)\")\nparser.add_argument(\"--run_info\", default=None, type=str, help=\"run_info json file\")\nargs = parser.parse_args()\n\nsource = None\nif args.source == \"image\":\n source = \"image\"\nelif args.source == \"data_api3\":\n source = \"data_api3\"\nelif args.source == \"epics\":\n source = \"epics\"\n \nif args.run_info is None:\n print(\"provide run info file\")\n exit(1)\n\nif not os.path.exists(args.run_info):\n print(f'{args.run_info} is not reachable or available')\n exit(1)\n\nwith open(args.run_info, \"r\") as read_file:\n run_info = json.load(read_file)\n\nif source == \"image\":\n if \"camera_list\" not in 
run_info:\n print(\"No cameras defined in run_info file\")\n exit(1)\n channels = run_info.get(\"camera_list\", [])\nelif source == \"data_api3\":\n if \"channels_list\" not in run_info:\n print(\"No BS channels defined in run_info file\")\n exit(1)\n channels = run_info.get(\"channels_list\", [])\nelse:\n if \"pv_list\" not in run_info:\n print(\"No PV channels defined in run_info file\")\n exit(1)\n channels = run_info.get(\"pv_list\", [])\n\nstart_pulse_id = run_info[\"start_pulseid\"]\nstop_pulse_id = run_info[\"stop_pulseid\"]\n\ndata_request = {}\ndata_request[\"range\"] = {}\ndata_request[\"range\"][\"startPulseId\"] = run_info[\"start_pulseid\"]\ndata_request[\"range\"][\"endPulseId\"] = run_info[\"stop_pulseid\"]\ndata_request[\"channels\"] = [{'name': ch, 'backend': config.IMAGE_BACKEND if ch.endswith(\":FPICTURE\") else config.DATA_BACKEND}\n for ch in channels]\n\nrun_number = run_info.get(\"run_number\", 0)\nacquisition_number = run_info.get(\"acquisition_number\", 0)\n\nparameters = None\n\nlist_data_directories_run = glob(f'/sf/{run_info[\"beamline\"]}/data/{run_info[\"pgroup\"]}/raw/run{run_number:04}*')\nif len(list_data_directories_run) != 1:\n print(f\"Ambiguous data directries : {list_data_directories_run}\")\n exit()\ndata_directory=list_data_directories_run[0]\n\nif source == \"image\":\n output_file = f'{data_directory}/data/acq{acquisition_number:04}.CAMERAS.h5.2'\n\n write_from_imagebuffer(data_request, output_file, parameters)\n\nelif source == \"data_api3\":\n output_file = f'{data_directory}/data/acq{acquisition_number:04}.BSDATA.h5.2'\n\n write_from_databuffer_api3(data_request, output_file, parameters)\n\nelse:\n output_file = f'{data_directory}/data/acq{acquisition_number:04}.PVCHANNELS.h5'\n\n metadata = {\n \"general/user\": run_info[\"pgroup\"],\n \"general/process\": __name__,\n \"general/created\": str(datetime.now()),\n \"general/instrument\": run_info[\"beamline\"]\n }\n\n print(\"post-retrieve for EPICS-BUFFER is not implemented\")\n","repo_name":"paulscherrerinstitute/sf_daq_broker","sub_path":"sf_daq_broker/writer/post_retrieve.py","file_name":"post_retrieve.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"39071071909","text":"import streamlit as st\nimport pandas as pd\nfrom st_aggrid import AgGrid\nfrom st_aggrid.grid_options_builder import GridOptionsBuilder\nimport re\n\n\nimport numpy as np\nimport math\nimport datetime\n\nst.set_page_config(layout=\"wide\")\n\n\n@st.cache\ndef fetch_and_clean_data():\n sonarcube = \"\"\"https://docs.google.com/spreadsheets/d/1jea9N1mL8T8lOH37v1aB3ZRJ31JRS7Es5J3ofLsqlfk/gviz/tq?tqx=out:csv&sheet=sonarcube\"\"\"\n sonarcube = pd.read_csv(sonarcube).drop_duplicates(keep = 'first')\n sonarcube.date_detected = pd.to_datetime(sonarcube['date_detected']).dt.date\n sonarcube['source'] = 'sonarcube'\n\n zap = \"\"\"https://docs.google.com/spreadsheets/d/1jea9N1mL8T8lOH37v1aB3ZRJ31JRS7Es5J3ofLsqlfk/gviz/tq?tqx=out:csv&sheet=zap\"\"\"\n zap = pd.read_csv(zap).drop_duplicates(keep = 'first')\n zap.date_detected = pd.to_datetime(zap['date_detected']).dt.date\n zap['source'] = 'zap'\n\n burp = \"\"\"https://docs.google.com/spreadsheets/d/1jea9N1mL8T8lOH37v1aB3ZRJ31JRS7Es5J3ofLsqlfk/gviz/tq?tqx=out:csv&sheet=burp\"\"\"\n burp = pd.read_csv(burp).drop_duplicates(keep = 'first')\n burp.date_detected = pd.to_datetime(burp['date_detected']).dt.date\n burp['source'] = 'burp'\n\n df = pd.concat([sonarcube, zap, burp], 
axis=0)\n date_lst = list(df.date_detected.unique())\n date_lst.sort(reverse=True)\n\n return df, date_lst\n\ndf, date_lst = fetch_and_clean_data()\n\n\n\n\n##### loading data \n\n\n\n\n\n\nst.title(\"Analysis Dashboard\")\n\nif st.button('Display data'):\n AgGrid(df)\n\n\ndate_lst = list(df.date_detected.unique())\nanalysis_dt = st.selectbox('Select Analysis Date', date_lst)\n\n#error_cat = ['sql injection','cookies present without secure flag','private ip in html', 'http parameter override', 'cross domain javascript source file present']\n\n\n\n\n#df = pd.melt(df,id_vars=['date_detected'],var_name=['source'], value_name='description').dropna()\ndf1 = df.groupby(['type'], as_index=False)['description','date_detected','source'].agg(lambda x: list(set(x)))\n\na =df1[df1.type.isin(df[df.date_detected == analysis_dt]['type'])]\na['is_new'] = a.date_detected.apply(lambda x: len(x)<2 or min(x) == analysis_dt)\n\n#print(('\\n').join(['a','b','c']))\n\na['date_detected'] = a['date_detected'].apply(lambda x: (\"\"\"; \\n\"\"\").join(j.strftime(\"%Y-%m-%d\") for j in x ))\na['description'] = a['description'].apply(lambda x: (\"; \\n\").join(j for j in x ))\na['source'] = a['source'].apply(lambda x: (\"; \\n\").join(j for j in x ))\n\n\na.columns = ['Vulnerability', 'Possible Descriptions', 'Observation Dates', 'Sources', 'Is New']\na = a[['Vulnerability', 'Sources', 'Is New', 'Observation Dates','Possible Descriptions']]\n#a['date_detected'] = (', \\n').join(['aaa','bbbbb','cbbbb'])\n#bug = 'Cross-Domain JavaScript Source File Inclusion Cross-Domain JavaScript Source File Inclusion '\n\n\n#analysis_dt\n \n#temp = df[df.date_detected == analysis_dt].sort_values(by=['date_detected'])\n \n#temp = df[df.date_detected == analysis_dt].sort_values(by=['date_detected']).drop_duplicates(subset=['vulnerability_type','source'], keep = 'first').loc[:, [ 'source', 'vulnerability', 'vulnerability_type']].reset_index().drop('index', axis = 1)\n#st.dataframe(a)\n#st.table(a)\n\nAgGrid(a)\n\n\n\n#st.code('for i in range(8): foo()')\n\n\n#st.code('for i in range(8): foo()')\n\n\nst.subheader('Google Sheet')\nst.code('https://docs.google.com/spreadsheets/d/1jea9N1mL8T8lOH37v1aB3ZRJ31JRS7Es5J3ofLsqlfk/edit?usp=sharing')\n\n\n\n\n# option = st.selectbox(\n# ... 'How would you like to be contacted?',\n# ... ('Email', 'Home phone', 'Mobile phone'))\n\n\n# st.session_state.clb_nbr = st.selectbox('Select Club Number', list(df.club_nbr.unique()))\n# st.session_state.wght= st.slider(\"Enter Sales Contribution (Membership Contribution = 100 - Sales Contribution)\",value = 88, min_value=0, max_value=100)\n# st.session_state.capacity = int(df_capacity[df_capacity.club_nbr == st.session_state.clb_nbr].total_pal.iloc[0])\n\n# st.session_state.df = df.query(\"club_nbr == @st.session_state.clb_nbr\").loc[:,['category_nbr', 'n_pal' , 'se_member', 'se_sales']].copy()\n# st.session_state.df = pd.melt(st.session_state.df, id_vars=['category_nbr','n_pal'], value_vars=['se_member', 'se_sales'], var_name='measure_name', value_name='measure').copy()\n# df_cat_temp = df_cat.query(\"club_nbr == @st.session_state.clb_nbr\").loc[:,['category_nbr', 'n_palamax' , 'n_palamin']].copy()\n\n# #st.session_state. 
\n# #st.session_state.\n# #######################################################\n# #df_cat_temp = df_cat[df_cat.club_nbr == clb_nbr]\n# #st.session_state.flag = False\n\n\n# def optimize():\n# # if 'df_cat' in st.session_state:\n# # st.dataframe(st.session_state.df_cat)\n# fract_dict = {'se_member': 0.5106788995796772, 'se_sales': 0.48932110042032284}\n# weight = {'se_sales':st.session_state.wght, 'se_member':(100 -st.session_state.wght)}\n \n# input_df_temp = (st.session_state.df\n# .assign(measure=st.session_state.df[['measure','measure_name']].groupby('measure_name').transform(lambda x: x / x.sum())\n# .loc[:,'measure'] * (st.session_state.df.measure_name.map(weight)) * (st.session_state.df.measure_name.map(fract_dict)) ) \n# .drop('measure_name', 1)\n# ).copy()\n\n# #summing up measures per category per club\n# input_df_temp = (input_df_temp.groupby(['category_nbr', 'n_pal'], as_index=False)[[\"measure\"]].sum()).copy()\n\n# #display(input_df_temp)\n\n# mat = (input_df_temp.assign(ind=1)\n# .pivot(columns = 'category_nbr', values = 'ind')\n# .rename_axis(None, axis = 'columns')\n# .transpose()\n# .fillna(0)\n# .to_numpy()\n# ) \n# nrow = input_df_temp.shape[0]\n\n\n\n\n\n# model = pyo.ConcreteModel()\n# model.x = pyo.Var(range(nrow), domain= pyo.Binary)\n\n# model.constraints = pyo.ConstraintList()\n# for arow in mat:\n# model.constraints.add( pyo.summation(arow , model.x) == 1)\n# model.constraints.add( pyo.summation(list(input_df_temp['n_pal']) , model.x) <= st.session_state.capacity)\n# model.obj_sales = pyo.Objective(expr = pyo.summation(list(input_df_temp['measure']), model.x), sense = pyo.maximize) \n# results = SolverFactory('glpk').solve(model)\n\n# solution = [model.x[j].value for j in range(nrow)]\n# df_sol = (input_df_temp.loc[:, ['category_nbr', 'n_pal']]\n# .assign(solution = solution)\n# .query('solution == 1')\n# .drop(columns = 'solution')\n# .rename(columns = {'n_pal': 'n_pal_opt'})\n# )\n \n# #df_sol.index = [\"\"] * len(df_sol)\n# gb = GridOptionsBuilder.from_dataframe(df_sol)\n \n# AgGrid(df_sol)\n\n# #st.dataframe(df_sol)\n\n\n\n\n\n\n# def foo():\n# if st.button('Edit Constraints'):\n# st.session_state.flag = True\n# edit_constraints()\n# if st.button('Optimize'):\n# optimize()\n\n\n \n# def edit_constraints():\n# if st.button('Review Optimization'):\n# del st.session_state['flag']\n# foo()\n# return 0\n\n# st.subheader('Capacity')\n# st.session_state.capacity = st.slider(\"Capacity \", min_value=1000, max_value=2000, value=st.session_state.capacity )\n\n\n\n\n# lst = []\n# st.subheader('Mimimum/Maximum pallet count for each category')\n\n# for i in list(df_cat_temp.category_nbr.unique()):\n# c, d = st.slider(\"Category \"+str(i),value = [int(df_cat_temp.query(\" category_nbr == {}\".format(int(i))).n_palamin.iloc[0]),int(df_cat_temp.query(\" category_nbr == {}\".format(int(i))).n_palamax.iloc[0])])\n# lst.append([i,c,d]) \n# st.session_state.df_cat = pd.DataFrame(lst, columns =['Category', 'n_pal_min', 'n_pal_max'])\n\n\n\n\n\n# # PAGES = {\n# # \"App1\": foo,\n# # \"Edit Constrains\": edit_constraints\n# # }\n\n# #st.sidebar.title('Navigation')\n\n# #selection = st.sidebar.radio(\"Go to\", list(PAGES.keys()))\n# # page = PAGES[selection]\n\n# # page()\n# if 'flag' not in st.session_state:\n# foo()\n# else:\n# edit_constraints()","repo_name":"inside-job/armorcode_ml","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} 
+{"seq_id":"35128233908","text":"#!/usr/bin/python3\nfrom ufal import morphodita\n\nif __name__ == \"__main__\":\n testfilecontent = open(\"../../corpus_data/ebooks_corpus_CZ/few_sentences.txt\").read()\n tf = \"../../contrib/preprocessing/cz_morphodita/models/czech-morfflex-pdt-160310.tagger\"\n tagger = morphodita.Tagger.load(tf)\n forms = morphodita.Forms()\n lemmas = morphodita.TaggedLemmas()\n tranges = morphodita.TokenRanges()\n tokenizer = tagger.newTokenizer()\n processed_chunk = []\n stopwords_file = \"../../contrib/preprocessing/cz_stopwords/czechST.txt\"\n stopwords = open(stopwords_file, encoding=\"utf-8\", mode='r').read().splitlines()\n tokenizer.setText(\"pravdě je velmi zvláštní důkaz zrzavých genocida nedotkla jaké teď plány vyptával ho stále týž civilista a mírně něho usmál jedna věc jistá panamerické nevrátím projekt lady diany naprosto nesmyslná záležitost a následek polovina až čtvrtiny světové energie spotřebovány k jedinému účelu k vybudování obřího tunelu sever jih umožnit přístup k bohatství skrytému pod ledem k d.ol ům naftovým zdrojům lesům nalezištím přírodních surovin takového šílenství nezúčastním výpadek energie způsobit polovina lidstva i třetiny vymře nemyslím si stavba neobejde přinejmenším však dojde k velkému zpoždění žádám o politický azyl tím mohu desítkám milionů lidí zachránit život si lady diana najde náhradu\")\n while tokenizer.nextSentence(forms, tranges):\n tagger.tag(forms, lemmas)\n for i in range(len(lemmas)):\n lemma = lemmas[i]\n lemmatized = tagger.getMorpho().rawLemma(lemma.lemma)\n if lemma.tag[0] != \"Z\" and forms[i] not in stopwords:\n processed_chunk.append(lemmatized)\n print(\" \".join(processed_chunk))\n","repo_name":"MFajcik/NLP-FIT","sub_path":"nlpfit/demos/morphodita_demo.py","file_name":"morphodita_demo.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71843532500","text":"#!/usr/bin/env python3\nimport unittest\nfrom R_ev3dev import ev3_interpreter\n\n\nclass TestInfrared(unittest.TestCase):\n def test_get_value_with_channel(self):\n i = ev3_interpreter()\n i.evaluate_internal(\"infrared 1 on #1\")\n ir_value = i.evaluate(\"infrared 1 distance 2\")\n self.assertEqual(ir_value, 'value int 46')\n\n def test_get_value_without_channel(self):\n i = ev3_interpreter()\n i.evaluate_internal(\"infrared 1 on #1\")\n ir_value = i.evaluate(\"infrared 1 distance\")\n self.assertEqual(ir_value, 'value int 45')\n\n\nclass TestColor(unittest.TestCase):\n def test_get_value(self):\n i = ev3_interpreter()\n color = i.evaluate_internal(\"color 1 on #1\").value\n self.assertEqual(color.color, 0)\n color.color = 9\n color_value = i.evaluate(\"color 1 color\")\n self.assertEqual(color_value, 'value int 9')\n\n\nclass TestGyro(unittest.TestCase):\n def test_angle(self):\n i = ev3_interpreter()\n gyro = i.evaluate_internal(\"gyro 1 on #1\").value\n self.assertEqual(gyro.angle, 45)\n gyro.angle = 90\n value = i.evaluate(\"gyro 1 angle\")\n self.assertEqual(value, 'value int 90')\n\n\nclass TestTouch(unittest.TestCase):\n def test_is_pressed(self):\n i = ev3_interpreter()\n touch = i.evaluate_internal(\"touch 1 on #1\").value\n self.assertEqual(touch.is_pressed, True)\n touch.is_pressed = False\n value = i.evaluate(\"touch 1 is_pressed\")\n self.assertEqual(value, 'value boolean False')\n\n\nclass TestListSensors(unittest.TestCase):\n def test_list(self):\n i = ev3_interpreter()\n self.assertEqual(i.evaluate_internal('list_sensors').value, [\n 
{'address': 'in1', 'driver_name': 'lego-ev3-touch'},\n {'address': 'in2', 'driver_name': 'lego-ev3-ir'},\n {'address': 'in3', 'driver_name': 'lego-ev3-color'},\n ])\n","repo_name":"thomasvolk/R_ev3dev","sub_path":"tests/test_sensor.py","file_name":"test_sensor.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"11888056921","text":"\nfrom ..form.build.map import ObjectMapper, TypeMap\nfrom ..form.build.builders import GroupBuilder\n\n\ndef decode(val):\n if isinstance(val, bytes):\n return val.decode('UTF-8')\n else:\n return val\n\n\nclass ObjectMapperLegacy(ObjectMapper):\n\n @ObjectMapper.constructor_arg('source')\n def source_gettr(self, builder, manager):\n\n if 'source' in builder.attributes:\n return builder.attributes['source']\n else:\n return 'No known source'\n\n\nclass TypeMapLegacy(TypeMap):\n\n def get_builder_dt(self, builder):\n\n if builder.name == 'roi_ids':\n pass\n elif builder.name == 'root':\n return 'NWBFile'\n attrs = builder.attributes\n ndt = attrs.get('neurodata_type')\n if ndt == 'Module':\n return 'ProcessingModule'\n elif ndt == 'TimeSeries':\n ancestry = attrs['ancestry']\n if decode(ancestry[-1]) == 'TwoPhotonSeries' and decode(builder.name) == 'corrected':\n return 'ImageSeries'\n else:\n return decode(ancestry[-1])\n\n elif ndt == 'Interface':\n return builder.name\n if ndt == 'Epoch':\n return 'Epoch'\n else:\n parent_ndt = self.get_builder_dt(builder.parent)\n if parent_ndt == 'Epoch':\n return 'EpochTimeSeries'\n if parent_ndt == 'MotionCorrection':\n return 'CorrectedImageStack'\n if parent_ndt == 'ImagingPlane' and isinstance(builder, GroupBuilder):\n return 'OpticalChannel'\n if parent_ndt == 'ImageSegmentation':\n if builder.name in ('roi_ids', 'cell_specimen_ids'):\n return None\n else:\n return 'PlaneSegmentation'\n\n else:\n if parent_ndt == 'PlaneSegmentation':\n if builder.name in ('roi_list', 'imaging_plane_name'):\n return None\n else:\n return 'ROI'\n\n parent_names = {\n 'extracellular_ephys': 'ElectrodeGroup',\n 'intracellular_ephys': 'IntracellularElectrodeGroup',\n 'optophysiology': 'ImagingPlane',\n 'optogenetics': 'OptogeneticStimulusSite'\n }\n return decode(parent_names.get(builder.parent.name))\n return None\n\n def get_builder_ns(self, builder):\n return 'core'\n","repo_name":"q0j0p/pynwb","sub_path":"src/pynwb/legacy/map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"7709378644","text":"import time\nimport RPi.GPIO as GPIO\n\n# doesn't use pigpio library. 
Only steps, no pwm\n\nclass Motor2:\n CW = 1 # Clockwise Rotation\n CCW = 0 # Counterclockwise Rotation\n SPR = 200 # Steps per Revolution (360 / 1.8)\n\n RESOLUTION = {'Full': (0, 0, 0),\n 'Half': (1, 0, 0),\n '1/4': (0, 1, 0),\n '1/8': (1, 1, 0),\n '1/16': (0, 0, 1),\n '1/32': (1, 0, 1)}\n\n delays = {1: 0.01, 2: 0.005, 3: 0.002, 4:0.001, 5:0.0004}\n\n def __init__(self, DIR_PIN, STEP_PIN, rotation=CW, resolution=\"Full\", mode=(14,15,18)):\n\n self.DIR_PIN = DIR_PIN\n self.STEP_PIN = STEP_PIN\n self.resolution = resolution\n self.mode = mode\n self.gpio_setup()\n self.set_dir(rotation)\n\n\n def gpio_setup(self):\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.DIR_PIN, GPIO.OUT)\n GPIO.setup(self.STEP_PIN, GPIO.OUT)\n GPIO.setup(self.mode, GPIO.OUT)\n GPIO.output(self.mode, Motor2.RESOLUTION[self.resolution])\n\n\n def set_dir(self, rotation):\n GPIO.output(self.DIR_PIN, rotation)\n \n\n def go(self, steps=200, step_delay=.001, rev=None):\n if rev:\n steps = (int) (rev * Motor2.SPR)\n step_delay = max(step_delay, 0.0006) # tested minimum delay of 0.0006\n # step_delay = 0.0208/32\n\n for x in range(steps):\n GPIO.output(self.STEP_PIN, GPIO.HIGH)\n time.sleep(step_delay)\n GPIO.output(self.STEP_PIN, GPIO.LOW)\n time.sleep(step_delay)\n print(\"go\", x)\n\n\n def spin(self, rev, speed, is_cw=True):\n \"\"\" Simplified interface \"\"\"\n self.set_dir(Motor2.CW if is_cw else Motor2.CCW)\n if type(speed) is not int or speed not in (1,2,3,4,5):\n print(\"Your speed {} is invalid\".format(speed))\n speed = 2\n self.go(step_delay=Motor2.delays[speed], rev=rev)\n \n\n\n def exit(self):\n GPIO.cleanup()\n","repo_name":"williammlu/toasty_bot","sub_path":"run/motor2.py","file_name":"motor2.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"33464692186","text":"from time import sleep\nqtd_brancos = 0 # counter += 1\nqtd_pardos = 0 # counter += 1\nqtd_negros = 0 # counter += 1\nhomens_maior = 0 # counter += 1\nmulher_negra = 0 # counter += 1\nfor p in range(1, 8):\n print(f'------Record {p}------')\n nome = str(input('NAME: ')).strip().upper()\n idade = int(input('AGE: '))\n cor = str(input('COLOR: ')).strip().upper()\n sexo = str(input('SEX[M/F]: ')).upper()[0]\n if cor in 'BROWN':\n qtd_pardos += 1\n if cor in 'BLACK':\n qtd_negros += 1\n if cor in 'WHITE':\n qtd_brancos += 1\n if sexo in 'M' and idade > 21:\n homens_maior += 1\n if sexo in 'F' and cor in 'BLACK' and idade < 40:\n mulher_negra += 1\n sleep(2)\nprint('ANALYZING THE PROVIDED DATA...')\nprint('PLEASE WAIT...')\nsleep(3)\nprint('ANALYSIS RESULTS:')\nprint(f'Number of brown people: {qtd_pardos}')\nprint(f'Number of black people: {qtd_negros}')\nprint(f'Number of white people: {qtd_brancos}')\nprint(f'Men over 21 years old: {homens_maior}')\nprint(f'Black women under 40 years old: {mulher_negra}')\n\n","repo_name":"ClaudionorMeira/analisadores1","sub_path":"p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26337459255","text":"from datetime import datetime\nfrom typing import Optional, List\n\nfrom sqlalchemy import ForeignKey, UniqueConstraint\nfrom sqlalchemy.orm import mapped_column, relationship, Mapped\n\nfrom ...source.sql import DataSource\nfrom ...utils.sql import BLOB, JSON, UTCDateTime\n\n\n
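# Each cache table below pairs its payload (BLOB or JSON) with an indexed update_time used for freshness checks\n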
@DataSource.registry.mapped\nclass DownloadCache:\n __tablename__ = \"download_cache\"\n\n illust_id: Mapped[int] = mapped_column(primary_key=True)\n page: Mapped[int] = mapped_column(primary_key=True, default=0)\n content: Mapped[bytes] = mapped_column(BLOB)\n\n update_time: Mapped[datetime] = mapped_column(UTCDateTime, index=True)\n\n\n@DataSource.registry.mapped\nclass IllustDetailCache:\n __tablename__ = \"illust_detail_cache\"\n\n illust_id: Mapped[int] = mapped_column(primary_key=True)\n illust: Mapped[dict] = mapped_column(JSON)\n\n update_time: Mapped[datetime] = mapped_column(UTCDateTime, index=True)\n\n\n@DataSource.registry.mapped\nclass UserDetailCache:\n __tablename__ = \"user_detail_cache\"\n\n user_id: Mapped[int] = mapped_column(primary_key=True)\n user: Mapped[dict] = mapped_column(JSON)\n\n update_time: Mapped[datetime] = mapped_column(UTCDateTime, index=True)\n\n\n@DataSource.registry.mapped\nclass IllustSetCache:\n __tablename__ = \"illust_set_cache\"\n\n id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)\n cache_type: Mapped[str]\n key: Mapped[dict] = mapped_column(JSON)\n\n update_time: Mapped[datetime] = mapped_column(UTCDateTime, index=True)\n pages: Mapped[Optional[int]]\n next_qs: Mapped[Optional[dict]] = mapped_column(JSON)\n\n illust_id: Mapped[List[\"IllustSetCacheIllust\"]] = relationship(foreign_keys=\"IllustSetCacheIllust.cache_id\",\n cascade=\"save-update, delete\",\n passive_deletes=True)\n\n size: Mapped[int] = mapped_column(default=0)\n\n __table_args__ = (\n UniqueConstraint('cache_type', 'key'),\n )\n\n\n@DataSource.registry.mapped\nclass IllustSetCacheIllust:\n __tablename__ = \"illust_set_cache_illust\"\n\n cache_id: Mapped[int] = mapped_column(ForeignKey(\"illust_set_cache.id\", ondelete=\"cascade\"), primary_key=True)\n illust_id: Mapped[int] = mapped_column(primary_key=True)\n rank: Mapped[int] = mapped_column(default=0)\n\n\n@DataSource.registry.mapped\nclass UserSetCache:\n __tablename__ = \"user_set_cache\"\n\n id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)\n cache_type: Mapped[str]\n key: Mapped[dict] = mapped_column(JSON)\n\n update_time: Mapped[datetime] = mapped_column(UTCDateTime, index=True)\n pages: Mapped[Optional[int]]\n next_qs: Mapped[Optional[dict]] = mapped_column(JSON)\n\n illust_id: Mapped[List[\"UserSetCacheUser\"]] = relationship(foreign_keys=\"UserSetCacheUser.cache_id\",\n cascade=\"save-update, delete\",\n passive_deletes=True)\n\n __table_args__ = (\n UniqueConstraint('cache_type', 'key'),\n )\n\n\n@DataSource.registry.mapped\nclass UserSetCacheUser:\n __tablename__ = \"user_set_cache_user\"\n\n cache_id: Mapped[int] = mapped_column(ForeignKey(\"user_set_cache.id\", ondelete=\"cascade\"), primary_key=True)\n user_id: Mapped[int] = mapped_column(primary_key=True)\n","repo_name":"bot-ssttkkl/nonebot-plugin-pixivbot","sub_path":"src/nonebot_plugin_pixivbot/data/pixiv_repo/local_repo/sql_models.py","file_name":"sql_models.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"12"} +{"seq_id":"26852088536","text":"# Task 2. Take a natural number N. Write a program\n# that builds a list of the prime factors of N.\n
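# (e.g. N = 12 gives 12 = 2*2*3, found by repeated trial division starting from 2)\n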
\nn = int(input('Enter a number N: '))\nm = n # keep the original value for the output\nlst = [] # create an empty list for the factors\nmultip = 2 # trial divisor\nwhile n > 1:\n if n % multip == 0: # while it divides by the factor without remainder..\n lst.append(multip) # .. append the factor to the list\n n = int(n / multip) # divide by the factor\n else:\n multip += 1 # if it is not divisible by the factor, increase the factor by 1\nprint(f'{m} = ', end='')\nprint(*lst, sep='*')","repo_name":"aleks231979/python-seminar4","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29656663795","text":"# scraping for jobs on one page posted a few days ago\r\n# Adding some more functionality in scraping_3.py\r\n\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nprint('Put some unfamiliar skills')\r\nUnfamiliar_skill = input('>')\r\nprint(f'filtering out {Unfamiliar_skill}') \r\n\r\nurl = '''https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='''\r\nhtml_text = requests.get(url).text\r\n#print(html_text)\r\nsoup = BeautifulSoup(html_text,'lxml')\r\njobs = soup.find_all('li', class_ = 'clearfix job-bx wht-shd-bx')\r\n\r\n# looking for each job in jobs\r\nno_of_jobs = 0\r\nfor job in jobs:\r\n posted_on = job.find('span', class_ = 'sim-posted').text\r\n \r\n if 'few' in posted_on:\r\n no_of_jobs += 1\r\n company_name = job.find('h3', class_ = 'joblist-comp-name').text.rstrip().lstrip()\r\n print(company_name)\r\n key_skills = [i for i in job.find('span', class_ = 'srp-skills').text.split() if len(i)>1]\r\n more_info = job.header.h2.a['href']\r\n \r\n if Unfamiliar_skill not in key_skills:\r\n \r\n print(f'For More info check --> {more_info}')\r\n print(f'''Company Name: {company_name}, \\nRequired_skills: {key_skills}''')\r\n print('*' * 20)\r\n \r\n#print(no_of_jobs)\r\n","repo_name":"chiragpalan/WebScraping","sub_path":"scraping_4.py","file_name":"scraping_4.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1929652801","text":"#!/usr/bin/env python\r\n\r\n__author__ = \"Andrea Fioraldi\"\r\n__copyright__ = \"Copyright 2017, Andrea Fioraldi\"\r\n__license__ = \"BSD 2-Clause\"\r\n__email__ = \"andreafioraldi@gmail.com\"\r\n\r\nfrom setuptools import setup\r\n\r\nVER = \"1.0.14\"\r\n\r\nsetup(\r\n name='angrgdb',\r\n version=VER,\r\n license=__license__,\r\n description='Use angr inside GDB. Create an angr state from the current debugger state. 
',\r\n author=__author__,\r\n author_email=__email__,\r\n url='https://github.com/andreafioraldi/angrgdb',\r\n download_url = 'https://github.com/andreafioraldi/angrgdb/archive/' + VER + '.tar.gz',\r\n package_dir={'angrgdb': 'angrgdb'},\r\n packages=['angrgdb'],\r\n install_requires=['angrdbg'],\r\n)\r\n","repo_name":"andreafioraldi/angrgdb","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"12"} +{"seq_id":"14542879879","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # monotonic stack\n def nextLargerNodes(self, head: ListNode) -> List[int]:\n l_head = head\n n = 0 # length\n # reverse the linked list\n prev = None\n while l_head:\n next = l_head.next\n l_head.next = prev\n prev = l_head\n l_head = next\n n += 1\n \n ans= [0] * n\n stack = []\n \n # cur tracks the current index in the original list\n cur = n\n while prev: # traverse the reversed list\n cur -= 1\n while stack and stack[-1] <= prev.val:\n stack.pop()\n if stack:\n ans[cur] = stack[-1]\n stack.append(prev.val)\n\n prev = prev.next # next node of the list\n \n return ans\n ","repo_name":"qianOU/leetcode","sub_path":"1019. 链表中的下一个更大节点.py","file_name":"1019. 链表中的下一个更大节点.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"28527014433","text":"from flask import Blueprint, render_template\n\n__all__ = [\"ui_blueprint\"]\n\nui_blueprint = Blueprint(\n \"ui\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"static\",\n static_url_path=\"\"\n)\n\n\n@ui_blueprint.route(\"/\")\ndef home_page():\n return render_template(\"index.html\")\n","repo_name":"alexmikoto/itoko","sub_path":"src/itoko/ui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38361842737","text":"# Import libraries:\nimport pandas as pd\nimport pandas_gbq\nimport os \n\ndef BigQuery_Query(\n keys = None,\n dataset_name = None,\n table_name = None,\n project_id = None\n ):\n\n \"\"\"\n Query an entire table for a given dataset. You have to pass the\n path to the json file with the service account keys for your GCP\n project to authenticate. Otherwise, it will ask you to authenticate\n your account using end user auth and you will be required to specify\n a project id\\n.\n\n Params:\n\n keys : None or str. Path to the json file for your GCP service account.\n If None, it will try to infer the credentials from the env variables.\n\n dataset_name : str. Name of the BigQuery dataset. \n\n table_name : str. Name of the BigQuery table. \n\n project_id (optional) : str. Name of the project id for the GCP project.\n\n \"\"\"\n\n if keys:\n\n # Set GCP keys to env variable:\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=keys\n\n # Write a SQL query:\n SQL = \"\"\"\n SELECT * \n FROM \`{}\`\n \"\"\".format(dataset_name + '.' + table_name)\n\n # Execute query and save results in a pandas df:\n df = pandas_gbq.read_gbq(query=SQL)\n\n else:\n\n # Write a SQL query:\n SQL = \"\"\"\n SELECT * \n FROM \`{}\`\n \"\"\".format(dataset_name + '.' 
+ table_name)\n\n # Execute query and save results in a pandas df:\n df = pandas_gbq.read_gbq(query=SQL, project_id=project_id)\n\n print(f'Imported {table_name}, Table Dimensions: {df.shape}')\n return df\n","repo_name":"Shogun-187/Data-Engineering","sub_path":"GCP_Scripts/BigQuery_Query.py","file_name":"BigQuery_Query.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"19016825922","text":"from app import db\nfrom ..models import Item, ItemWareHouseAssociation, WareHouse\nfrom ..core import Service\n\n\nclass ItemService(Service):\n \"\"\"Service class for handling items.\"\"\"\n\n __model__ = Item\n\n def get_by_id(self, id):\n return Item.query.filter_by(item_id=id).first()\n\n def delete(self, id):\n \"\"\"Delete an item\"\"\"\n item = Item.query.filter_by(item_id=id).first()\n for assoc in item.warehouses.all():\n db.session.delete(assoc)\n db.session.delete(item)\n db.session.commit()\n\n def add_warehouse(self, item, item_id, warehouse_id, count_data):\n \"\"\"Add item to an warehouse\"\"\"\n assoc = ItemWareHouseAssociation.query.filter_by(\n item_id=item_id, warehouse_id=warehouse_id\n ).first()\n if assoc:\n raise Exception(\"This item and warehouse combination already exists.\")\n wh = WareHouse.query.filter_by(id=warehouse_id).first()\n i_wh_assc = ItemWareHouseAssociation(\n item=item, warehouse=wh, quantity=count_data\n )\n db.session.add(i_wh_assc)\n db.session.commit()\n\n def view_warehouses_for_item(self, item_id):\n \"\"\"View warehouses where an item is present.\"\"\"\n item = self.get_by_id(item_id)\n warehouse_list = []\n count_list = []\n warehouses = item.warehouses.all()\n for wh in warehouses:\n warehouse_list.append(wh.warehouse)\n count_list.append(wh.quantity)\n return item, zip(warehouse_list, count_list)\n","repo_name":"abhinavbh08/Shopify_Intern_Task","sub_path":"app/items/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43867495239","text":"def list_manipulation(lst, command, location, value=None):\n \"\"\"Mutate lst to add/remove from beginning or end.\n\n - lst: list of values\n - command: command, either \"remove\" or \"add\"\n - location: location to remove/add, either \"beginning\" or \"end\"\n - value: when adding, value to add\n\n remove: remove item at beginning or end, and return item removed\n\n >>> lst = [1, 2, 3]\n\n >>> list_manipulation(lst, 'remove', 'end')\n 3\n\n >>> list_manipulation(lst, 'remove', 'beginning')\n 1\n\n >>> lst\n [2]\n\n add: add item at beginning/end, and return list\n\n >>> lst = [1, 2, 3]\n\n >>> list_manipulation(lst, 'add', 'beginning', 20)\n [20, 1, 2, 3]\n\n >>> list_manipulation(lst, 'add', 'end', 30)\n [20, 1, 2, 3, 30]\n\n >>> lst\n [20, 1, 2, 3, 30]\n\n Invalid commands or locations should return None:\n\n >>> list_manipulation(lst, 'foo', 'end') is None\n True\n\n >>> list_manipulation(lst, 'add', 'dunno') is None\n True\n \"\"\"\n \"\"\" \n if command not in [add, remove] or location not in [beginning, end] return none\n\n If command = remove\n - find out if location is beginning or end\n - if beginning return lst.pop(0). 
\n - if end return lst.pop\n if command = add\n - if end lst.append(value) return lst\n - if beginning lst.insert(0, value) return lst\n \"\"\"\n #global constants so should move to top\n COMMANDS = [\"add\", \"remove\"]\n LOCATIONS = [\"beginning\", \"end\"]\n\n def command_add(): # move the function definitions outside of the function\n if location == \"end\":\n lst.append(value)\n else:\n lst.insert(0, value)\n return lst\n\n\n def command_remove():\n if location == \"end\":\n return lst.pop()\n else:\n return lst.pop(0)\n\n if not (command in COMMANDS) or not (location in LOCATIONS):\n return None\n \n if command == \"add\":\n return command_add()\n else:\n return command_remove()","repo_name":"samau3/python-ds-problems","sub_path":"08_list_manipulation.py","file_name":"08_list_manipulation.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29610828798","text":"# Positive number or Negative number \nnum = int(input(\"Enter a number to check positive or Negative:- \"))\nif num < 0:\n print(\"It is a Negative number\")\nelif num == 0:\n print(\"0 is a positive number\")\nelse:\n print(f'{num} number is positive number')\n \n \n \n \n# positive or negative number using functions\n\ndef positive_negative(num):\n if num < 0:\n print(f'{num} is negative number')\n else:\n print(f'{num} is positive number')\n \n \nnum = int(input('enter a number:- '))\npositive_negative(num)\n","repo_name":"guna7222/Learning-Python-","sub_path":"Interview_Programs/positive_number_or_negative_number.py","file_name":"positive_number_or_negative_number.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"36200314754","text":"'''\nfunction_tools.py\nCreated on Feb 2, 2016\n\n@author: aldeen berluti\nRandom various functions that are going to be used in the project and have no reason to appear in other files\n'''\nimport os\nimport sys\n\ndef get_api_key ():\n \"\"\" Get the API key from a file that has to be at the root of the Django project\"\"\"\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')\n \ndef get_exception_info (e):\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n return (e, exc_type, fname, exc_tb.tb_lineno)\n\n","repo_name":"aldeen/Flynmeet","sub_path":"search_controller/function_tools.py","file_name":"function_tools.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"20404508234","text":"from tkinter import Tk, Frame, Canvas, Scrollbar, HORIZONTAL, VERTICAL, BOTH, X, Y, BOTTOM, RIGHT, LEFT, S, N, W, E\nfrom numpy import arange, sin\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\nclass Test(Tk):\n def __init__(self):\n Tk.__init__(self, None)\n self.frame=Frame(None)\n self.frame.columnconfigure(0,weight=1)\n self.frame.rowconfigure(0,weight=1)\n\n self.frame.grid(row=0,column=0, sticky=W+E+N+S)\n\n fig = Figure()\n\n xval = arange(500)/10.\n yval = sin(xval)\n\n ax1 = fig.add_subplot(111)\n ax1.plot(xval, yval)\n\n self.hbar=Scrollbar(self.frame,orient=HORIZONTAL)\n self.vbar=Scrollbar(self.frame,orient=VERTICAL)\n\n self.canvas=FigureCanvasTkAgg(fig, 
master=self.frame)\n self.canvas.get_tk_widget().config(bg='#FFFFFF',scrollregion=(0,0,500,500))\n self.canvas.get_tk_widget().config(width=800,height=500)\n self.canvas.get_tk_widget().config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.get_tk_widget().grid(row=0, column=0, sticky=W+E+N+S)\n\n self.hbar.grid(row=1, column=0, sticky=W+E)\n self.hbar.config(command=self.canvas.get_tk_widget().xview)\n self.vbar.grid(row=0, column=1, sticky=N+S)\n self.vbar.config(command=self.canvas.get_tk_widget().yview)\n\n self.frame.config(width=100, height=100) # this has no effect\n\nif __name__ == '__main__':\n\n app = Test()\n app.mainloop()\n","repo_name":"QuinnZhao/study","sub_path":"projects/tkinter_demo/demo5.py","file_name":"demo5.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"23966027802","text":"import os\n\nimport pkg_resources\n\nOUTPUT = \"output\"\nHASH_PREFIX = \"a\"\nCONFIG_PARAM_NAME = \"/servicecatalog-factory/config\"\n\nPUBLISHED_VERSION = pkg_resources.require(\"aws-service-catalog-factory\")[0].version\nVERSION = os.getenv(\"SCF_VERSION_OVERRIDE\", PUBLISHED_VERSION)\n\nBOOTSTRAP_STACK_NAME = \"servicecatalog-factory\"\nBOOTSTRAP_TEMPLATES_STACK_NAME = \"servicecatalog-factory-templates\"\nSERVICE_CATALOG_FACTORY_REPO_NAME = \"ServiceCatalogFactory\"\nNON_RECOVERABLE_STATES = [\n \"ROLLBACK_COMPLETE\",\n \"CREATE_IN_PROGRESS\",\n \"ROLLBACK_IN_PROGRESS\",\n \"DELETE_IN_PROGRESS\",\n \"UPDATE_IN_PROGRESS\",\n \"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\",\n \"UPDATE_ROLLBACK_IN_PROGRESS\",\n \"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS\",\n \"REVIEW_IN_PROGRESS\",\n]\nPRODUCT = \"product.j2\"\nPRODUCT_TERRAFORM = \"product-terraform.j2\"\nTERRAFORM_TEMPLATE = \"terraform.template.yaml.j2\"\nHOME_REGION = os.environ.get(\n \"AWS_REGION\", os.environ.get(\"AWS_DEFAULT_REGION\", \"eu-west-1\")\n)\n\nRESULTS_DIRECTORY = \"results\"\n\nPIPELINE_MODE_COMBINED = \"combined\"\nPIPELINE_MODE_SPILT = \"split\"\nPIPELINE_MODE_DEFAULT = PIPELINE_MODE_SPILT\n\nPROVISIONERS_CLOUDFORMATION = \"CloudFormation\"\nPROVISIONERS_DEFAULT = PROVISIONERS_CLOUDFORMATION\n\nTEMPLATE_FORMATS_YAML = \"yaml\"\nTEMPLATE_FORMATS_DEFAULT = TEMPLATE_FORMATS_YAML\n\nSTATUS_ACTIVE = \"active\"\nSTATUS_TERMINATED = \"terminated\"\nSTATUS_DEFAULT = STATUS_ACTIVE\n\nPACKAGE_BUILD_SPEC_DEFAULT = \"\"\"\n version: 0.2\n phases:\n install:\n runtime-versions:\n python: 3.7\n build:\n commands:\n - cd $SOURCE_PATH\n {% for region in ALL_REGIONS %}\n - aws cloudformation package --region {{ region }} --template $(pwd)/product.template.yaml --s3-bucket sc-factory-artifacts-${ACCOUNT_ID}-{{ region }} --s3-prefix ${STACK_NAME} --output-template-file product.template-{{ region }}.yaml\n {% endfor %}\n artifacts:\n files:\n - '*'\n - '**/*'\n\"\"\"\n\nENVIRONMENT_COMPUTE_TYPE_DEFAULT = \"BUILD_GENERAL1_SMALL\"\nBUILD_STAGE_TIMEOUT_IN_MINUTES_DEFAULT = 60\nENVIRONMENT_TYPE_DEFAULT = \"LINUX_CONTAINER\"\n\nBUILDSPEC_RUNTIME_VERSIONS_NODEJS_DEFAULT = \"18\"\n\nDEFAULT_PARTITION = \"aws\"\nPARTITION = os.getenv(\"PARTITION\", DEFAULT_PARTITION)\n\nCODEPIPELINE_SUPPORTED_REGIONS = [\n \"us-east-2\",\n \"us-east-1\",\n \"us-west-1\",\n \"us-west-2\",\n \"ap-east-1\",\n \"ap-south-1\",\n \"ap-northeast-2\",\n \"ap-southeast-1\",\n \"ap-southeast-2\",\n \"ap-northeast-1\",\n \"ca-central-1\",\n \"eu-central-1\",\n \"eu-west-1\",\n \"eu-west-2\",\n \"eu-south-1\",\n \"eu-west-3\",\n \"eu-north-1\",\n 
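# note: the GovCloud region us-gov-west-1 appears below alongside the commercial regions\n 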
\"sa-east-1\",\n \"us-gov-west-1\",\n]\n\nSTATIC_HTML_PAGE = \"static-html-page.html\"\n\nGENERIC_BUILD_PROJECT_PRIVILEGED_MODE_DEFAULT = False\n\nINITIALISER_STACK_NAME_SSM_PARAMETER = \"service-catalog-factory-initialiser-stack-name\"\n\n\nCONFIG_SHOULD_PIPELINES_INHERIT_TAGS = \"should_pipelines_inherit_tags\"\n\n\nSERVICE_CATALOG_FACTORY_PIPELINES = \"/servicecatalog-factory/pipelines\"\n\nBOOTSTRAP_TYPE_SECONDARY = \"SECONDARY\"\nBOOTSTRAP_TYPE_PRIMARY = \"PRIMARY\"\n\nBOOTSTRAP_SECONDARY_TEMPLATE_NAME = \"servicecatalog-factory-secondary\"\n\n\nFACTORY_LOGGER_NAME = \"factory-logger\"\nFACTORY_SCHEDULER_LOGGER_NAME = \"factory-logger-scheduler\"\n\nAWS_URL_SUFFIX_DEFAULT = \"amazonaws.com\"\n\nCODE_BUILD_PROJECT_ENVIRONMENT_IMAGE_SSM_PARAMETER_NAME = (\n \"code_build_project_environment_image\"\n)\nCODE_BUILD_PROJECT_ENVIRONMENT_IMAGE_DEFAULT_VALUE = \"aws/codebuild/standard:5.0\"\n\nCODE_BUILD_PROJECT_ENVIRONMENT_IMAGE_CDK_TEMPLATE_DEFAULT_VALUE = \"aws/codebuild/amazonlinux2-x86_64-standard:5.0\"\n\n","repo_name":"awslabs/aws-service-catalog-factory","sub_path":"servicecatalog_factory/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3577,"program_lang":"python","lang":"en","doc_type":"code","stars":135,"dataset":"github-code","pt":"12"} +{"seq_id":"20787397457","text":"from functools import wraps\nfrom logging import Logger\nfrom typing import Callable, Union, Optional, Sequence, Pattern, List\n\nfrom slack_bolt.context.context import BoltContext\nfrom slack_bolt.error import BoltError\nfrom slack_bolt.listener import Listener, CustomListener\nfrom slack_bolt.listener_matcher import ListenerMatcher, CustomListenerMatcher\nfrom slack_bolt.listener_matcher.builtins import (\n workflow_step_edit,\n workflow_step_save,\n workflow_step_execute,\n)\nfrom slack_bolt.middleware import CustomMiddleware, Middleware\nfrom slack_bolt.response import BoltResponse\nfrom slack_bolt.workflows.step.internals import _is_used_without_argument\nfrom slack_bolt.workflows.step.utilities.complete import Complete\nfrom slack_bolt.workflows.step.utilities.configure import Configure\nfrom slack_bolt.workflows.step.utilities.fail import Fail\nfrom slack_bolt.workflows.step.utilities.update import Update\nfrom slack_sdk.web import WebClient\n\n\nclass WorkflowStepBuilder:\n \"\"\"Steps from Apps\n Refer to https://api.slack.com/workflows/steps for details.\n \"\"\"\n\n callback_id: Union[str, Pattern]\n _base_logger: Optional[Logger]\n _edit: Optional[Listener]\n _save: Optional[Listener]\n _execute: Optional[Listener]\n\n def __init__(\n self,\n callback_id: Union[str, Pattern],\n app_name: Optional[str] = None,\n base_logger: Optional[Logger] = None,\n ):\n \"\"\"This builder is supposed to be used as decorator.\n\n my_step = WorkflowStep.builder(\"my_step\")\n @my_step.edit\n def edit_my_step(ack, configure):\n pass\n @my_step.save\n def save_my_step(ack, step, update):\n pass\n @my_step.execute\n def execute_my_step(step, complete, fail):\n pass\n app.step(my_step)\n\n For further information about WorkflowStep specific function arguments\n such as `configure`, `update`, `complete`, and `fail`,\n refer to `slack_bolt.workflows.step.utilities` API documents.\n\n Args:\n callback_id: The callback_id for the workflow\n app_name: The application name mainly for logging\n base_logger: The base logger\n \"\"\"\n self.callback_id = callback_id\n self.app_name = app_name or __name__\n self._base_logger = base_logger\n self._edit = None\n self._save = None\n self._execute = None\n\n def 
edit(\n self,\n *args,\n matchers: Optional[Union[Callable[..., bool], ListenerMatcher]] = None,\n middleware: Optional[Union[Callable, Middleware]] = None,\n lazy: Optional[List[Callable[..., None]]] = None,\n ):\n \"\"\"Registers a new edit listener with details.\n You can use this method as decorator as well.\n\n @my_step.edit\n def edit_my_step(ack, configure):\n pass\n\n It's also possible to add additional listener matchers and/or middleware\n\n @my_step.edit(matchers=[is_valid], middleware=[update_context])\n def edit_my_step(ack, configure):\n pass\n\n For further information about WorkflowStep specific function arguments\n such as `configure`, `update`, `complete`, and `fail`,\n refer to `slack_bolt.workflows.step.utilities` API documents.\n\n Args:\n *args: This method can behave as either decorator or a method\n matchers: Listener matchers\n middleware: Listener middleware\n lazy: Lazy listeners\n \"\"\"\n\n if _is_used_without_argument(args):\n func = args[0]\n self._edit = self._to_listener(\"edit\", func, matchers, middleware)\n return func\n\n def _inner(func):\n functions = [func] + (lazy if lazy is not None else [])\n self._edit = self._to_listener(\"edit\", functions, matchers, middleware)\n\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return _wrapper\n\n return _inner\n\n def save(\n self,\n *args,\n matchers: Optional[Union[Callable[..., bool], ListenerMatcher]] = None,\n middleware: Optional[Union[Callable, Middleware]] = None,\n lazy: Optional[List[Callable[..., None]]] = None,\n ):\n \"\"\"Registers a new save listener with details.\n You can use this method as decorator as well.\n\n @my_step.save\n def save_my_step(ack, step, update):\n pass\n\n It's also possible to add additional listener matchers and/or middleware\n\n @my_step.save(matchers=[is_valid], middleware=[update_context])\n def save_my_step(ack, step, update):\n pass\n\n For further information about WorkflowStep specific function arguments\n such as `configure`, `update`, `complete`, and `fail`,\n refer to `slack_bolt.workflows.step.utilities` API documents.\n\n Args:\n *args: This method can behave as either decorator or a method\n matchers: Listener matchers\n middleware: Listener middleware\n lazy: Lazy listeners\n \"\"\"\n if _is_used_without_argument(args):\n func = args[0]\n self._save = self._to_listener(\"save\", func, matchers, middleware)\n return func\n\n def _inner(func):\n functions = [func] + (lazy if lazy is not None else [])\n self._save = self._to_listener(\"save\", functions, matchers, middleware)\n\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return _wrapper\n\n return _inner\n\n def execute(\n self,\n *args,\n matchers: Optional[Union[Callable[..., bool], ListenerMatcher]] = None,\n middleware: Optional[Union[Callable, Middleware]] = None,\n lazy: Optional[List[Callable[..., None]]] = None,\n ):\n \"\"\"Registers a new execute listener with details.\n You can use this method as decorator as well.\n\n @my_step.execute\n def execute_my_step(step, complete, fail):\n pass\n\n It's also possible to add additional listener matchers and/or middleware\n\n @my_step.save(matchers=[is_valid], middleware=[update_context])\n def execute_my_step(step, complete, fail):\n pass\n\n For further information about WorkflowStep specific function arguments\n such as `configure`, `update`, `complete`, and `fail`,\n refer to `slack_bolt.workflows.step.utilities` API documents.\n\n Args:\n *args: This method can behave as either 
decorator or a method\n matchers: Listener matchers\n middleware: Listener middleware\n lazy: Lazy listeners\n \"\"\"\n if _is_used_without_argument(args):\n func = args[0]\n self._execute = self._to_listener(\"execute\", func, matchers, middleware)\n return func\n\n def _inner(func):\n functions = [func] + (lazy if lazy is not None else [])\n self._execute = self._to_listener(\"execute\", functions, matchers, middleware)\n\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return _wrapper\n\n return _inner\n\n def build(self, base_logger: Optional[Logger] = None) -> \"WorkflowStep\":\n \"\"\"Constructs a WorkflowStep object. This method may raise an exception\n if the builder doesn't have enough configurations to build the object.\n\n Returns:\n WorkflowStep object\n \"\"\"\n if self._edit is None:\n raise BoltError(\"edit listener is not registered\")\n if self._save is None:\n raise BoltError(\"save listener is not registered\")\n if self._execute is None:\n raise BoltError(\"execute listener is not registered\")\n\n return WorkflowStep(\n callback_id=self.callback_id,\n edit=self._edit,\n save=self._save,\n execute=self._execute,\n app_name=self.app_name,\n base_logger=base_logger,\n )\n\n # ---------------------------------------\n\n def _to_listener(\n self,\n name: str,\n listener_or_functions: Union[Listener, Callable, List[Callable]],\n matchers: Optional[Union[Callable[..., bool], ListenerMatcher]] = None,\n middleware: Optional[Union[Callable, Middleware]] = None,\n ) -> Listener:\n return WorkflowStep.build_listener(\n callback_id=self.callback_id,\n app_name=self.app_name,\n listener_or_functions=listener_or_functions,\n name=name,\n matchers=self.to_listener_matchers(self.app_name, matchers, self._base_logger),\n middleware=self.to_listener_middleware(self.app_name, middleware, self._base_logger),\n base_logger=self._base_logger,\n )\n\n @staticmethod\n def to_listener_matchers(\n app_name: str,\n matchers: Optional[List[Union[Callable[..., bool], ListenerMatcher]]],\n base_logger: Optional[Logger] = None,\n ) -> List[ListenerMatcher]:\n _matchers = []\n if matchers is not None:\n for m in matchers:\n if isinstance(m, ListenerMatcher):\n _matchers.append(m)\n elif isinstance(m, Callable):\n _matchers.append(\n CustomListenerMatcher(\n app_name=app_name,\n func=m,\n base_logger=base_logger,\n )\n )\n else:\n raise ValueError(f\"Invalid matcher: {type(m)}\")\n return _matchers # type: ignore\n\n @staticmethod\n def to_listener_middleware(\n app_name: str,\n middleware: Optional[List[Union[Callable, Middleware]]],\n base_logger: Optional[Logger] = None,\n ) -> List[Middleware]:\n _middleware = []\n if middleware is not None:\n for m in middleware:\n if isinstance(m, Middleware):\n _middleware.append(m)\n elif isinstance(m, Callable):\n _middleware.append(\n CustomMiddleware(\n app_name=app_name,\n func=m,\n base_logger=base_logger,\n )\n )\n else:\n raise ValueError(f\"Invalid middleware: {type(m)}\")\n return _middleware # type: ignore\n\n\nclass WorkflowStep:\n callback_id: Union[str, Pattern]\n \"\"\"The Callback ID of the workflow step\"\"\"\n edit: Listener\n \"\"\"`edit` listener, which displays a modal in Workflow Builder\"\"\"\n save: Listener\n \"\"\"`save` listener, which accepts workflow creator's data submission in Workflow Builder\"\"\"\n execute: Listener\n \"\"\"`execute` listener, which processes workflow step execution\"\"\"\n\n def __init__(\n self,\n *,\n callback_id: Union[str, Pattern],\n edit: Union[Callable[..., 
Optional[BoltResponse]], Listener, Sequence[Callable]],\n save: Union[Callable[..., Optional[BoltResponse]], Listener, Sequence[Callable]],\n execute: Union[Callable[..., Optional[BoltResponse]], Listener, Sequence[Callable]],\n app_name: Optional[str] = None,\n base_logger: Optional[Logger] = None,\n ):\n \"\"\"\n Args:\n callback_id: The callback_id for this workflow step\n edit: Either a single function or a list of functions for opening a modal in the builder UI\n When it's a list, the first one is responsible for ack() while the rest are lazy listeners.\n save: Either a single function or a list of functions for handling modal interactions in the builder UI\n When it's a list, the first one is responsible for ack() while the rest are lazy listeners.\n execute: Either a single function or a list of functions for handling workflow step executions\n When it's a list, the first one is responsible for ack() while the rest are lazy listeners.\n app_name: The app name that can be mainly used for logging\n base_logger: The logger instance that can be used as a template when creating this step's logger\n \"\"\"\n self.callback_id = callback_id\n app_name = app_name or __name__\n self.edit = self.build_listener(\n callback_id=callback_id,\n app_name=app_name,\n listener_or_functions=edit,\n name=\"edit\",\n base_logger=base_logger,\n )\n self.save = self.build_listener(\n callback_id=callback_id,\n app_name=app_name,\n listener_or_functions=save,\n name=\"save\",\n base_logger=base_logger,\n )\n self.execute = self.build_listener(\n callback_id=callback_id,\n app_name=app_name,\n listener_or_functions=execute,\n name=\"execute\",\n base_logger=base_logger,\n )\n\n @classmethod\n def builder(cls, callback_id: Union[str, Pattern], base_logger: Optional[Logger] = None) -> WorkflowStepBuilder:\n return WorkflowStepBuilder(\n callback_id,\n base_logger=base_logger,\n )\n\n @classmethod\n def build_listener(\n cls,\n callback_id: Union[str, Pattern],\n app_name: str,\n listener_or_functions: Union[Listener, Callable, List[Callable]],\n name: str,\n matchers: Optional[List[ListenerMatcher]] = None,\n middleware: Optional[List[Middleware]] = None,\n base_logger: Optional[Logger] = None,\n ) -> Listener:\n if listener_or_functions is None:\n raise BoltError(f\"{name} listener is required (callback_id: {callback_id})\")\n\n if isinstance(listener_or_functions, Callable):\n listener_or_functions = [listener_or_functions]\n\n if isinstance(listener_or_functions, Listener):\n return listener_or_functions\n elif isinstance(listener_or_functions, list):\n matchers = matchers if matchers else []\n matchers.insert(\n 0,\n cls._build_primary_matcher(\n name,\n callback_id,\n base_logger=base_logger,\n ),\n )\n middleware = middleware if middleware else []\n middleware.insert(\n 0,\n cls._build_single_middleware(\n name,\n callback_id,\n base_logger=base_logger,\n ),\n )\n functions = listener_or_functions\n ack_function = functions.pop(0)\n return CustomListener(\n app_name=app_name,\n matchers=matchers,\n middleware=middleware,\n ack_function=ack_function,\n lazy_functions=functions,\n auto_acknowledgement=name == \"execute\",\n base_logger=base_logger,\n )\n else:\n raise BoltError(f\"Invalid {name} listener: {type(listener_or_functions)} detected (callback_id: {callback_id})\")\n\n @classmethod\n def _build_primary_matcher(\n cls,\n name: str,\n callback_id: Union[str, Pattern],\n base_logger: Optional[Logger] = None,\n ) -> ListenerMatcher:\n if name == \"edit\":\n return workflow_step_edit(callback_id, 
base_logger=base_logger)\n elif name == \"save\":\n return workflow_step_save(callback_id, base_logger=base_logger)\n elif name == \"execute\":\n return workflow_step_execute(callback_id, base_logger=base_logger)\n else:\n raise ValueError(f\"Invalid name {name}\")\n\n @classmethod\n def _build_single_middleware(\n cls,\n name: str,\n callback_id: Union[str, Pattern],\n base_logger: Optional[Logger] = None,\n ) -> Middleware:\n if name == \"edit\":\n return _build_edit_listener_middleware(callback_id, base_logger=base_logger)\n elif name == \"save\":\n return _build_save_listener_middleware(base_logger=base_logger)\n elif name == \"execute\":\n return _build_execute_listener_middleware(base_logger=base_logger)\n else:\n raise ValueError(f\"Invalid name {name}\")\n\n\n#######################\n# Edit\n#######################\n\n\ndef _build_edit_listener_middleware(callback_id: str, base_logger: Optional[Logger] = None) -> Middleware:\n def edit_listener_middleware(\n context: BoltContext,\n client: WebClient,\n body: dict,\n next: Callable[[], BoltResponse],\n ):\n context[\"configure\"] = Configure(\n callback_id=callback_id,\n client=client,\n body=body,\n )\n return next()\n\n return CustomMiddleware(\n app_name=__name__,\n func=edit_listener_middleware,\n base_logger=base_logger,\n )\n\n\n#######################\n# Save\n#######################\n\n\ndef _build_save_listener_middleware(base_logger: Optional[Logger] = None) -> Middleware:\n def save_listener_middleware(\n context: BoltContext,\n client: WebClient,\n body: dict,\n next: Callable[[], BoltResponse],\n ):\n context[\"update\"] = Update(\n client=client,\n body=body,\n )\n return next()\n\n return CustomMiddleware(\n app_name=__name__,\n func=save_listener_middleware,\n base_logger=base_logger,\n )\n\n\n#######################\n# Execute\n#######################\n\n\ndef _build_execute_listener_middleware(\n base_logger: Optional[Logger] = None,\n) -> Middleware:\n def execute_listener_middleware(\n context: BoltContext,\n client: WebClient,\n body: dict,\n next: Callable[[], BoltResponse],\n ):\n context[\"complete\"] = Complete(\n client=client,\n body=body,\n )\n context[\"fail\"] = Fail(\n client=client,\n body=body,\n )\n return next()\n\n return CustomMiddleware(\n app_name=__name__,\n func=execute_listener_middleware,\n base_logger=base_logger,\n )\n","repo_name":"slackapi/bolt-python","sub_path":"slack_bolt/workflows/step/step.py","file_name":"step.py","file_ext":"py","file_size_in_byte":18466,"program_lang":"python","lang":"en","doc_type":"code","stars":897,"dataset":"github-code","pt":"12"} +{"seq_id":"12461392847","text":"import random\r\n\r\nnum = random.randrange(1,11)\r\nnum_guesses= 0 \r\nfor i in range(3):\r\n guess_num = int(input(\"Please enter a guess:\"))\r\n num_guesses = num_guesses + 1\r\n if guess_num < num:\r\n print(\"Too Low\")\r\n elif guess_num > num:\r\n print(\"Too High\")\r\n elif guess_num == num:\r\n print(\"Correct\")\r\n break\r\nprint(\"It took you\", num_guesses, \"guesses\")\r\n\r\n\r\n\r\n# numbers_first = (\"mylist\")\r\n# mylist = list(range(0,11))\r\n# y = (random.choice(mylist))\r\n# result = (y)\r\n# print(\"Result: \", result)\r\n\r\n","repo_name":"bucs110FALL22/portfolio-gabriellavieira1","sub_path":"ch03/exercises/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9914433513","text":"# import pickle fit\n# unpickle\n# extract values\n# plot posterior predictive dist\n# compare to actual data\n\n\nimport stan\nimport pandas as pd\nimport arviz as az\nimport pickle\nfrom math import exp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndata = pd.read_csv('rg7.csv')\n\n\n\ndef load(filename):\n '''Reload pickled compiled models and fits for reuse'''\n return pickle.load(open(filename, 'rb'))\n\n\nfit = load('simple_model_rg_season_7_fit.pic')\n\naz.summary(fit)\n\n\ndf = fit.to_frame()\n\n\ndef generate_post_predictive(b0, b1, b2, sigma, t):\n \"\"\"\n Return a diameter measurement for given parameter values at\n a given time\n \"\"\"\n\n mu = b0/(1 + exp(-b2*(t - b1)))\n y = np.random.normal(loc=mu, scale=sigma)\n return y\n\n\n#get time points from rg7\nt_points = data.day.unique()\nt_points.sort()\n\n\n#get average for each time point\ny_data = data.groupby(['day']).diameter.mean()\nblock_data = data.groupby(['day']).b_id.unique()\n#plot average size over time\n\n#x = [1, 2, 3, 4, 5, 6, 10, 20]\n#y = [1, 4, 9, 16, 25, 36, 100, 400]\n#plt.plot(x, y)\nplt.plot(t_points, y_data)\n#plt.show()\nplt.savefig('rg7_average_data.png')\nimg = Image.open('rg7_average_data.png')\nimg.show()\n\n\n","repo_name":"patrick-kearney/apples","sub_path":"analyse_fit.py","file_name":"analyse_fit.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"37582805285","text":"from Game import Game\r\n#import pygame\r\nimport time\r\n\r\ndef main()->None:\r\n # initialize and run the Tetris game\r\n \r\n # pygame.mixer.init()\r\n # pygame.mixer.music.load(\"toby fox - UNDERTALE Soundtrack - 03 Your Best Friend.mp3\")\r\n # pygame.mixer.music.play(-1)\r\n\r\n game = Game()\r\n game.run()\r\n \r\n return \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"GRUUIS/INFO102_final_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42985382451","text":"# Imports:\nimport dill\nimport pandas as pd\nimport numpy as np\nimport re\nimport streamlit as st\n\n# 
------------------------------------------------------------------------\nst.image('./Robocare_name_logo.PNG', use_column_width=True)\n\n\n@st.cache(allow_output_mutation=True)\ndef get_data():\n\n patient_df = pd.read_csv(\"../data/patient_dataset.csv\")\n drugs_df = pd.read_csv(\"../data/drugs_dataset.csv\")\n pharmacy_df = pd.read_csv(\"../data/pharmacist_dataset.csv\")\n \n return patient_df, drugs_df, pharmacy_df\n\npatient_df, drugs_df, pharmacy_df = get_data()\n\nst.header(\"_...because robocare cares_\")\n\n\nrainbow_flower = \"https://images.unsplash.com/photo-1495386786209-f284d613b8d0?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1350&q=80\"\n\n\n\n\n\n\nst.title('')\nst.title('**`Doctor View`**')\nst.subheader('Medication Module')\nst.write('Page **1** of **3**')\n\nst.title(\" \")\n\nst.write('''Our tool enables you to create a prescription \nthat can be promptly delivered to the patient's chosen pharmacy!''')\n\nst.title(\" \")\n\nst.write(\"Let's first find out the patient's needs.\")\n# Medication\nmed = st.text_input(\"Medication\", value = 'Amoxicillin')\ndose = st.selectbox(\"Dosage\", [int(x) for x in sorted(drugs_df.dose_per_unit.unique())])\nrefills = st.text_input(\"Refills\", value = '2')\npatient = st.text_input(\"Patient name\", value = 'Phil Sabie')\n\n\npatient_dict = {\n 'name' : None,\n 'address' : None,\n 'contact' : None,\n 'last_delivery' : None\n}\n\n\npatient_name = patient.split(' ')\n\n\npatient_dict[\"name\"] = patient\npatient_dict[\"address\"] = patient_df.loc[ (patient_df[\"first_name\"] == patient_name[0]) & (patient_df[\"last_name\"] == patient_name[1])][\"address\"].values[0]\npatient_dict[\"contact\"] = patient_df.loc[ (patient_df[\"first_name\"] == patient_name[0]) & (patient_df[\"last_name\"] == patient_name[1])][\"mobile\"].values[0]\npatient_dict[\"last_delivery\"] = patient_df.loc[ (patient_df[\"first_name\"] == patient_name[0]) & (patient_df[\"last_name\"] == patient_name[1])][\"last_delivery\"].values[0]\n\n# patient_address_2 = patient_df.loc[ (patient_df[\"first_name\"] == patient_name[0]) & (patient_df[\"last_name\"] == patient_name[1])][\"address\"].values[0]\n\nst.write('Patient name:', patient)\nst.write('Address:', patient_dict[\"address\"])\nst.write('Contact:', patient_dict[\"contact\"])\nst.write('Last Delivery:', patient_dict[\"last_delivery\"])\n\n\n# st.write(pharmacy_df.head())\npharmacy_name_list = []\nnearest_pharmacy = pharmacy_df.loc[ (pharmacy_df[\"address2\"] == patient_dict[\"address\"]) ][\"pharmacist_name\"].tolist()[0]\npharmacist_name = str(pharmacy_df.loc[ (pharmacy_df[\"address2\"] == patient_dict[\"address\"]) ][\"first_name\"].tolist()[0]) + \" \" + str(pharmacy_df.loc[ (pharmacy_df[\"address2\"] == patient_dict[\"address\"]) ][\"last_name\"].tolist()[0])\nst.write(\"Pharmacy near you: \\n\", nearest_pharmacy)\n\n\n\n\n# doctor information\nst.write(\"Doctor:\")\n\n# pharmacist name\nst.write(\"Pharmacist Name:\", pharmacist_name)\n\n# medication\nst.write(f\"Medication: {med} , dosage: {dose}, refills: {refills} \")\n\n\nst.markdown(\n \"\"\" \n \n
\n \n \"\"\",\n unsafe_allow_html=True\n)\n\n\n# mock_schedule\nmed_1 = {\"name\" : \"Amoxicillin\", \n \"doze\" : \"20 mg\",\n # \"schedule\" : \"2 tabs before meal (Morning)\",\n \"Notes\" : \"Critical - Pair with Protonix\",\n \"Date Prescribed\" : \"2019-01-12\"\n }\n\nmed_2 = {\"name\" : \"Protonix\", \n \"doze\" : \"10 mg\",\n # \"schedule\" : \"1 tab after meal (Morning)\",\n \"Notes\" : \"Critical - Pair with Amoxicillin\",\n \"Date Prescribed\" : \"2019-01-12\"\n }\n\nmed_3 = {\"name\" : \"Levofloxacin\", \n \"doze\" : \"5 mg\",\n # \"schedule\" : \"1 tab after meal (Night)\",\n \"Notes\" : \"-\",\n \"Date Prescribed\" : \"2020-03-18\"\n }\n\ndataframe = pd.DataFrame([med_1, med_2, med_3])\ndataframe.set_index(\"name\", inplace=True)\n\nst.write(f\"Full Medication Summary\")\nst.dataframe(dataframe.style.highlight_max(axis=0))\n\n\n# pharmacy = st.selectbox(\"Pharmacy near you\", set(pharmacy_df.loc[ (pharmacy_df[\"address2\"] == patient_dict[\"address\"]) ][\"pharmacist_name\"].tolist()))\n \nst.title(\" \")\nst.title(\" \")\nst.write('### Watch this space, **Robocare** is constantly _`evolving`_ to **care** for you! 💗')\n\n\n\n","repo_name":"karma271/ROBOCARE","sub_path":"rc_doc.py","file_name":"rc_doc.py","file_ext":"py","file_size_in_byte":4262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23121445893","text":"from typing import Callable\nfrom typing.io import TextIO\n\nimport pandas as pd\n\nfrom exceptions import CSVFIleError, PCDConversionError\nfrom settings.pcd_config import PCD_HEADER\n\n\ndef convert_to_pcd(filename: str, input_dir: str, output_dir: str, file_fun: Callable) -> str:\n \"\"\"\n Convert csv file to pcd file\n Args:\n filename: CSV file name\n input_dir: Directory where input file is present\n output_dir: Directory where converted file to be stored\n file_fun: function to get the reference of output file\n\n Returns:\n Success message with csv filename\n\n Raises:\n CSVFIleError: CSV file error\n PCDConversionError: Error while converting csv to pcd\n \"\"\"\n # Check if filename endswith csv or not\n if not filename.endswith(\".csv\"):\n raise CSVFIleError(\"File not ends with .csv extension\")\n\n # Strip .csv from filename\n filename_without_ext = filename.rstrip(\".csv\")\n\n try:\n # Read the CSV file as dataframe which is separated by space\n df = pd.read_csv(f\"{input_dir}/{filename}\", sep=\" \", header=None)\n\n except Exception as e:\n raise CSVFIleError(e)\n\n # initially file reference will be None\n file_ref = None\n\n try:\n # Get the file reference\n file_ref = file_fun(filename_without_ext, output_dir) # type: TextIO\n\n # Get the header string and modify it considering the no. 
of lines in CSV\n pcd_header = PCD_HEADER.format(width=len(df), points=len(df))\n\n # write pcd_header to the new file\n file_ref.write(pcd_header)\n\n # Add each row of dataframe to new pcd file\n for row in df.iterrows():\n data = row[1]\n file_ref.write(f\"{data[0]} {data[1]} {data[2]} {data[3]}\\n\")\n\n # Close the file\n file_ref.close()\n\n except Exception as e:\n # In case of any exception, if file reference exists, close it\n if file_ref:\n file_ref.close()\n\n # Raise the error\n raise PCDConversionError(e)\n\n return f\"Successfully converted the file {filename}\"\n","repo_name":"JayadeepJayzd/CSV-to-PCD","sub_path":"pcd_converter.py","file_name":"pcd_converter.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"7478802683","text":"#tells if a string is a permutation of the other... also it deletes the spaces...\n\nstring1=\"cosa preshosha\"\nstring2=\"preshosha cosa\"\n#string1=string1[0:4]+string1[5:len(string1)+1]\nstring1=string1.replace(' ','')\nstring2=string2.replace(' ','')\n\narray=[0]*26\n\nfor i in string1:\n\tarray[ ord(i)-ord('a') ] += 1\n\t\n\nfor i in string2:\n\tarray[ ord(i)-ord('a') ]-=1\n\n\nprint(array)\n\n#the strings are permutations of each other exactly when every count is zero\nprint(all(count == 0 for count in array))\n\n\n","repo_name":"AnaNava1996/ejercicios-de-algoritmia","sub_path":"ejercicios/interview practice/permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"41416907928","text":"from programy.utils.logging.ylogger import YLogger\n\nfrom programy.config.section import BaseSectionConfigurationData\nfrom programy.utils.substitutions.substitues import Substitutions\n\n\nclass BrainSecurityConfiguration(BaseSectionConfigurationData):\n DEFAULT_ACCESS_DENIED = \"Access denied!\"\n\n def __init__(self, service_name):\n BaseSectionConfigurationData.__init__(self, service_name)\n self._classname = None\n self._denied_srai = None\n self._denied_text = None\n\n @property\n def classname(self):\n return self._classname\n\n @property\n def denied_srai(self):\n return self._denied_srai\n\n @property\n def denied_text(self):\n return self._denied_text\n\n def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):\n service = configuration_file.get_section(self.section_name, configuration)\n if service is not None:\n self._classname = configuration_file.get_option(service, \"classname\", missing_value=None, subs=subs)\n self._denied_srai = configuration_file.get_option(service, \"denied_srai\", missing_value=None, subs=subs)\n self._denied_text = configuration_file. 
\\\n get_option(service, \"denied_text\", missing_value=BrainSecurityConfiguration.DEFAULT_ACCESS_DENIED,\n subs=subs)\n else:\n YLogger.warning(self, \"'security' section missing from bot config, using defaults\")\n\n\nclass BrainSecurityAuthenticationConfiguration(BrainSecurityConfiguration):\n\n def __init__(self, service_name=\"authentication\"):\n BrainSecurityConfiguration.__init__(self, service_name)\n self._classname = \"programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService\"\n self._denied_srai = \"AUTHENTICATION_FAILED\"\n self._denied_text = \"Access Denied!\"\n\n def to_yaml(self, data, defaults=True):\n if defaults is True:\n data['classname'] = \"programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService\"\n data['denied_srai'] = \"AUTHENTICATION_FAILED\"\n data['denied_text'] = \"Access Denied!\"\n\n else:\n data['classname'] = self._classname\n data['denied_srai'] = self._denied_srai\n data['denied_text'] = self._denied_text\n\n\nclass BrainSecurityAuthorisationConfiguration(BrainSecurityConfiguration):\n\n def __init__(self, service_name=\"authorisation\"):\n BrainSecurityConfiguration.__init__(self, service_name)\n self._classname = \"programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService\"\n self._denied_srai = \"AUTHORISATION_FAILED\"\n self._denied_text = \"Access Denied!\"\n\n def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):\n super(BrainSecurityAuthorisationConfiguration, self).load_config_section(configuration_file, configuration,\n bot_root, subs=subs)\n service = configuration_file.get_section(self.section_name, configuration)\n self.load_additional_key_values(configuration_file, service)\n\n def to_yaml(self, data, defaults=True):\n if defaults is True:\n data['classname'] = \"programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService\"\n data['denied_srai'] = \"AUTHORISATION_FAILED\"\n data['denied_text'] = \"Access Denied!\"\n\n else:\n data['classname'] = self._classname\n data['denied_srai'] = self._denied_srai\n data['denied_text'] = self._denied_text\n\n\nclass BrainSecurityAccountLinkerConfiguration(BrainSecurityConfiguration):\n\n def __init__(self, service_name=\"account_linker\"):\n BrainSecurityConfiguration.__init__(self, service_name)\n self._classname = \"programy.security.linking.accountlinker.BasicAccountLinkerService\"\n self._denied_srai = \"ACCOUNT_LINKING_FAILED\"\n self._denied_text = \"Unable to link accounts!\"\n\n def load_config_section(self, configuration_file, configuration, bot_root, subs: Substitutions = None):\n super(BrainSecurityAccountLinkerConfiguration, self).load_config_section(configuration_file, configuration,\n bot_root, subs=subs)\n service = configuration_file.get_section(self.section_name, configuration)\n self.load_additional_key_values(configuration_file, service)\n\n def to_yaml(self, data, defaults=True):\n if defaults is True:\n data['classname'] = \"programy.security.linking.accountlinker.BasicAccountLinkerService\"\n data['denied_srai'] = \"ACCOUNT_LINKING_FAILED\"\n data['denied_text'] = \"Unable to link accounts!\"\n\n else:\n data['classname'] = self._classname\n data['denied_srai'] = self._denied_srai\n data['denied_text'] = 
self._denied_text\n","repo_name":"keiffster/program-y","sub_path":"src/programy/config/brain/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","stars":345,"dataset":"github-code","pt":"12"} +{"seq_id":"18886874796","text":"with open(\"input\") as f:\n result = 0\n chars = set()\n for line in f:\n if len(line) > 1:\n line = line.replace(\"\\n\", \"\")\n chars.update(set(line))\n else:\n result += len(chars)\n chars = set()\n result += len(chars)\n print(\"Day 6 Part 1:\", result)\n\nwith open(\"input\") as f:\n data = f.read().split(\"\\n\\n\")\n result = 0\n for group in data:\n group_data = []\n temp = set()\n for c in group:\n if c == '\\n':\n group_data.append(list(temp))\n temp = set()\n else:\n temp.add(c)\n if len(temp) > 0:\n group_data.append(list(temp))\n group_result = list(group_data[0])\n for c in group_data[0]:\n for g in group_data:\n if c not in g and c in group_result:\n group_result.remove(c)\n result += len(group_result)\n print(\"Day 6 Part 2:\", result)\n","repo_name":"aeolyus/advent2020","sub_path":"06/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"9914433513","text":"# import pickle fit\n# unpickle\n# extract values\n# plot posterior predictive dist\n# compare to actual data\n\n\nimport stan\nimport pandas as pd\nimport arviz as az\nimport pickle\nfrom math import exp\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\ndata = pd.read_csv('rg7.csv')\n\n\n\ndef load(filename):\n '''Reload pickled compiled models and fits for reuse'''\n return pickle.load(open(filename, 'rb'))\n\n\nfit = load('simple_model_rg_season_7_fit.pic')\n\naz.summary(fit)\n\n\ndf = fit.to_frame()\n\n\ndef generate_post_predictive(b0, b1, b2, sigma, t):\n \"\"\"\n Return a diameter measurement for given parameter values at\n a given time\n \"\"\"\n\n mu = b0/(1 + exp(-b2*(t - b1)))\n y = np.random.normal(loc=mu, scale=sigma)\n return y\n\n\n#get time points from rg7\nt_points = data.day.unique()\nt_points.sort()\n\n\n#get average for each time point\ny_data = data.groupby(['day']).diameter.mean()\nblock_data = data.groupby(['day']).b_id.unique()\n#plot average size over time\n\nx = [1, 2, 3, 4, 5, 6, 10, 20]\ny = [1, 4, 9, 16, 25, 36, 100, 400]\nplt.plot(x, y)\n#plt.show()\nplt.savefig('rg7_average_data.png')\nimg = Image.open('rg7_average_data.png')\nimg.show()\n\n\n","repo_name":"patrick-kearney/apples","sub_path":"analyse_fit.py","file_name":"analyse_fit.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"37582805285","text":"from Game import Game\r\n#import pygame\r\nimport time\r\n\r\ndef main()->None:\r\n # initialize and run the Tetris game\r\n \r\n # pygame.mixer.init()\r\n # pygame.mixer.music.load(\"toby fox - UNDERTALE Soundtrack - 03 Your Best Friend.mp3\")\r\n # pygame.mixer.music.play(-1)\r\n\r\n game = Game()\r\n game.run()\r\n \r\n return \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"GRUUIS/INFO102_final_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"23548418748","text":"from django import template\nfrom django.template.base import Node, NodeList, Template, 
Context, Variable\nfrom django.template.base import TemplateSyntaxError, VariableDoesNotExist, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END\nfrom markdown import markdownFromFile, markdown\nfrom django.conf import settings\n\nfrom copyblock.utils import copydown\n\nregister = template.Library()\n\nCACHE = {}\n\nclass CopyBlockNode(Node):\n def __init__(self, filepath, nocache, nomarkdown):\n self.filepath = filepath\n self.nocache = nocache\n self.nomarkdown = nomarkdown\n\n def render(self, context):\n filepath = \"%s/%s.markdown\" % (settings.COPYBLOCK_ROOT, self.filepath)\n nocache = self.nocache\n nomarkdown = self.nomarkdown\n \n# if nocache \\\n# or not settings.COPYBLOCK_CACHE \\\n# or filepath not in CACHE:\n# try:\n# content = get_file_contents(filepath)\n#\n# if nomarkdown:\n# output = content\n# else:\n# output = markdown(content)\n# CACHE[self.filepath] = output\n# except IOError:\n# import sys\n# exc_type, exc_value, exc_traceback = sys.exc_info()\n# output = '' % filepath\n# else:\n# output = CACHE[self.filepath]\n\n output = copydown(filepath, nocache=nocache, nomarkdown=nomarkdown)\n\n return output\n\ndef copyblock(parser, token):\n \"\"\"\n Outputs the contents of a given copy file into the page.\n\n Like a simple \"include\" tag, the ``copyblock`` tag includes the contents\n of another file -- which must exist under settings.COPYBLOCK_ROOT --\n in the current page, after running it through markdown::\n\n {% copyblock welcome_message %}\n\n {% copyblock help/how_to_use %}\n\n If the optional \"nocache\" parameter is given, the copyblock cache will not be consulted,\n otherwise, the file output will be read from the cache to save disk IO. 
Processed file\n output is cached while the app is running.\n\n {% copyblock welcome nocache %}\n \n If the content should not be processed as markdown, the \"nomarkdown\" parameter can be\n passed to the tag:\n\n {% copyblock welcome nomarkdown %}\n \n \"\"\"\n args = token.contents.split()\n nocache = False\n nomarkdown = False\n\n if 'nocache' in args:\n nocache=True\n if 'nomarkdown' in args:\n nomarkdown=True\n\n return CopyBlockNode(args[1], nocache, nomarkdown)\n\ncopyblock = register.tag(copyblock)\n","repo_name":"sivy/django-copyblock","sub_path":"copyblock/templatetags/copyblock_tags.py","file_name":"copyblock_tags.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"12"} +{"seq_id":"21142891631","text":"from bs4 import BeautifulSoup\nimport requests\nfrom time import sleep\n\nprint(\"program started\")\ns1={ \n\t'india' : 'https://timesofindia.indiatimes.com/rssfeeds/-2128936835.cms' , \n\t'world' : 'https://timesofindia.indiatimes.com/rssfeeds/296589292.cms' , \n\t'business' : 'https://timesofindia.indiatimes.com/rssfeeds/1898055.cms', \n\t'sports' : 'https://timesofindia.indiatimes.com/rssfeeds/4719148.cms', \n\t'Science' : 'https://timesofindia.indiatimes.com/rssfeeds/-2128672765.cms'}\n\nfor key, value in s1.items():\n \tprint(key)\n \t\nget_cat=str(input(\"Enter the category of the News you want\\n\"))\nresult=requests.get(s1[get_cat])\nsleep(1)\nprint(\"1st link fetched - rss feed\")\n\nresult.encoding = result.apparent_encoding\nsoup = BeautifulSoup(result.text, 'xml')\n# print (result.text)\n\ns2 = dict()\ni=1\nfor item in soup.find_all('item'):\n\tprint(\"fetching item - \"+str(i))\n\tlink=item.link.text\n\ttitle=item.title.text\n\tdesc=item.description.text\n\tif i<=2:\n\t\ts3 = dict()\n\t\ts3['title'] = title\n\t\ts3['description'] = desc\n\t\ts3['link'] = link\n\t\ts2[i]= s3\n\telse:\n\t\tbreak\n\ti=i+1\n\n# print(\"1st link fetched - rss feed\")\n#print(s2)\nfor i in range(1,3):\n\tprint(i)\n\tprint(s2[i]['title'])\nget_titno=int(input(\"Enter the number corresponding to title you want\\n\"))\n\nprint (\"Your title is:\")\nprint(s2[get_titno]['title'])\n\n\nfresult=requests.get(s2[get_titno]['link'])\nfsoup = BeautifulSoup(fresult.text, 'html.parser')\n\nprint(fsoup.find('div',class_='section1').text)\n\t\n\t\n\n\n\n\n\n\n\t\n\n\n\n\n\t","repo_name":"Sakshamgoyal25/TOI-RSS-BASIC","sub_path":"level1.py","file_name":"level1.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71004570580","text":"computador_ganhou=0\neu_ganhei=0\ndef usuario_escolhe_jogada(n,m):\n resta=0\n pecas=int(input('Quantas peças você vai tirar?'))\n resta = n-pecas\n if pecas > m or pecas <= 0 or pecas > n:\n while pecas > m or pecas <= 0 or pecas > n:\n print('Oops! Jogada inválida! 
Tente de novo.')\n pecas = int(input('Quantas peças você vai tirar?'))\n if pecas < 2:\n print('Você tirou uma peça.')\n if resta != 1:\n print('Agora restam {} peças no tabuleiro.'.format(resta))\n else:\n print('Agora resta apenas uma peça no tabuleiro.')\n else:\n print('Voce tirou {} peças.'.format(pecas))\n if resta != 1:\n print('Agora restam {} peças no tabuleiro.'.format(resta))\n else:\n print('Agora resta apenas uma peça no tabuleiro.')\n return pecas\ndef computador_escolhe_jogada(n,m):\n if m >= n:\n m=n\n else:\n sobrou = n % (m + 1) # sobrou recebe o resto da divisão\n if sobrou > 0: # Já que não é menor que m, e maior que 0 então...\n m = sobrou # retorne o resto\n resta=n-m\n if m==1:\n print('O computador tirou uma peça.')\n if resta > 1:\n print('Agora restam {} peças no tabuleiro.'.format(resta))\n elif resta == 1:\n print('Agora resta apenas uma peça no tabuleiro.')\n else:\n print('O computador tirou {} peças'.format(m))\n if resta > 1:\n print('Agora restam {} peças no tabuleiro.'.format(resta))\n elif resta == 1:\n print('Agora resta apenas uma peça no tabuleiro.')\n return m\ndef partida ():\n n = int(input('Quantas peças? '))\n m = int(input('Limite de peças por jogada? '))\n j = n\n i = 1\n if m > n or m <=0 or n < m:\n print('Oops! Jogada inválida! Tente de novo.')\n while m > n or m<=0 or n < m:\n m = int(input('Limite de peças por jogada? '))\n if n % (m + 1)==0:\n print('Voce começa!')\n n = n - (usuario_escolhe_jogada(n, m))\n #n=n-m\n while n >= 0:\n if i%2!=0:\n if m >= n:\n print('Fim do jogo! O computador ganhou!')\n global computador_ganhou\n computador_ganhou += 1\n break\n #m = n\n else:\n n=n-(computador_escolhe_jogada(n, m))\n #n = n - m\n if n==0:\n print('Fim do jogo! O computador ganhou!')\n #global computador_ganhou\n computador_ganhou += 1\n break\n\n else:\n n = n - (usuario_escolhe_jogada(n, m))\n #n = n - m\n if n==0:\n print('Fim do jogo! O Usuario ganhou!')\n global eu_ganhei\n eu_ganhei+=1\n break\n i+=1\n\n else:\n print('Computador começa!')\n if m >= n:\n print('Fim do jogo! O computador ganhou!')\n computador_ganhou += 1\n exit()\n else:\n n = n - (computador_escolhe_jogada(n, m))\n #n = n - m\n while n >= 0:\n if i%2!=0:\n n = n - (usuario_escolhe_jogada(n, m))\n #n = n - m\n if n == 0:\n print('Fim do jogo! O Usuario ganhou!')\n eu_ganhei += 1\n break\n else:\n if m > n:\n m = n\n n = n - (computador_escolhe_jogada(n, m))\n #n = n - m\n if n == 0:\n print('Fim do jogo! O computador ganhou!')\n computador_ganhou += 1\n break\n i+=1\ndef campeonato ():\n partida()\n partida()\n partida()\n print('Placar: Você {} X {} Computador'.format(eu_ganhei,computador_ganhou))\n\nprint('Bem-vindo ao jogo do NIM! Escolha:')\nprint('1 - para jogar uma partida isolada ')\nprint('2 - para jogar um campeonato 2')\nprint('3 para sair do jogo')\nopcao=int(input())\nif opcao == 1:\n partida()\nelif opcao == 2:\n campeonato()\nelif opcao == 3:\n print('Saindo do jogo')\nelse:\n while opcao > 3:\n print('Opcao Invalida')\n opcao=int(input())\n","repo_name":"phablotassio/exercicios-cursos-python","sub_path":"aulas/jogo_do_nim.py","file_name":"jogo_do_nim.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2246456706","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-01-12 19:56:13\n# @Author : 酸饺子 (changzheng300@foxmail.com)\n# @Link : https://github.com/SourDumplings\n# @Version : $Id$\n\n'''\n3. 
Please complete the following file-handling mini project (hint: you can use the list insert function).\n\n(1) Create a file Blowing in the wind.txt whose content is:\n\nHow many roads must a man walk down\n\nBefore they call him a man\n\nHow many seas must a white dove sail\n\nBefore she sleeps in the sand\n\nHow many times must the cannon balls fly\n\nBefore they're forever banned\n\nThe answer my friend is blowing in the wind\n\nThe answer is blowing in the wind\n\n(2) Insert the song title “Blowin’ in the wind” at the head of the file\n\n(3) Insert the singer name “Bob Dylan” after the song title\n\n(4) Append the string “1962 by Warner Bros. Inc.” at the end of the file\n\n(5) Print the file contents on the screen\n'''\n\ndef insert_line(lines):\n lines.insert(0, \"Blowin' in the wind\\n\")\n lines.insert(1, \"Bob Dylan\\n\")\n lines.append(\"1962 by Warner Bros. Inc.\")\n return ''.join(lines)\n\nwith open('W2.Blowing in the wind.txt', 'r+') as f:\n lines = f.readlines()\n string = insert_line(lines)\n print(string)\n f.seek(0)\n f.write(string)\n","repo_name":"SourDumplings/CodeSolutions","sub_path":"Course practices/Coursera课程:《用Python玩转数据》-张莉-南京大学/W2.2.3.一首歌的小项目.py","file_name":"W2.2.3.一首歌的小项目.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"12"} +{"seq_id":"14270727289","text":"import numpy as np\nfrom scipy import linalg\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport itertools\nfrom math import *\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom matplotlib.backends.backend_pdf import PdfPages\n\n\n#color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',\n#'darkorange'])\n\n\nmy_colors = [(0.5,0,0.5),(0,0.5,0.5),(0.8,0.36,0.36)]\ncmap_name = 'my_list'\nmy_cmap = LinearSegmentedColormap.from_list(\n cmap_name, my_colors, N=10000)\ncolor_iter = itertools.cycle(my_colors)\n\nparams = {\n 'axes.labelsize': 15,\n # 'text.fontsize': 15,\n 'font.size' : 15,\n 'axes.labelsize': 15,\n 'axes.titlesize': 15,\n 'legend.fontsize': 15,\n 'xtick.labelsize': 15,\n 'ytick.labelsize': 15,\n 'text.usetex': True,\n 'figure.figsize': [7, 4] # instead of 4.5, 4.5\n}\nplt.rcParams.update(params)\n\nclass GeM_tools():\n def __init__(self, validation = False, gt_comparison=False):\n self.gt_comparison = gt_comparison\n self.validation = validation\n\n\n def input_data(self, training_path, validation_path):\n\n rfX = np.loadtxt(training_path+'/rfX.txt')\n rfY = np.loadtxt(training_path+'/rfY.txt')\n rfZ = np.loadtxt(training_path+'/rfZ.txt')\n rtX = np.loadtxt(training_path+'/rtX.txt')\n rtY = np.loadtxt(training_path+'/rtY.txt')\n rtZ = np.loadtxt(training_path+'/rtZ.txt')\n lfX = np.loadtxt(training_path+'/lfX.txt')\n lfY = np.loadtxt(training_path+'/lfY.txt')\n lfZ = np.loadtxt(training_path+'/lfZ.txt')\n ltX = np.loadtxt(training_path+'/ltX.txt')\n ltY = np.loadtxt(training_path+'/ltY.txt')\n ltZ = np.loadtxt(training_path+'/ltZ.txt')\n dlen = min(np.size(lfZ),np.size(rfZ))\n gX = np.loadtxt(training_path+'/gX.txt')\n gY = np.loadtxt(training_path+'/gY.txt')\n gZ = np.loadtxt(training_path+'/gZ.txt')\n accX = np.loadtxt(training_path+'/accX.txt')\n accY = np.loadtxt(training_path+'/accY.txt')\n accZ = np.loadtxt(training_path+'/accZ.txt')\n dlen = min(dlen,np.size(accZ))\n dcX = np.loadtxt(training_path+'/comvX.txt')\n dcY = np.loadtxt(training_path+'/comvY.txt')\n dcZ = np.loadtxt(training_path+'/comvZ.txt')\n dlen = min(dlen,np.size(dcZ)) \n\n if(self.gt_comparison):\n #if(self.gem2):\n phase = np.loadtxt(training_path+'/gt.txt')\n dlen = min(dlen,np.size(phase))\n\n if(self.validation):\n rfX_val = np.loadtxt(validation_path+'/rfX.txt')\n rfY_val = 
np.loadtxt(validation_path+'/rfY.txt')\n rfZ_val = np.loadtxt(validation_path+'/rfZ.txt')\n rtX_val = np.loadtxt(validation_path+'/rtX.txt')\n rtY_val = np.loadtxt(validation_path+'/rtY.txt')\n rtZ_val = np.loadtxt(validation_path+'/rtZ.txt')\n lfX_val = np.loadtxt(validation_path+'/lfX.txt')\n lfY_val = np.loadtxt(validation_path+'/lfY.txt')\n lfZ_val = np.loadtxt(validation_path+'/lfZ.txt')\n ltX_val = np.loadtxt(validation_path+'/ltX.txt')\n ltY_val = np.loadtxt(validation_path+'/ltY.txt')\n ltZ_val = np.loadtxt(validation_path+'/ltZ.txt')\n dlen_val = min(np.size(lfZ_val),np.size(rfZ_val))\n gX_val = np.loadtxt(validation_path+'/gX.txt')\n gY_val = np.loadtxt(validation_path+'/gY.txt')\n gZ_val = np.loadtxt(validation_path+'/gZ.txt')\n accX_val = np.loadtxt(validation_path+'/accX.txt')\n accY_val = np.loadtxt(validation_path+'/accY.txt')\n accZ_val = np.loadtxt(validation_path+'/accZ.txt')\n dlen_val = min(dlen_val,np.size(accZ_val))\n dcX_val = np.loadtxt(validation_path+'/comvX.txt')\n dcY_val = np.loadtxt(validation_path+'/comvY.txt')\n dcZ_val = np.loadtxt(validation_path+'/comvZ.txt')\n dlen_val = min(dlen_val,np.size(dcZ_val)) \n \n\n\n self.data_train = np.array([])\n self.data_val = np.array([])\n\n #Leg Forces and Torques\n self.data_train = lfX[0:dlen] - rfX[0:dlen]\n self.data_train = np.column_stack([self.data_train, lfY[0:dlen] - rfY[0:dlen]])\n self.data_train = np.column_stack([self.data_train, lfZ[0:dlen] - rfZ[0:dlen]])\n self.data_train = np.column_stack([self.data_train, ltX[0:dlen] - rtX[0:dlen]])\n self.data_train = np.column_stack([self.data_train, ltY[0:dlen] - rtY[0:dlen]])\n self.data_train = np.column_stack([self.data_train, ltZ[0:dlen] - rtZ[0:dlen]])\n\n #CoM Velocity\n self.data_train = np.column_stack([self.data_train, dcX[0:dlen]])\n self.data_train = np.column_stack([self.data_train, dcY[0:dlen]])\n self.data_train = np.column_stack([self.data_train, dcZ[0:dlen]])\n \n #Base Linear Acceleration and Base Angular Velocity\n self.data_train = np.column_stack([self.data_train, accX[0:dlen]])\n self.data_train = np.column_stack([self.data_train, accY[0:dlen]])\n self.data_train = np.column_stack([self.data_train, accZ[0:dlen]])\n self.data_train = np.column_stack([self.data_train, gX[0:dlen]])\n self.data_train = np.column_stack([self.data_train, gY[0:dlen]])\n self.data_train = np.column_stack([self.data_train, gZ[0:dlen]])\n\n\n self.data_train_min = np.zeros((self.data_train.shape[1]))\n self.data_train_max = np.zeros((self.data_train.shape[1]))\n self.data_train_mean = np.zeros((self.data_train.shape[1]))\n self.data_train_std = np.zeros((self.data_train.shape[1]))\n \n #Data Statistics\n for i in range(self.data_train.shape[1]):\n self.data_train_min[i] = np.min(self.data_train[:, i])\n self.data_train_max[i] = np.max(self.data_train[:, i])\n self.data_train_mean[i] = np.mean(self.data_train[:, i])\n self.data_train_std[i] = np.std(self.data_train[:, i])\n self.data_train[:, i] = self.normalize_data(self.data_train[:, i],self.data_train_max[i], self.data_train_min[i]) \n #self.data_train[:, i] = self.standarize_data(self.data_train[:, i],self.data_train_mean[i], self.data_train_std[i]) \n #self.data_train[:, i] = self.normalizeMean_data(self.data_train[:, i],self.data_train_max[i], self.data_train_min[i],self.data_train_mean[i]) \n\n '''\n plt.plot(self.data_label[:,1], color = [0.5,0.5,0.5])\n plt.plot(self.data_label[:,4], color = [0,0.5,0.5])\n plt.plot(self.data_label[:,7], color = [0.8,0.36,0.36])\n plt.grid('on')\n plt.show()\n '''\n\n 
if(self.validation):\n #Leg Forces and Torques\n self.data_val = lfX_val[0:dlen_val] - rfX_val[0:dlen_val]\n self.data_val = np.column_stack([self.data_val, lfY_val[0:dlen_val] - rfY_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, lfZ_val[0:dlen_val] - rfZ_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, ltX_val[0:dlen_val] - rtX_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, ltY_val[0:dlen_val] - rtY_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, ltZ_val[0:dlen_val] - rtZ_val[0:dlen_val]])\n\n #CoM Velocity\n self.data_val = np.column_stack([self.data_val, dcX_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, dcY_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, dcZ_val[0:dlen_val]])\n\n #Base Linear Acceleration and Base Angular Velocity\n self.data_val = np.column_stack([self.data_val, accX_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, accY_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, accZ_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, gX_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, gY_val[0:dlen_val]])\n self.data_val = np.column_stack([self.data_val, gZ_val[0:dlen_val]])\n\n\n\n self.data_val_min = np.zeros((self.data_val.shape[1]))\n self.data_val_max = np.zeros((self.data_val.shape[1]))\n self.data_val_mean = np.zeros((self.data_val.shape[1]))\n self.data_val_std = np.zeros((self.data_val.shape[1]))\n \n #Data Statistics\n for i in range(self.data_val.shape[1]):\n self.data_val_min[i] = np.min(self.data_val[:, i])\n self.data_val_max[i] = np.max(self.data_val[:, i])\n self.data_val_mean[i] = np.mean(self.data_val[:, i])\n self.data_val_std[i] = np.std(self.data_val[:, i])\n self.data_val[:, i] = self.normalize_data(self.data_val[:, i],self.data_val_max[i], self.data_val_min[i]) \n #self.data_val[:, i] = self.standarize_data(self.data_val[:, i],self.data_val_mean[i], self.data_val_std[i]) \n #self.data_val[:, i] = self.normalizeMean_data(self.data_val[:, i],self.data_val_max[i], self.data_val_min[i],self.data_val_mean[i]) \n\n \n if (self.gt_comparison):\n self.phase = phase[0:dlen]\n self.dlen = dlen\n '''\n else:\n phase2=np.append([phase],[np.zeros_like(np.arange(cX.shape[0]-phase.shape[0]))])\n self.cX = cX[~(phase2==-1)]\n self.cY = cY[~(phase2==-1)]\n self.cZ = cZ[~(phase2==-1)]\n phase3=np.append([phase],[np.zeros_like(np.arange(accX.shape[0]-phase.shape[0]))])\n self.accX = accX[~(phase3==-1)]\n self.accY = accY[~(phase3==-1)]\n self.accZ = accZ[~(phase3==-1)]\n phase4=np.append([phase],[np.zeros_like(np.arange(gX.shape[0]-phase.shape[0]))])\n self.gX = gX[~(phase4==-1)]\n self.gY = gY[~(phase4==-1)]\n phase5=np.append([phase],[np.zeros_like(np.arange(lfZ.shape[0]-phase.shape[0]))])\n self.lfZ = lfZ[~(phase5==-1)]\n self.lfX = lfX[~(phase5==-1)]\n self.lfY = lfY[~(phase5==-1)]\n phase6=np.append([phase],[np.zeros_like(np.arange(rfZ.shape[0]-phase.shape[0]))])\n self.rfZ = rfZ[~(phase6==-1)]\n self.rfX = rfX[~(phase6==-1)]\n self.rfY = rfY[~(phase6==-1)]\n phase7=np.append([phase],[np.zeros_like(np.arange(ltZ.shape[0]-phase.shape[0]))])\n self.ltZ = ltZ[~(phase7==-1)]\n self.ltX = ltX[~(phase7==-1)]\n self.ltY = ltY[~(phase7==-1)]\n phase8=np.append([phase],[np.zeros_like(np.arange(rtZ.shape[0]-phase.shape[0]))])\n self.rtZ = rtZ[~(phase8==-1)]\n self.rtX = rtX[~(phase8==-1)]\n self.rtY = rtY[~(phase8==-1)]\n self.data_train=self.data_train[~(phase==-1)]\n 
self.phase=phase[~(phase==-1)]\n self.dlen = np.size(self.phase)\n '''\n else:\n self.dlen = dlen\n \n\n \n\n print(\"Data Dim\")\n print(self.dlen)\n\n\n def genInput(self, data, gt=None):\n\n if gt is None:\n gt=self\n\n output_ = np.array([])\n output_ = np.append(output_, data.lfX - data.rfX, axis = 0)\n output_ = np.append(output_, data.lfY - data.rfY, axis = 0)\n output_ = np.append(output_, data.lfZ - data.rfZ, axis = 0)\n output_ = np.append(output_, data.ltX - data.rtX, axis = 0)\n output_ = np.append(output_, data.ltY - data.rtY, axis = 0)\n output_ = np.append(output_, data.ltZ - data.rtZ, axis = 0)\n output_ = np.append(output_, data.dcX, axis = 0)\n output_ = np.append(output_, data.dcY, axis = 0)\n output_ = np.append(output_, data.dcZ, axis = 0)\n output_ = np.append(output_, data.accX, axis = 0)\n output_ = np.append(output_, data.accY, axis = 0)\n output_ = np.append(output_, data.accZ, axis = 0)\n output_ = np.append(output_, data.gX, axis = 0)\n output_ = np.append(output_, data.gY, axis = 0)\n output_ = np.append(output_, data.gZ, axis = 0)\n\n for i in range(self.data_train.shape[1]):\n output_[i] = self.normalize_data(output_[i],self.data_train_max[i], self.data_train_min[i]) \n\n return output_\n\n\n def normalize_data(self,din, dmax, dmin, min_range=-1, max_range = 1): \n if(dmax-dmin != 0):\n dout = min_range + (max_range - min_range) * (din - dmin)/(dmax - dmin)\n else:\n dout = np.zeros((np.size(din)))\n\n return dout\n\n def standarize_data(self,din,dmean,dstd):\n if(dstd != 0):\n dout = (din - dmean)/dstd\n else:\n dout = np.zeros((np.size(din)))\n\n return dout\n\n\n def normalize(self,din, dmax, dmin, min_range=-1, max_range = 1): \n if(din>dmax):\n din=dmax\n elif(din<dmin):\n din=dmin\n if(dmax-dmin != 0):\n dout = min_range + (max_range - min_range) * (din - dmin)/(dmax - dmin)\n else:\n dout = 0\n\n return dout\n\n\ndef plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n\nclass GEM_data:\n def __init__(self):\n self.lfX = 0\n self.lfY = 0\n self.lfZ = 0\n self.ltX = 0\n self.ltY = 0\n self.ltZ = 0\n self.rfX = 0\n self.rfY = 0\n self.rfZ = 0\n self.rtX = 0\n self.rtY = 0\n self.rtZ = 0\n self.accX = 0\n self.accY = 0\n self.accZ = 0\n self.gX = 0\n self.gY = 0\n self.gZ = 0\n self.dcX = 0\n self.dcY = 0\n self.dcZ = 0","repo_name":"mrsp/gem","sub_path":"src/gem_tools.py","file_name":"gem_tools.py","file_ext":"py","file_size_in_byte":19225,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"19538777554","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n return self.mergeLists(list1, list2)\n\n def mergeLists(self, curr1, curr2):\n \n result = None\n if curr1 == None:\n return curr2\n elif curr2 == None:\n return curr1\n \n if curr1.val <= curr2.val:\n result = curr1\n result.next = self.mergeLists(curr1.next, curr2)\n\n else:\n result = curr2\n result.next = self.mergeLists(curr1, curr2.next)\n\n return result \n\n \n","repo_name":"surafel58/A2SV-progress-sheet","sub_path":"Camp Progress sheet/Merge Two Sorted Lists.py","file_name":"Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"26004835287","text":"\n\nfrom django.db import models, migrations\nimport django.db.models.deletion\nfrom django.conf import settings\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ('contenttypes', '0002_remove_content_type_name'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Version',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('create_dt', models.DateTimeField(verbose_name='create time')),\n ('object_id', models.IntegerField(verbose_name='object id')),\n ('object_repr', models.CharField(max_length=200, verbose_name='object repr')),\n ('object_changes', models.TextField(verbose_name='change message', blank=True)),\n ('object_value', models.TextField(verbose_name='changed object', blank=True)),\n ('hash', models.CharField(default='', max_length=40, null=True)),\n ('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=django.db.models.deletion.CASCADE)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n ]\n","repo_name":"tendenci/tendenci","sub_path":"tendenci/apps/versions/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":467,"dataset":"github-code","pt":"12"} +{"seq_id":"15156729805","text":"import pathlib\nimport re\nimport argparse\nimport yaml\nfrom Bio.PDB import PDBParser\nfrom Bio import SeqIO\nimport os\nimport numpy as np\nimport pandas as pd\n# import faulthandler\nimport helpers as nhp\nfrom LatticeModelComparison import LatticeModelComparison\nfrom ParallelTempering import ParallelTempering\nfrom jinja2 import Template\nfrom os.path import basename, splitext\nimport snakemake as sm\nfrom shutil import copyfile, rmtree\nfrom itertools import product\n\npdb_parser = PDBParser()\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\n\nparser = argparse.ArgumentParser(description='Generate cubic lattice models for a (list of) pdb names or AA seqs')\nparser.add_argument('--in-dir', type=str, nargs='+',\n help='entity/fasta file or directory containing entity/fasta files of which to generate models')\n# --- model parameters ---\nparser.add_argument('--temp-range', type=float, nargs=2, default=[0.01, 0.001],\n help='Use a range of equally spaced temperatures between given values in a parallel tempering search.')\nparser.add_argument('--tagged-resn', type=str, default=['None'], nargs='+',\n help='Define residue(s) with 1-letter-code that should be replaced with a very hydrophillic tag.'\n 'May define multiple combinations. Set to None for no tagged resn [default: None]')\nparser.add_argument('--experimental-mode', type=int, default=0,\n help='Several experimental settings under 1 switch, numeric.')\nparser.add_argument('--cm-pdb-dir', type=str, default=[], nargs='+',\n help='center-of-mass pdbs directory, for finetuning of starting structures. Necessary if --finetune-structure is on')\nparser.add_argument('--finetune-structure', action='store_true',\n help='Minimize lattice model RMSD w.r.t. center-of-mass structure provided through cm-pdb-dir by the same ID')\nparser.add_argument('--labeling-model', type=str, default='perfect',\n help='Yaml file containing labeling probability for each residue, for each labeling chemistry '\n '(no value == no labeling). 
Supply \"standard\" for regular model or \"perfect\" for '\n 'no mislabeling or path to yaml [default: perfect].')\n# --- lattice properties ---\nparser.add_argument('--lattice-type', type=str, default='bcc', choices=['cubic', 'bcc'],\n help='Set type of lattice to use. Choices: cubic, bcc [ default: bcc]')\n# --- model iterations parameters ---\nparser.add_argument('--nb-steps', type=int, default=1,\n help='number of mutations to perform at each MC iteration.')\nparser.add_argument('--iters-per-tempswap', default=100, type=int,\n help='If using parallel tempering, define per how many rounds a temperature swap should be performed')\nparser.add_argument('--mc-iters', type=int, default=500,\n help='number Monte Carlo iterations to perform for each model.')\nparser.add_argument('--nb-models', type=int, default=10,\n help='number of models to create per AA sequence.')\nparser.add_argument('--nb-processes', type=int, default=4,\n help='Define how many processes to engage at once in parallel tempering.')\nparser.add_argument('--no-regularization', action='store_true',\n help='Do not add regularization term to energy function.')\nparser.add_argument('--accomodate-tags', action='store_true',\n help='Run MC iterations until tag penalty is 0.')\nparser.add_argument('--max-accomodate-rounds', type=int, default=5,\n help='If accomodating tags, number of rounds to run before re-initiating.')\nparser.add_argument('--free-sampling', action='store_true',\n help='Do not optimize structure during snapshot generation, accept all')\n# --- Result saving options ---\nparser.add_argument('--out-dir', type=str, required=True,\n help='Location where model pdb files are stored.')\nparser.add_argument('--snapshots', nargs=2, type=int, default=[0,0],\n help='If given [n,s], saves n snapshots with s steps in between after convergence/end of run.')\nparser.add_argument('--save-intermediate-structures', action='store_true',\n help='Save structure after temperature swaps in this pdb file')\nparser.add_argument('--store-energies', action='store_true',\n help='Store base energy and individual contributions to energy in tsv file')\nparser.add_argument('--max-cores', type=int, default=4)\nparser.add_argument('--dry-run', action='store_true')\n\nargs = parser.parse_args()\n\nout_dir = nhp.parse_output_dir(args.out_dir)\nlog_dir = nhp.parse_output_dir(out_dir +'logs')\nent_list = nhp.parse_input_dir(args.in_dir, '*.npz')\npdb_id_list = [splitext(basename(ent))[0] for ent in ent_list]\npdb_id_list_str = ','.join(pdb_id_list)\ncm_pdb_list = ','.join(args.cm_pdb_dir)\n\n# Copy input files to destination folder\nin_dir = nhp.parse_output_dir(out_dir + 'in_npz')\nfor ent in ent_list:\n copyfile(ent, in_dir+basename(ent))\n\ndef get_mod_tuples(pdb_id_list, nb_models, tagged_resn, out_dir):\n out_list = []\n for tup in product(pdb_id_list, list(range(nb_models)), tagged_resn):\n if not os.path.exists(f'{out_dir}tag{tup[2]}/{tup[0]}/{tup[0]}_{tup[1]}.pdb'): out_list.append(tup)\n return out_list\n\nmod_tuples = get_mod_tuples(pdb_id_list, args.nb_models, args.tagged_resn, out_dir)\n\nif args.accomodate_tags:\n with open(f'{__location__}/generate_lm_accomodate_tags.sf', 'r') as fh: template_txt = fh.read()\n accomodate_tags_rounds = 0\n unfinished_ids_fn = f'{out_dir}unfinished_ids.txt'\n # mod_tuples = list(product(pdb_id_list, list(range(args.nb_models)), args.tagged_resn))\n while len(mod_tuples):\n with open(unfinished_ids_fn, 'w') as fh:\n fh.write('')\n sf_txt = Template(template_txt).render(\n __location__=__location__,\n 
mod_tuples=mod_tuples,\n in_dir=in_dir,\n out_dir=out_dir,\n max_nb_models=args.nb_models,\n processes=args.nb_processes,\n temp_min=args.temp_range[0], temp_max=args.temp_range[1],\n iters_per_tempswap=args.iters_per_tempswap,\n mc_iters=args.mc_iters,\n nb_steps=args.nb_steps,\n lattice_type=args.lattice_type,\n nb_snapshots=args.snapshots[0], snapshot_dist=args.snapshots[1],\n store_energies=args.store_energies,\n save_intermediate_structures=args.save_intermediate_structures,\n experimental_mode=args.experimental_mode,\n cm_pdb_str=str(cm_pdb_list).strip('[]').replace(',', ''),\n finetune_structure=args.finetune_structure,\n no_regularization=args.no_regularization,\n labeling_model=args.labeling_model,\n accomodate_tags=args.accomodate_tags,\n free_sampling=args.free_sampling\n )\n sf_fn = f'{out_dir}generate_lm{accomodate_tags_rounds}.sf'\n with open(sf_fn, 'w') as fh: fh.write(sf_txt)\n sm.snakemake(sf_fn, cores=args.max_cores, keepgoing=True, dryrun=args.dry_run)\n\n # --- reload model tuples that did not end with all tags accomodated ---\n with open(unfinished_ids_fn, 'r') as fh:\n mod_tuples = [tup.strip().split('\\t') for tup in fh.readlines()]\n accomodate_tags_rounds += 1\n if not accomodate_tags_rounds % args.max_accomodate_rounds:\n for tup in mod_tuples:\n os.remove(f'{out_dir}tag{tup[2]}/{tup[0]}/{tup[0]}_{tup[1]}_unoptimizedTags.npz')\nelse:\n with open(f'{__location__}/generate_lm_accomodate_tags.sf', 'r') as fh: template_txt = fh.read()\n sf_txt = Template(template_txt).render(\n __location__=__location__,\n mod_tuples=mod_tuples,\n in_dir=in_dir,\n out_dir=out_dir,\n max_nb_models=args.nb_models,\n processes=args.nb_processes,\n temp_min=args.temp_range[0], temp_max=args.temp_range[1],\n iters_per_tempswap=args.iters_per_tempswap,\n mc_iters=args.mc_iters,\n nb_steps=args.nb_steps,\n lattice_type=args.lattice_type,\n nb_snapshots=args.snapshots[0], snapshot_dist=args.snapshots[1],\n store_energies=args.store_energies,\n save_intermediate_structures=args.save_intermediate_structures,\n experimental_mode=args.experimental_mode,\n cm_pdb_str=str(cm_pdb_list).strip('[]').replace(',', ''),\n finetune_structure=args.finetune_structure,\n no_regularization=args.no_regularization,\n labeling_model=args.labeling_model,\n accomodate_tags=args.accomodate_tags,\n free_sampling=args.free_sampling\n )\n sf_fn = f'{out_dir}generate_lm.sf'\n with open(sf_fn, 'w') as fh:\n fh.write(sf_txt)\n sm.snakemake(sf_fn, cores=args.max_cores, keepgoing=True, dryrun=args.dry_run)\n","repo_name":"cvdelannoy/FRET_X_fingerprinting_simulation","sub_path":"generate_lattice_models_sm.py","file_name":"generate_lattice_models_sm.py","file_ext":"py","file_size_in_byte":8958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19084813069","text":"import random\n\n\ndef check(a, s, d, n):\n x = pow(a, d, n)\n if x == 1:\n return True\n for i in range(s - 1):\n if x == n - 1:\n return True\n x = pow(x, 2, n)\n return x == n - 1\n\n\ndef isPrime(n, k=10):\n if n == 2:\n return True\n if not n & 1:\n return False\n\n s = 0\n d = n - 1\n\n while d % 2 == 0:\n d >>= 1\n s += 1\n\n for i in range(k):\n a = random.randint(2, n - 1)\n if not check(a, s, d, n):\n return False\n return 
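The lattice-model driver above builds its workflow by rendering a Snakefile from a Jinja2 template and running it through Snakemake's Python API. A minimal, self-contained sketch of that render-then-run pattern (rule names, template text and paths here are made up for illustration; `sm.snakemake(..., cores=..., keepgoing=..., dryrun=...)` is the pre-v8 API call used above):

from jinja2 import Template
import snakemake as sm

template_txt = """
rule all:
    input: expand('{{ out_dir }}{id}.txt', id={{ ids }})

rule make_one:
    output: '{{ out_dir }}{id}.txt'
    shell: 'touch {output}'
"""

# Jinja2 fills in the {{ ... }} placeholders; single-brace {id}/{output}
# are left alone and interpreted later by Snakemake itself.
sf_txt = Template(template_txt).render(out_dir='out/', ids=['a', 'b'])
with open('generated.sf', 'w') as fh:
    fh.write(sf_txt)
sm.snakemake('generated.sf', cores=2, keepgoing=True, dryrun=True)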
True\n","repo_name":"OpenGenus/cosmos","sub_path":"code/mathematical_algorithms/src/primality_tests/miller_rabin_primality_test/miller_rabin_primality_test.py","file_name":"miller_rabin_primality_test.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":13433,"dataset":"github-code","pt":"12"} +{"seq_id":"15917418764","text":"import numpy as np\nfrom scipy.integrate import cumtrapz\nfrom scipy.optimize import curve_fit\nfrom scipy.interpolate import interp1d\nfrom Source.util import get_first_crossing_time\n\ndef fit_renal_model(aorta_c, kidney_c, temp_res = 3.62, mode='two_compartment'):\n\tif aorta_c.shape[0] != kidney_c.shape[0]:\n\t\traise ValueError('AIF and Kidney signals should have the same length!')\n\tnt = aorta_c.shape[0]\n\tacq_time = np.arange(nt)*temp_res\n\n\tinterp_step = -1\n\tif interp_step > 0:\n\t\tacq_time_ip = np.arange(0, acq_time[-1], interp_step)\n\t\tf_a = interp1d(acq_time, aorta_c, kind='cubic')\n\t\tf_k = interp1d(acq_time, kidney_c, kind='cubic')\n\t\taorta_ip = f_a(acq_time_ip)\n\t\taorta_ip[aorta_ip < 0] = 0\n\t\tkidney_ip = f_k(acq_time_ip)\n\t\tkidney_ip[kidney_ip < 0] = 0\n\telse:\n\t\tacq_time_ip = acq_time\n\t\taorta_ip = aorta_c\n\t\tkidney_ip = kidney_c\n\n\t# Time syc\n\tt0_thresh = 0.10 # threshold for starting time t0\n\t# before_t0 = int(5/interp_step)\n\t# after_t0 = int(90/interp_step)\n\tt0 = int(get_first_crossing_time(aorta_ip.reshape(1,-1), t0_thresh))\n\t# aorta_sync = aorta_ip[(t0-before_t0):(t0+after_t0)]\n\t# kidney_sync = kidney_ip[(t0-before_t0):(t0+after_t0)]\n\t# acq_time_sync = np.arange(0, before_t0+after_t0)*interp_step\n\n\tacq_time_sync = acq_time_ip\n\taorta_sync = aorta_ip\n\tkidney_sync = kidney_ip\n\n\txdata = np.vstack([acq_time_sync.ravel(), aorta_sync.ravel()]).T\n\n\tmode = mode.lower()\n\tif mode == 'two_compartment':\n\t\tif interp_step > 0:\n\t\t\tstep_size = interp_step\n\t\telse:\n\t\t\tstep_size = temp_res\n\t\tt_end = min(t0 + np.floor(300/step_size).astype(np.int), acq_time_sync.shape[0])\n\t\t# print(t_end)\n\t\tacq_time_sync = acq_time_sync[:t_end]\n\t\taorta_sync = aorta_sync[:t_end]\n\t\tkidney_sync = kidney_sync[:t_end]\n\t\txdata = np.vstack([acq_time_sync.ravel(), aorta_sync.ravel()]).T\n\t\tparam_opt, chisqr = two_compartment_fit(acq_time_sync, aorta_sync, kidney_sync, chisqr=True)\n\t\t(C_fit, C_art, C_tub) = two_compartment_detailed(xdata, param_opt[0], param_opt[1], param_opt[2], param_opt[3])\n\telif mode == 'three_compartment':\n\t\tparam_opt, chisqr = three_compartment_fit(acq_time_sync, aorta_sync, kidney_sync, chisqr=True)\n\t\t(C_fit, C_art, C_tub) = three_compartment_detailed(xdata, param_opt[0], param_opt[1], param_opt[2], param_opt[3], param_opt[4])\n\telse:\n\t\tprint('Invalid mode:', mode)\n\t\traise ValueError('Invalid mode:' + mode)\n\t# ktrans = param_opt[0]\n\n\t\n\ty = kidney_sync\n\n\tparam = {}\n\tparam['param_opt'] = param_opt\n\tparam['chisqr'] = chisqr\n\tparam['xdata'] = xdata\n\tparam['y'] = y\n\tparam['mode'] = mode\n\tparam['C_fit'] = C_fit\n\tparam['C_art'] = C_art\n\tparam['C_tub'] = C_tub\n\n\n\treturn param\n\n\ndef two_compartment_detailed(xdata, ktrans_m, vb, delta, t_fwhm):\n\t# xdata is nt-by-2 matrix (first_col = time [s], second_col = Cb [mM])\n\t# Cb is the concentration of contrast in blood (i.e. 
AIF)\n\t# ktrans - transfer coefficient [1/min]\n\t# vb - blood fraction []\n\t# delta - delay of the Gaussian VIRF [s]\n\t# t_fwhm - full-width-half-maximum of the Gaussian VIRF [s]\n\n\t# if t_fwhm <= 0 or ktrans <= 0 or vb <= 0:\n\t# \treturn np.zeros(xdata.shape[0])\n\t# delta = np.abs(delta)\n\n\tt = xdata[:, 0]\n\tCb_art = xdata[:, 1]\n\n\thct_large = 0.41\n\thct_small = 0.24\n\n\t# Convert Ktrans to s^-1\n\tktrans = np.abs(ktrans_m/60)\n\n\t# Calculate plasma concentration in Aorta\n\tCp_art = Cb_art/(1-hct_large)\n\n\t# Calculate VIRF\n\tfilter_width = 40 # seconds\n\tt_filter = t[t<=filter_width] # limit the length\n\tg = np.exp(-(4*np.log(2))*((t_filter-delta)/t_fwhm)**2)\n\tg_area = np.sum(g)\n\tif g_area > 0:\n\t\tg /= np.sum(g)\n\n\t# Calculate delayed and dispersed AIF\n\tCp_kid = np.convolve(Cp_art, g, mode='full')[:t.shape[0]]\n\n\tCp_kid_integral = cumtrapz(Cp_kid, x=t, initial=0)\n\n\tvd_Cd = ktrans*Cp_kid_integral\n\n\tCt = vb*(1-hct_small)*Cp_kid + vd_Cd\n\n\t# return Ct\n\treturn (Ct, vb*(1-hct_small)*Cp_kid, vd_Cd)\n\n\ndef two_compartment(xdata, ktrans_m, vb, delta, t_fwhm):\n\t# xdata is nt-by-2 matrix (first_col = time [s], second_col = Cb [mM])\n\t# Cb is the concentration of contrast in blood (i.e. AIF)\n\t# ktrans - transfer coefficient [1/min]\n\t# vb - blood fraction []\n\t# delta - delay of the Gaussian VIRF [s]\n\t# t_fwhm - full-width-half-maximum of the Gaussian VIRF [s]\n\n\t# if t_fwhm <= 0 or ktrans <= 0 or vb <= 0:\n\t# \treturn np.zeros(xdata.shape[0])\n\t# delta = np.abs(delta)\n\n\t(C_roi, C_art, C_tub) = two_compartment_detailed(xdata, ktrans_m, vb, delta, t_fwhm)\n\treturn C_roi\n\n\nfrom lmfit import minimize, Parameters, Parameter, report_fit\ndef two_compartment_fit(t, Cb, C_kidney, chisqr=False):\n\tparams = Parameters()\n\t# params.add('ktrans', value=0.3, min=0)\n\t# params.add('vb', value=0.4, min=0)\n\t# params.add('delta', value=1, min=0, max=20)\n\t# params.add('t_fwhm', value=1, min=0.01)\n\n\t# params.add('ktrans', value=0.25, min=0)\n\t# params.add('vb', value=0.4, min=0)\n\t# params.add('delta', value=0, min=0, max=3)\n\t# params.add('t_fwhm', value=1, min=0.01)\n\t\n\tparams.add('ktrans', value=0.66, min=0)\n\tparams.add('vb', value=0.4, min=0)\n\tparams.add('delta', value=0, min=0, max=10)\n\tparams.add('t_fwhm', value=1, min=0.01)\n\n\t# # Single search\n\t# xdata = np.vstack([t.ravel(), Cb.ravel()]).T\n\t# result = minimize(two_compartment_cost_func, params, args=(xdata, C_kidney))\n\n\t# Random search\n\tbest_chisqr = None\n\tbest_result = None\n\tfor i in range(300):\n\t\tparams = Parameters()\n\t\tparams.add('ktrans', value=np.random.randint(10)/10, min=0)\n\t\tparams.add('vb', value=0.4, min=0)\n\t\tparams.add('delta', value=np.random.randint(50), min=0, max=50)\n\t\tparams.add('t_fwhm', value=1, min=0.01)\n\n\t\t# ktrans_fixed = 0.558273053571\n\t\t# params = Parameters()\n\t\t# params.add('ktrans', value=ktrans_fixed, min=ktrans_fixed, max=ktrans_fixed+1e-12)\n\t\t# params.add('vb', value=0.4, min=0)\n\t\t# params.add('delta', value=np.random.randint(50), min=0, max=50)\n\t\t# params.add('t_fwhm', value=1, min=0.01)\n\n\t\txdata = np.vstack([t.ravel(), Cb.ravel()]).T\n\t\tresult = minimize(two_compartment_cost_func, params, args=(xdata, C_kidney))\n\n\t\tif best_chisqr is None or result.chisqr < best_chisqr:\n\t\t\tbest_chisqr = result.chisqr\n\t\t\tbest_result = result\n\tresult = best_result\n\n\n\t# # Grid Search\n\t# ktrans_list = np.linspace(0, 1, num=20)\n\t# vb_list = [0.4]\n\t# delta_list = np.linspace(0, 50, 
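In `two_compartment_detailed` above, the tubular contribution is `ktrans` times the running integral of the delayed, dispersed plasma curve, computed with `cumtrapz(..., initial=0)`. A tiny self-contained check of what that call returns (toy integrand only):

import numpy as np
from scipy.integrate import cumtrapz

t = np.linspace(0.0, 10.0, 101)
y = 2.0 * t                       # exact integral is t**2
F = cumtrapz(y, x=t, initial=0)   # same length as t, with F[0] == 0
print(np.allclose(F, t ** 2))     # True: trapezoid rule is exact for lines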
num=20)\n\t# t_fwhm_list = [1]\n\t# best_chisqr = None\n\t# best_result = None\n\t# for ktrans_init in ktrans_list:\n\t# \tfor vb_init in vb_list:\n\t# \t\tfor delta_init in delta_list:\n\t# \t\t\tfor t_fwhm_init in t_fwhm_list:\n\t# \t\t\t\tparams = Parameters()\n\t# \t\t\t\tparams.add('ktrans', value=ktrans_init, min=0)\n\t# \t\t\t\tparams.add('vb', value=vb_init, min=0)\n\t# \t\t\t\tparams.add('delta', value=delta_init, min=0, max=50)\n\t# \t\t\t\tparams.add('t_fwhm', value=t_fwhm_init, min=0.01)\n\n\t# \t\t\t\t# ktrans_fixed = 0.558273053571\n\t# \t\t\t\t# params = Parameters()\n\t# \t\t\t\t# params.add('ktrans', value=ktrans_fixed, min=ktrans_fixed, max=ktrans_fixed+1e-12)\n\t# \t\t\t\t# params.add('vb', value=vb_init, min=0)\n\t# \t\t\t\t# params.add('delta', value=delta_init, min=0, max=50)\n\t# \t\t\t\t# params.add('t_fwhm', value=t_fwhm_init, min=0.01)\n\n\t# \t\t\t\txdata = np.vstack([t.ravel(), Cb.ravel()]).T\n\t# \t\t\t\tresult = minimize(two_compartment_cost_func, params, args=(xdata, C_kidney))\n\n\t# \t\t\t\tif best_chisqr is None or result.chisqr < best_chisqr:\n\t# \t\t\t\t\tbest_chisqr = result.chisqr\n\t# \t\t\t\t\tbest_result = result\n\t# result = best_result\n\t\n\n\tparam_opt = [result.params['ktrans'].value, \n\t\t\t\tresult.params['vb'].value,\n\t\t\t\tresult.params['delta'].value,\n\t\t\t\tresult.params['t_fwhm'].value]\n\t\n\tif chisqr:\n\t\treturn param_opt, result.chisqr\n\treturn param_opt\n\n\ndef two_compartment_cost_func(params, xdata, ydata):\n\tktrans = params['ktrans'].value\n\tvb = params['vb'].value\n\tdelta = params['delta'].value\n\tt_fwhm = params['t_fwhm'].value\n\ty_model = two_compartment(xdata, ktrans, vb, delta, t_fwhm)\n\treturn y_model - ydata\n\n\n\ndef three_compartment_detailed(xdata, ktrans, k12, fa, tau, d):\n\t# xdata is nt-by-2 matrix (first_col = time [s], second_col = Cb [mM])\n\t# Cb is the concentration of contrast in blood (i.e. 
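`two_compartment_fit` above restarts a local lmfit minimization from random initial values and keeps the result with the lowest `chisqr`. The same pattern on a toy exponential model (all names and values here are illustrative, not from the source):

import numpy as np
from lmfit import minimize, Parameters

def residual(params, x, y):
    return params['a'].value * np.exp(-x / params['tau'].value) - y

x = np.linspace(0, 10, 50)
y = 3.0 * np.exp(-x / 2.0) + np.random.normal(0, 0.05, x.shape)

best = None
for _ in range(20):                      # random restarts
    params = Parameters()
    params.add('a', value=np.random.uniform(0.1, 10), min=0)
    params.add('tau', value=np.random.uniform(0.1, 10), min=0.01)
    result = minimize(residual, params, args=(x, y))
    if best is None or result.chisqr < best.chisqr:
        best = result                    # keep the lowest chi-square
print(best.params['a'].value, best.params['tau'].value)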
AIF)\n\t# ktrans - ktrans transfer coefficient [1/min]\n\t# k12 - transfer coefficient [1/min]\n\t# fa - blood fraction []\n\t# tau - delay of the AIF [s]\n\t# d - dispersion of the AIF [s]\n\n\tt = xdata[:, 0]\n\tCb_art = xdata[:, 1]\n\n\thct = 0.41\n\n\t# Convert Ktrans to s^-1\n\tk21 = np.abs(ktrans/60)\n\tk12 = np.abs(k12/60)\n\n\t# Calculate plasma concentration in Aorta\n\tCa = Cb_art/(1-hct)\n\n\t# Apply delay (tau) to AIF\n\tCa_delayed = Ca\n\tif tau > 1e-6:\n\t\tnumpad = np.sum(t < tau)\n\t\toffset = t[numpad] - tau\n\t\t# offset = 0 # DEBUG REMOVE LATER\n\t\tif offset > 0:\n\t\t\tt_offset = t[:-1] + offset\n\t\t\tf_interp = interp1d(t, Ca, kind='linear')\n\t\t\tCa_offset = f_interp(t_offset)\n\t\telse:\n\t\t\tCa_offset = Ca\n\t\tCa_delayed = np.hstack([np.zeros(numpad), Ca_offset])\n\n\n\t# Apply dispersion (d) to delayed AIF\n\tCa_prime = Ca_delayed\n\tif (d > 0):\n\t\tfilter_cutoff = 4*d # seconds (4 time constants)\n\t\tt_filter = t[t<=filter_cutoff] # limit the length\n\t\tg = 1/d*np.exp(-t_filter/d)\n\t\tg_area = np.sum(g)\n\t\tif g_area > 0:\n\t\t\tg /= g_area\n\t\t# Colvolve delayed AIF with dispersion filter\n\t\tCa_prime = np.convolve(Ca_delayed, g, mode='full')[:t.shape[0]]\n\n\t# Apply lossy (k12) integration on the AIF (leakage to the third compartment)\n\tfilter_cutoff = 4/k12 # seconds (4 time constants)\n\tt_filter = t[t<=filter_cutoff] # limit the length\n\tg = np.exp(-t_filter*k12) # exponential decay over time\n\t# g = np.ones(t_filter.shape) # DEBUG REMOVE LATER\n\t# g_area = np.sum(g)\n\t# if g_area > 0:\n\t# \tg /= g_area\n\tCa_prime_integral = (t[1])*np.convolve(Ca_prime, g, mode='full')[:t.shape[0]]\n\n\tC_roi = fa*Ca_prime + k21*Ca_prime_integral\n\n\n\t# Ca_prime = Ca_prime[:t.shape[0]]\n\t# Cp_kid_integral = cumtrapz(Ca_prime, x=t, initial=0)\n\t# vd_Cd = ktrans*Cp_kid_integral\n\t# Ct = fa*(1-hct)*Ca_prime + vd_Cd\n\n\treturn (C_roi, fa*Ca_prime, k21*Ca_prime_integral)\n\t# return Ct\n\n\n\ndef three_compartment(xdata, ktrans, k12, fa, tau, d):\n\t# xdata is nt-by-2 matrix (first_col = time [s], second_col = Cb [mM])\n\t# Cb is the concentration of contrast in blood (i.e. 
AIF)\n\t# ktrans - ktrans transfer coefficient [1/min]\n\t# k12 - transfer coefficient [1/min]\n\t# fa - blood fraction []\n\t# tau - delay of the AIF [s]\n\t# d - dispersion of the AIF [s]\n\t(C_roi, C_art, C_tub) = three_compartment_detailed(xdata, ktrans, k12, fa, tau, d)\n\treturn C_roi\n\n\ndef three_compartment_fit(t, Cb, C_kidney, chisqr=False):\n\tparams = Parameters()\n\n\t# params.add('ktrans', value=0.9, min=0)\n\t# params.add('k12', value=0.9, min=0)\n\t# params.add('fa', value=0.35, min=0)\n\t# params.add('tau', value=0, min=0, max=10)\n\t# params.add('d', value=5, min=0)\n\n\tparams.add('ktrans', value=0.66, min=0)\n\tparams.add('k12', value=1.6, min=0)\n\tparams.add('fa', value=0.4, min=0)\n\tparams.add('tau', value=2.4, min=0, max=10)\n\tparams.add('d', value=2.5, min=0)\n\t\n\txdata = np.vstack([t.ravel(), Cb.ravel()]).T\n\t# result = minimize(three_compartment_cost_func, params, args=(xdata, C_kidney))\t\n\n\tbest_chisqr = None\n\tbest_result = None\n\tfor i in range(100):\n\t\tparams = Parameters()\n\t\tparams.add('ktrans', value=np.random.randint(10)/10, min=0)\n\t\tparams.add('k12', value=np.random.randint(20)/10, min=0)\n\t\tparams.add('fa', value=0.4, min=0)\n\t\tparams.add('tau', value=np.random.randint(10), min=0, max=10)\n\t\tparams.add('d', value=2.5, min=0)\n\n\t\tresult = minimize(three_compartment_cost_func, params, args=(xdata, C_kidney))\n\n\t\tif best_chisqr is None or result.chisqr < best_chisqr:\n\t\t\tbest_chisqr = result.chisqr\n\t\t\tbest_result = result\n\tresult = best_result\n\n\n\tparam_opt = [result.params['ktrans'].value, \n\t\t\t\tresult.params['k12'].value,\n\t\t\t\tresult.params['fa'].value,\n\t\t\t\tresult.params['tau'].value,\n\t\t\t\tresult.params['d'].value]\n\t\n\tif chisqr:\n\t\treturn param_opt, result.chisqr\n\treturn param_opt\n\n\ndef three_compartment_cost_func(params, xdata, ydata):\n\tktrans = params['ktrans'].value\n\tk12 = params['k12'].value\n\tfa = params['fa'].value\n\ttau = params['tau'].value\n\td = params['d'].value\n\ty_model = three_compartment(xdata, ktrans, k12, fa, tau, d)\n\treturn y_model - ydata\n\n\n\n\n\n# PARTIAL VOLUME MODEL #\nfrom Source.util import SI2C\ndef fit_renal_model_pv(SIaorta, SIkidney, TR, FA, temp_res = 3.62, mode='two_compartment'):\n\tif SIaorta.shape[1] != SIkidney.shape[1]:\n\t\traise ValueError('AIF and Kidney signals should have the same length!')\n\tnt = SIaorta.shape[1]\n\tacq_time = np.arange(nt)*temp_res\n\n\thct_large = 0.41\n\thct_small = 0.24\n\tr1 = 4.5 # s^-1 * mM^-1\n\tT10_blood = 1.4 # s\n\tT10_kidney = 1.2 # s\n\n\tother_params = {}\n\tother_params['hct_large'] = hct_large\n\tother_params['hct_small'] = hct_small\n\tother_params['TR'] = TR\n\tother_params['FA'] = FA\n\tother_params['r1'] = r1\n\tother_params['T10_blood'] = T10_blood\n\tother_params['T10_kidney'] = T10_kidney\n\n\n\t# Partial volume hct correction\n\t# Assume no enhancement in the rbc volume\n\tbaseline_rbc = SIaorta[:,0]*hct_large\n\tSIaorta_plasma = SIaorta - baseline_rbc.reshape(-1,1)\n\n\tbaseline = np.mean(SIaorta_plasma[:,0].astype(np.double))\n\tCp_full = SI2C(SIaorta_plasma.astype(np.double), TR, FA, T10_blood, r1, baseline=baseline)\n\tCp = np.mean(Cp_full, axis=0) # plasma concentration\n\tbaseline_blood = baseline/(1-hct_large) # Average baseline for a full voxel.\n\n\tother_params['baseline_blood'] = baseline_blood\n\n\tmode = mode.lower()\n\tif mode == 'two_compartment':\n\t\txdata = np.vstack([acq_time.ravel(), Cp.ravel()]).T\n\t\tparam_opt, chisqr = two_compartment_fit_pv(acq_time, Cp, SIkidney, 
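Both compartment models above apply delay and dispersion by convolving the AIF with a unit-area kernel and truncating the `'full'` output back to the input length. A self-contained illustration of why the kernel is normalized (toy curve and constants):

import numpy as np

t = np.arange(0.0, 60.0, 1.0)                  # time axis [s]
aif = np.exp(-0.5 * ((t - 15.0) / 3.0) ** 2)   # toy input bolus

d = 4.0                                        # dispersion constant [s]
t_filter = t[t <= 4 * d]                       # truncate kernel at ~4 tau
g = np.exp(-t_filter / d)
g /= g.sum()                                   # unit area: total mass preserved

dispersed = np.convolve(aif, g, mode='full')[:t.shape[0]]
print(aif.sum(), dispersed.sum())              # nearly equal sums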
other_params, chisqr=True)\n\t\t# (C_fit, C_art, C_tub) = two_compartment_detailed(xdata, param_opt[0], param_opt[1], param_opt[2], param_opt[3])\n\telif mode == 'three_compartment':\n\t\txdata = np.vstack([acq_time.ravel(), Cp.ravel()]).T\n\t\tparam_opt, chisqr = three_compartment_fit(acq_time_sync, aorta_sync, kidney_sync, chisqr=True)\n\t\t(C_fit, C_art, C_tub) = three_compartment_detailed(xdata, param_opt[0], param_opt[1], param_opt[2], param_opt[3], param_opt[4])\n\telse:\n\t\tprint('Invalid mode:', mode)\n\t\treturn -1, -1\n\n\t# ktrans = param_opt[0]\n\t\n\t# y = SIkidney\n\n\tparam = {}\n\tparam['param_opt'] = param_opt\n\tparam['chisqr'] = chisqr\n\tparam['xdata'] = xdata\n\t# param['y'] = y\n\tparam['mode'] = mode\n\t# param['C_fit'] = C_fit\n\t# param['C_art'] = C_art\n\t# param['C_tub'] = C_tub\n\n\n\treturn param\n\n\ndef two_compartment_fit_pv(t, Cp, SIkidney, other_params, chisqr=False):\n\tparams = Parameters()\n\t\n\tparams.add('ktrans', value=0.66, min=0)\n\tparams.add('vp', value=0.4, min=0, max=0.6)\n\tparams.add('delta', value=0, min=0, max=10)\n\tparams.add('t_fwhm', value=1, min=0.01)\n\n\t# best_chisqr = None\n\t# best_result = None\n\t# for i in range(100):\n\t# \tparams = Parameters()\n\t# \tparams.add('ktrans', value=np.random.randint(10)/10, min=0)\n\t# \tparams.add('vb', value=0.4, min=0)\n\t# \tparams.add('delta', value=np.random.randint(10), min=0, max=10)\n\t# \tparams.add('t_fwhm', value=1, min=0.01)\n\n\t# \txdata = np.vstack([t.ravel(), Cb.ravel()]).T\n\t# \tresult = minimize(two_compartment_cost_func, params, args=(xdata, C_kidney))\n\n\t# \tif best_chisqr is None or result.chisqr < best_chisqr:\n\t# \t\tbest_chisqr = result.chisqr\n\t# \t\tbest_result = result\n\t# result = best_result\n\n\txdata = np.vstack([t.ravel(), Cp.ravel()]).T\n\tresult = minimize(two_compartment_cost_func_pv, params, args=(xdata, SIkidney, other_params))\n\n\n\tparam_opt = [result.params['ktrans'].value, \n\t\t\t\tresult.params['vp'].value,\n\t\t\t\tresult.params['delta'].value,\n\t\t\t\tresult.params['t_fwhm'].value]\n\t\n\tif chisqr:\n\t\treturn param_opt, result.chisqr\n\treturn param_opt\n\n\ndef two_compartment_cost_func_pv(params, xdata, SIkidney, other_params):\n\tCd, Ctubule = two_compartment_advanced_pv(params, xdata, SIkidney, other_params)\n\n\treturn Cd - Ctubule\n\n\ndef two_compartment_advanced_pv(params, xdata, SIkidney, other_params):\n\tktrans = params['ktrans'].value\n\tvp = params['vp'].value\n\tdelta = params['delta'].value\n\tt_fwhm = params['t_fwhm'].value\n\tCt, vp_Ckp, vd_Cd = two_compartment_detailed_pv(xdata, ktrans, vp, delta, t_fwhm)\n\n\tbaseline_blood = other_params['baseline_blood']\n\thct_small = other_params['hct_small']\n\tTR = other_params['TR']\n\tFA = other_params['FA']\n\tr1 = other_params['r1']\n\tT10_kidney = other_params['T10_kidney']\n\tT10_blood = other_params['T10_blood']\n\n\tCkp = vp_Ckp/vp\n\tvb = vp/(1-hct_small)\n\tvd = 1-vb\n\tCd = vd_Cd/vd\n\n\t# Calculate signal from arterial compartment\n\tbaseline_rbc = baseline_blood*vb*hct_small\n\tbaseline_plasma = baseline_blood*vb*(1-hct_small)\n\n\tSIrbc = baseline_rbc\n\tSIplasma = C2SI(Ckp, baseline_plasma, T10_blood, TR, FA, r1)\n\tSIblood = SIrbc + SIplasma\n\tSItubule = SIkidney - SIblood.reshape(1,-1)\n\n\tvoxel_mask = np.sum(SItubule <= 0, axis=1) == 0 # discard voxels that have negative signal intensity.\n\n\tbaseline = np.mean(SItubule[voxel_mask, 0].astype(np.double))\n\tCtubule_full = SI2C(SItubule[voxel_mask, :].astype(np.double), TR, FA, T10_kidney, r1, 
baseline=baseline)\n\tCtubule = np.mean(Ctubule_full, axis=0)*(np.sum(voxel_mask)/voxel_mask.shape[0])\n\n\t# Cd from model, Ctubule from data\n\treturn (Cd, Ctubule)\n\n\ndef two_compartment_detailed_pv(xdata, ktrans_m, vp, delta, t_fwhm):\n\t# xdata is nt-by-2 matrix (first_col = time [s], second_col = Cp [mM])\n\t# Cp is the concentration of contrast in plasma (hct corrected)\n\t# ktrans - transfer coefficient [1/min]\n\t# vp - plasma fraction []\n\t# delta - delay of the Gaussian VIRF [s]\n\t# t_fwhm - full-width-half-maximum of the Gaussian VIRF [s]\n\n\t# if t_fwhm <= 0 or ktrans <= 0 or vb <= 0:\n\t# \treturn np.zeros(xdata.shape[0])\n\t# delta = np.abs(delta)\n\n\tt = xdata[:, 0]\n\tCp_art = xdata[:, 1]\n\n\t# Convert Ktrans to s^-1\n\tktrans = np.abs(ktrans_m/60)\n\n\t# Calculate VIRF\n\tfilter_width = 40 # seconds\n\tt_filter = t[t<=filter_width] # limit the length\n\tg = np.exp(-(4*np.log(2))*((t_filter-delta)/t_fwhm)**2)\n\tg_area = np.sum(g)\n\tif g_area > 0:\n\t\tg /= np.sum(g)\n\n\t# Calculate delayed and dispersed AIF\n\tCp_kid = np.convolve(Cp_art, g, mode='full')[:t.shape[0]]\n\n\tCp_kid_integral = cumtrapz(Cp_kid, x=t, initial=0)\n\n\tvd_Cd = ktrans*Cp_kid_integral\n\n\tvp_Ckp = vp*Cp_kid # kidney plasma concentration\n\n\tCt = vp_Ckp + vd_Cd\n\n\treturn (Ct, vp_Ckp, vd_Cd)\n\n\ndef C2SI(C, SI0, T10, TR, FA_rad, r1):\n\t# C => mM\n # SI0 => unitless (initial signal level)\n # T10 => s\n # TR => s\n # FA_rad => rad\n # r1 => s^-1 * mM^-1\n \n R10 = 1/T10\n R1 = R10 + r1*C\n T1 = 1/R1\n\n S0 = SI0 * (1-np.exp(-TR/T1[0])*np.cos(FA_rad)) / ((1-np.exp(-TR/T1[0]))*np.sin(FA_rad))\n SI = S0 * ((1-np.exp(-TR/T1))*np.sin(FA_rad)) / (1-np.exp(-TR/T1)*np.cos(FA_rad))\n \n return SI\n","repo_name":"umityoruk/renal-segmentation","sub_path":"Source/renalModels.py","file_name":"renalModels.py","file_ext":"py","file_size_in_byte":18182,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"12"} +{"seq_id":"33146301089","text":"import math\nimport random\n\nclass Player:\n def __init__(self, letter):\n #letter is x or o\n self.letter = letter\n def get_move(self, game):\n pass\n\nclass RandomComputerPlayer(Player):\n def __init__(self, letter):\n super().__init__(letter) \n\n def get_move(self, game):\n #rnadom valid sport for next move\n square = random.choice(game.available_moves())\n return square\n\nclass HumanPlayer(Player):\n def __init__(self, letter):\n super().__init__(letter)\n \n def get_move(self, game):\n valid_square = False\n val = None\n while not valid_square:\n square = input(self.letter + '\\'s turn. Input move (0-8):')\n #check value by trying to cast it to an integer\n #if not integer is invalid\n #if that spot isn't available on the board, it's invalid\n try:\n val = int(square)\n if val not in game.available_moves():\n raise ValueError\n valid_square = True\n except ValueError:\n print('Invalid square. 
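`C2SI` above is the spoiled gradient-echo (SPGR) signal equation. As a sanity check, that mapping can be inverted analytically for known constants; the sketch below is independent of `Source.util.SI2C`, which is assumed (not shown in the source) to perform the same inversion:

import numpy as np

TR, FA, r1, T10 = 0.004, np.deg2rad(12.0), 4.5, 1.4
C_true = np.linspace(0.0, 2.0, 11)                     # mM, baseline C=0 first
SI = C2SI(C_true, SI0=100.0, T10=T10, TR=TR, FA_rad=FA, r1=r1)

# Invert: recover E = exp(-TR/T1) from SI, then T1, then C.
S0 = SI[0] * (1 - np.exp(-TR / T10) * np.cos(FA)) / ((1 - np.exp(-TR / T10)) * np.sin(FA))
E = (S0 * np.sin(FA) - SI) / (S0 * np.sin(FA) - SI * np.cos(FA))
C_rec = (-np.log(E) / TR - 1.0 / T10) / r1
print(np.allclose(C_rec, C_true))                      # expect True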
Try again.')\n\n        return val\nclass UnbeatablePlayer(Player):\n    def __init__(self, letter):\n        super().__init__(letter)\n\n    def get_move(self, game):\n        if len(game.available_moves()) == 9:\n            square = random.choice(game.available_moves()) #randomly chosen\n        else:\n            square = self.minimax(game, self.letter)['position']\n        return square\n\n    def minimax(self, s, player):\n        max_player = self.letter\n        other_player = 'O' if player == 'X' else 'X'\n\n        #check if previous move was winner\n        if s.current_winner == other_player:\n            return {'position': None,\n                    'score': 1 * (s.num_empty_squares() + 1) if other_player == max_player else -1 * (s.num_empty_squares() + 1)\n                    }\n        elif not s.empty_squares():\n            return {'position': None, 'score': 0}\n\n        #initialize dictionaries\n        if player == max_player:\n            best = {'position': None, 'score': -math.inf} #each score maximizes\n        else:\n            best = {'position': None, 'score': math.inf} #each score minimizes \n        for possible_move in s.available_moves():\n            #1 make a move, try that spot\n            s.make_move(possible_move, player)\n\n            #2 use minimax to simulate a game after making that move\n            sim_score = self.minimax(s, other_player)\n\n            #3 undo the move\n            s.board[possible_move] = ' '\n            s.current_winner = None\n            sim_score['position'] = possible_move\n\n            #4 update dictionaries if necessary\n            if player == max_player:\n                if sim_score['score'] > best['score']:\n                    best = sim_score\n\n            else:\n                if sim_score['score'] < best['score']:\n                    best = sim_score\n\n        return best\n\n","repo_name":"ArianaBeckford/TicTacToe","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"75089721302","text":"import datetime\nimport numpy as np\nimport os\nimport shutil\nimport torch\n\nfrom torchmed.utils.metric import dice as dc\nfrom torchmed.utils.metric import jaccard, multiclass\n\n\ndef write_config(model, args, train_size, val_size):\n    num_params = 0\n    for m in model.modules():\n        if isinstance(m, torch.nn.Conv2d):\n            num_params += m.weight.data.numel()\n\n    configfile = os.path.join(args.output_dir, 'config.txt')\n    cfg_f = open(configfile, \"a\")\n    cfg_f.write('\\ntraining with {} patches\\n'\n                'validating with {} patches\\n'\n                .format(train_size * args.batch_size,\n                        val_size * args.batch_size))\n    cfg_f.write(('project: {}\\n' +\n                 'number of workers: {}\\n' +\n                 'number of epochs: {}\\n' +\n                 'starting epoch: {}\\n' +\n                 'batch size: {}\\n' +\n                 'learning rate: {:.6f}\\n' +\n                 'momentum: {:.5f}\\n' +\n                 'weight-decay: {:.5f}\\n' +\n                 'number of parameters: {}\\n')\n                .format(args.exp_id, args.workers,\n                        args.epochs, args.start_epoch,\n                        args.batch_size, args.lr,\n                        args.momentum, args.weight_decay, num_params)\n                )\n    cfg_f.write('\\nstarted training at {}\\n'.format(datetime.datetime.now()))\n    cfg_f.flush()\n\n\ndef write_end_config(args, elapsed_time):\n    configfile = os.path.join(args.output_dir, 'config.txt')\n    cfg_f = open(configfile, \"a\")\n    cfg_f.write('stopped training at {}\\n'.format(datetime.datetime.now()))\n    cfg_f.write('elapsed time : {:.2f} hours or {:.2f} days.'\n                .format((elapsed_time) / (60 * 60),\n                        (elapsed_time) / (60 * 60 * 24)))\n    cfg_f.flush()\n\n\ndef update_figures(log_plot):\n    # plot avg train loss_meter\n    log_plot.add_line('cross_entropy', 'average_train.csv', 'epoch', 'cross_entropy_loss', \"#1f77b4\")\n    log_plot.add_line('dice', 'average_train.csv', 'epoch', 'dice_loss', \"#ff7f0e\")\n    log_plot.plot('losses_train.png', 'epoch', 'loss')\n\n    # plot avg 
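The minimax in the tic-tac-toe record above scores wins as ±(empty squares + 1), so faster wins score higher. A possible speed-up, sketched against the same assumed game interface (`available_moves`, `make_move`, `board`, `current_winner`, `num_empty_squares`, `empty_squares`), is alpha-beta pruning; it returns the same move while skipping branches that cannot change the outcome:

import math

def minimax_ab(s, player, me, alpha=-math.inf, beta=math.inf):
    other = 'O' if player == 'X' else 'X'
    if s.current_winner == other:
        sign = 1 if other == me else -1
        return {'position': None, 'score': sign * (s.num_empty_squares() + 1)}
    if not s.empty_squares():
        return {'position': None, 'score': 0}
    best = {'position': None, 'score': -math.inf if player == me else math.inf}
    for move in s.available_moves():
        s.make_move(move, player)
        sim = minimax_ab(s, other, me, alpha, beta)
        s.board[move] = ' '             # undo the move
        s.current_winner = None
        sim['position'] = move
        if player == me:
            if sim['score'] > best['score']:
                best = sim
            alpha = max(alpha, sim['score'])
        else:
            if sim['score'] < best['score']:
                best = sim
            beta = min(beta, sim['score'])
        if beta <= alpha:               # remaining branches cannot matter
            break
    return best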
validation loss_meter\n    log_plot.add_line('cross_entropy', 'average_validation.csv', 'epoch', 'cross_entropy_loss', \"#1f77b4\")\n    log_plot.add_line('dice', 'average_validation.csv', 'epoch', 'dice_loss', \"#ff7f0e\")\n    log_plot.plot('losses_validation.png', 'epoch', 'loss')\n\n    # plot learning rate\n    log_plot.add_line('learning_rate', 'learning_rate.csv',\n                      'epoch', 'lr', '#1f77b4')\n    log_plot.plot('learning_rate.png', 'epoch', 'learning rate')\n\n    # plot dice\n    log_plot.add_line('train', 'average_train.csv', 'epoch', 'dice_metric', '#1f77b4')\n    log_plot.add_line('validation', 'average_validation.csv',\n                      'epoch', 'dice_metric', '#ff7f0e')\n    log_plot.plot('average_dice.png', 'epoch', 'dice', max_y=1)\n\n    # plot iou\n    log_plot.add_line('train', 'average_train.csv', 'epoch', 'iou_metric', '#1f77b4')\n    log_plot.add_line('validation', 'average_validation.csv',\n                      'epoch', 'iou_metric', '#ff7f0e')\n    log_plot.plot('average_iou.png', 'epoch', 'iou', max_y=1)\n\n\ndef save_checkpoint(state, is_best, output_dir):\n    filename = os.path.join(output_dir, 'checkpoint.pth.tar')\n    bestfile = os.path.join(output_dir, 'best_log.txt')\n    torch.save(state, filename)\n    if is_best:\n        bestfile_f = open(bestfile, \"a\")\n        bestfile_f.write('epoch:{:>5d} dice:{:>7.4f} IoU:{:>7.4f}\\n'.format(\n            state['epoch'], state['dice_metric'], state['iou_metric']))\n        bestfile_f.flush()\n        shutil.copyfile(filename,\n                        os.path.join(output_dir, 'model_best_dice.pth.tar'))\n\n\ndef poly_lr_scheduler(optimizer, init_lr, iter, lr_decay_iter=1,\n                      max_iter=100, power=0.9):\n    \"\"\"Polynomial decay of learning rate\n    :param init_lr is base learning rate\n    :param iter is a current iteration\n    :param lr_decay_iter how frequently decay occurs, default is 1\n    :param max_iter is number of maximum iterations\n    :param power is a polynomial power\n\n    \"\"\"\n    if iter % lr_decay_iter or iter > max_iter:\n        return optimizer\n\n    lr = init_lr * (1 - iter / max_iter)**power\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n\n    return lr\n\n\ndef eval_metrics(segmentation, reference):\n    results, undec_labels = multiclass(segmentation, reference, [dc, jaccard])\n    return list(map(lambda l: sum(l.values()) / len(l), results))\n","repo_name":"trypag/torchmed","sub_path":"examples/01_brain_segmentation_MRI/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"12"} +{"seq_id":"40568807507","text":"# Candy shop\n# sol 220915\n# 1-D knapsack\nfrom collections import defaultdict\n\n\nwhile True:\n    N, M = input().split()\n    if N=='0': break\n    N =int(N)\n    M = int(0.05+float(M)*100)\n    dp = [0 for _ in range(M+1)]\n    \n    for i in range(1,N+1):\n        c, p = input().split()\n        c = int(c)\n        p = int(0.05+float(p)*100.00) \n        \n        for m in range(p,M+1):\n            dp[m] = max(dp[m], dp[m-p]+c)\n\n    print(max(dp))","repo_name":"gudals113/Algorithms","sub_path":"boj/DP/knapsack/boj-4781.py","file_name":"boj-4781.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29609946388","text":"#!/usr/bin/env python\n\nfrom mininet.wifi.cli import CLI_wifi\nfrom mininet.log import output, error\nfrom cpstwinning.twins import Plc, Motor, Hmi, RfidReaderMqttWiFi\n\n\nclass CpsTwinningCli(CLI_wifi):\n    # Override 'mininet' prompt text\n    prompt = 'cpstwinning> '\n\n    def do_twinning(self, line):\n        \"\"\"Starts the twinning process.\n        Usage: twinning \n        \"\"\"\n        args = line.split()\n        
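A worked example of the recurrence in the knapsack record above (`dp[m] = max(dp[m], dp[m - p] + c)`): because `m` runs upward, each item may be bought repeatedly, i.e. this is the unbounded knapsack (toy calories/prices, not from the source):

items = [(200, 150), (300, 400)]   # (calories, price in cents)
M = 500                            # budget in cents
dp = [0] * (M + 1)
for c, p in items:
    for m in range(p, M + 1):      # upward scan allows reusing an item
        dp[m] = max(dp[m], dp[m - p] + c)
print(max(dp))   # 600: three 150-cent candies beat one 400-cent candy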
if len(args) != 1:\n error('Invalid number of args: twinning \\n')\n return\n else:\n self.mn.twinning(args[0])\n\n def do_get_tag(self, line):\n \"\"\"Retrieves a tag from a PLC.\n Usage: get_tag \n \"\"\"\n args = line.split()\n if len(args) != 2:\n error('Invalid number of args: get_tag \\n')\n return\n for node in self.mn.values():\n if node.name == args[0]:\n if isinstance(node, Plc) or isinstance(node, Hmi):\n output(node.get_var_value(args[1]))\n return\n error(\"No PLC or HMI found with name '{}'.\\n\".format(args[0]))\n\n def do_set_tag(self, line):\n \"\"\"Sets a tag in a PLC.\n Usage: set_tag \n \"\"\"\n args = line.split()\n if len(args) != 3:\n error('Invalid number of args: set_tag \\n')\n return\n for node in self.mn.values():\n if node.name == args[0]:\n if isinstance(node, Plc) or isinstance(node, Hmi):\n output(node.set_var_value(args[1], args[2]))\n return\n error(\"No PLC or HMI found with name '{}'.\\n\".format(args[0]))\n\n def do_show_tags(self, line):\n \"\"\"Shows all available tags of a PLC.\n Usage: show_tags \n \"\"\"\n args = line.split()\n if len(args) != 1:\n error('Invalid number of args: show_tags \\n')\n return\n for node in self.mn.values():\n if node.name == args[0]:\n if isinstance(node, Plc) or isinstance(node, Hmi):\n output(node.show_tags())\n return\n error(\"No PLC or HMI found with name '{}'.\\n\".format(args[0]))\n\n def do_stop_plc(self, line):\n \"\"\"Stops a PLC.\n Usage: stop_plc \n \"\"\"\n args = line.split()\n if len(args) != 1:\n error('Invalid number of args: stop_plc \\n')\n return\n\n for node in self.mn.values():\n if node.name == args[0] and isinstance(node, Plc):\n output(node.stop())\n return\n error(\"No PLC found with name '{}'.\\n\".format(args[0]))\n\n def do_start_plc(self, line):\n \"\"\"Starts a PLC.\n Usage: start_plc \n \"\"\"\n args = line.split()\n if len(args) != 1:\n error('Invalid number of args: start_plc \\n')\n return\n\n for node in self.mn.values():\n if node.name == args[0] and isinstance(node, Plc):\n output(node.start())\n return\n error(\"No PLC found with name '{}'.\\n\".format(args[0]))\n\n def do_show_motor_status(self, line):\n \"\"\"Shows a motor's status.\n Usage: show_motor_status \n \"\"\"\n args = line.split()\n if len(args) != 1:\n error('Invalid number of args: show_motor_status \\n')\n return\n devices = getattr(self.mn, 'physical_devices', [])\n for dev in devices:\n if dev.name == args[0] and isinstance(dev, Motor):\n output(dev.get_status())\n return\n error(\"No motor found with name '{}'.\\n\".format(args[0]))\n\n def do_devices(self, _line):\n \"\"\"Lists all devices (motors, pumps etc.).\n Usage: devices\n \"\"\"\n devices = getattr(self.mn, 'physical_devices', [])\n out = ' '.join(str(x) for x in devices) if devices else ''\n output('available devices are: \\n{}\\n'.format(out))\n\n def do_start_replication(self, _line):\n \"\"\"Starts the replication module.\n Usage: start_replication\n \"\"\"\n self.mn.start_replication()\n\n def do_stop_replication(self, _line):\n \"\"\"Stops the replication module.\n Usage: stop_replication\n \"\"\"\n self.mn.stop_replication()\n\n def do_start_viz(self, _line):\n \"\"\"Starts the visualization module.\n Usage: start_viz\n \"\"\"\n self.mn.start_viz()\n\n def do_stop_viz(self, _line):\n \"\"\"Stops the visualization module.\n Usage: stop_viz\n \"\"\"\n self.mn.stop_viz()\n\n def do_rfid_read(self, line):\n \"\"\"Reads a value by a RFID reader.\n Usage: rfid_read \n \"\"\"\n args = line.split()\n if len(args) != 2:\n error('Invalid number of args: 
nfc_read \\n')\n return\n for node in self.mn.values():\n if node.name == args[0]:\n if isinstance(node, RfidReaderMqttWiFi):\n output(node.read_value(args[1]))\n return\n error(\"No NFC reader found with name '{}'.\\n\".format(args[0]))\n\n def do_start_state_logging(self, _line):\n \"\"\"Starts the visualization module.\n Usage: start_state_logging\n \"\"\"\n self.mn.start_state_logging()\n\n def do_stop_state_logging(self, _line):\n \"\"\"Stops the state logging module.\n Usage: stop_state_logging\n \"\"\"\n self.mn.stop_state_logging()\n","repo_name":"sbaresearch/cps-twinning","sub_path":"cpstwinning/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"12"} +{"seq_id":"44543199112","text":"class Student:\r\n def __init__(self):\r\n self.__att1=None\r\n self.__att2=None\r\n self.__att3=None\r\n self.__att5=None\r\n \r\n def set_attributes(self,student_id,marks,age,course):\r\n self.__att1=student_id\r\n self.__att2=marks\r\n self.__att3=age\r\n self.__att5=course\r\n \r\n def validate_marks(self):\r\n if(self.__att2 >= 65 and self.__att2 <=100):\r\n return True\r\n else:\r\n return False\r\n def validate_age(self):\r\n if(self.__att3 > 20):\r\n return True\r\n else:\r\n return False\r\n def check_qualification(self):\r\n if(self.__att2 >= 65 and self.__att3 >20):\r\n return True\r\n else:\r\n return False\r\n \r\n def choose_course(self):\r\n if self.get_qualification():\r\n \r\n if(self.__att5==1001 and self.__att2>85):\r\n print(25575.0-(25575.0*0.25))\r\n elif(self.__att5==1001):\r\n print(25575.0)\r\n elif(self.__att5==1002 and self.__att2>85):\r\n print(15500-(15500.0*0.25))\r\n elif(self.__att5==1002):\r\n print(15500)\r\n else:\r\n print(\"No course is for you\")\r\n else:\r\n print(\"Invalid\")\r\n def get_marks(self):\r\n if(self.validate_marks()==True):\r\n return True\r\n else:\r\n return False\r\n def get_age(self):\r\n if(self.validate_age()==True):\r\n return True\r\n else:\r\n return False\r\n def get_qualification(self):\r\n if(self.check_qualification()==True):\r\n return True\r\n else:\r\n return False\r\n def get_id(self):\r\n return self.__att1\r\n def course_id(self):\r\n return self.__att5\r\n \r\n \r\n \r\nstudent1 = Student()\r\nstudent1.set_attributes(69,69,1,69)\r\nprint(student1.get_id())\r\nprint(student1.get_marks())\r\nprint(student1.get_age())\r\nprint(student1.get_qualification())\r\nprint(student1.course_id())\r\nstudent1.choose_course()\r\n","repo_name":"360-rks/120-hrs-of-training","sub_path":"Python and Oops(36 hrs)/Day 5/marks_age_qualification_validation_and_course_selection.py","file_name":"marks_age_qualification_validation_and_course_selection.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25574354593","text":"import RPi.GPIO as GPIO\nimport time\n\nencoder_data = 27\nencoder_clock = 22\nencoder_buttom = 17\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef encoder_interrupt(channel):\n print(\"CLK,DT \"+str(GPIO.input(encoder_clock))+\",\"+str(GPIO.input(encoder_data)))\n\nGPIO.setup(encoder_clock, GPIO.IN, GPIO.PUD_UP)\nGPIO.setup(encoder_data, GPIO.IN, GPIO.PUD_UP)\nGPIO.add_event_detect(encoder_clock, GPIO.BOTH, callback=encoder_interrupt) # add rising edge detection on a channel\n\ntry:\n while True:\n time.sleep(1)\nfinally:\n 
GPIO.cleanup()","repo_name":"analyzerlabs/BottleCounterCV","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38073385876","text":"\"\"\"Pylint test\"\"\"\nimport subprocess\nimport time\n\nimport psutil\n\n\ndef kill(proc_pid):\n    process = psutil.Process(proc_pid)\n    for proc in process.children(recursive=True):\n        proc.kill()\n    process.kill()\n\n\ndef main():\n    \"\"\"Pylint test\"\"\"\n    process = []\n\n    while True:\n        time.sleep(1)\n        action = input(\n            'Choose an action: q - quit, '\n            'x - close all windows, '\n            's - start the server, '\n            'k - start the clients:')\n        if action == 's':\n            # Start the server!\n            process.append(\n                subprocess.Popen(\n                    'python server.py', shell=True))\n        elif action == 'k':\n            print('Make sure the required number of test clients (2 or more) '\n                  'with password 123456 are registered on the server.')\n            print('The first launch can take quite a while because of key generation!')\n            clients_count = int(\n                input('Enter the number of test clients to launch: '))\n            # Start the clients:\n            for i in range(clients_count):\n                process.append(\n                    subprocess.Popen(\n                        f'python client.py -n test{i + 1} -p 123456', shell=True))\n        elif action == 'x':\n            while process:\n                kill(process.pop().pid)\n            print('All windows are closed!')\n        elif action == 'q':\n            print('Program finished!')\n            break\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"iTheSand/client_server_applications_on_Python","sub_path":"async_chat/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29715991569","text":"import torch\nimport torch.nn as nn\nimport torchvision\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport os\nimport sys\nimport argparse\nimport time\nimport data\nimport model\nimport numpy as np\n\n\ndef train_val():\n\n\tgenerator = model.Generator().cuda() # Generator\n\tdiscriminatorD = model.DiscriminatorD().cuda() # Real-Fake Discriminator\n\tdiscriminatorA = model.DiscriminatorA().cuda() # Domain Discriminator\n\n\tdataFeeder = data.domainTransferLoader('/home/user/data/lookbook/data')\n\ttrain_loader = torch.utils.data.DataLoader(dataFeeder, batch_size=128, shuffle=True,\n\t\t\t\t\t\t\t\t\t\t\t   num_workers=2, pin_memory=True)\n\n\tcriterion = nn.BCEWithLogitsLoss().cuda()\n\n\toptimizerD = torch.optim.Adam(discriminatorD.parameters(), lr=0.0002)\n\toptimizerA = torch.optim.Adam(discriminatorA.parameters(), lr=0.0002)\n\toptimizerG = torch.optim.Adam(generator.parameters(), lr=0.0002)\n\n\tgenerator.train()\n\tdiscriminatorD.train()\n\tdiscriminatorA.train()\n\t\n\tfor epoch in range(10):\n\t\tfor i, (image1, image2, image3) in enumerate(train_loader):\n\n\t\t\tI1_var = image1.to(torch.float32).cuda() #Image of cloth being worn by model in image3\n\t\t\tI2_var = image2.to(torch.float32).cuda() #Image of cloth unassociated with model in image3\n\t\t\tI3_var = image3.to(torch.float32).cuda() #Image of Model\n\t\t\t\n\t\t\treal_label_var = torch.ones((I1_var.shape[0],1), requires_grad=False).cuda()\n\t\t\tfake_label_var = torch.zeros((I1_var.shape[0],1), requires_grad=False).cuda()\t\t\t\n\n\t\t\t# ----------\n\t\t\t# Train DiscriminatorD\n\t\t\t# ----------\n\t\t\t\n\t\t\toptimizerD.zero_grad()\n\n\t\t\tout_associated = discriminatorD(I1_var)\n\t\t\tlossD_real_1 = criterion(out_associated, 
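In the rotary-encoder record above, the callback only prints the raw CLK/DT levels. A common decoding variant (a sketch; pin names follow the record, and `bouncetime` is a standard `RPi.GPIO` option) derives the turn direction by sampling DT on a falling CLK edge:

# Replace the GPIO.BOTH detection from the record with a FALLING-edge one.
def encoder_direction(channel):
    clk = GPIO.input(encoder_clock)
    dt = GPIO.input(encoder_data)
    # On a falling CLK edge, DT still high => clockwise, else counter-clockwise.
    if clk == 0:
        print('CW' if dt == 1 else 'CCW')

GPIO.add_event_detect(encoder_clock, GPIO.FALLING,
                      callback=encoder_direction, bouncetime=2)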
real_label_var)\n\n\t\t\tout_not_associated = discriminatorD(I2_var)\n\t\t\tlossD_real_2 = criterion(out_not_associated, real_label_var)\n\n\t\t\tfake = generator(I3_var).detach()\n\t\t\tout_fake = discriminatorD(fake)\n\t\t\tlossD_fake = criterion(out_fake, fake_label_var)\n\n\t\t\tlossD = (lossD_real_1 + lossD_real_2 + lossD_fake)/3\n\n\t\t\tlossD.backward()\n\t\t\toptimizerD.step()\n\t\t\t\n\t\t\t# ----------\n\t\t\t# Train DiscriminatorA\n\t\t\t# ----------\n\t\t\t\n\t\t\toptimizerA.zero_grad()\n\n\t\t\tassociated_pair_var = torch.cat((I3_var, I1_var),1)\n\t\t\tnot_associated_pair_var = torch.cat((I3_var, I2_var),1)\n\n\t\t\tfake = generator(I3_var).detach()\n\t\t\tfake_pair_var = torch.cat((I3_var, fake),1)\n\n\t\t\tout_associated = discriminatorA(associated_pair_var)\n\t\t\tlossA_ass = criterion(out_associated, real_label_var)\n\n\t\t\tout_not_associated = discriminatorA(not_associated_pair_var)\n\t\t\tlossA_not_ass = criterion(out_not_associated, fake_label_var)\n\n\t\t\tout_fake = discriminatorA(fake_pair_var)\n\t\t\tlossA_fake = criterion(out_fake, fake_label_var)\n\n\t\t\tlossA = (lossA_ass + lossA_not_ass + lossA_fake)/3\n\n\t\t\tlossA.backward()\n\t\t\toptimizerA.step()\n\n\t\t\t# ----------\n\t\t\t# Train Generator\n\t\t\t# ----------\n\t\t\t\n\t\t\toptimizerG.zero_grad()\n\t\t\t\n\t\t\tfake = generator(I3_var)\n\t\t\toutputD = discriminatorD(fake)\n\t\t\tlossGD = criterion(outputD,real_label_var)\n\n\t\t\tfake_pair_var = torch.cat((I3_var, fake),1)\n\t\t\toutputA = discriminatorA(fake_pair_var)\n\t\t\tlossGA = criterion(outputA,real_label_var)\n\n\t\t\tlossG = (lossGD + lossGA)/2\n\n\t\t\tlossG.backward()\n\t\t\toptimizerG.step()\n\n\t\t\tif((i+1) % 10) == 0:\n\t\t\t\tprint(\"Iter:\", i+1, \"/\", len(train_loader))\n\t\t\t\tprint(\"LossG:\", lossG.item(), \"LossD:\", lossD.item(), \"LossA:\", lossA.item())\n\t\t\tif((i+1) % 100) == 0:\n\t\t\t\ttorchvision.utils.save_image((fake+1)/2, 'samples/'+str(i+1)+'.jpg')\n\n\n\n\nif __name__ == '__main__':\n\tos.system('mkdir -p samples')\n\ttrain_val()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"MayankSingal/PyTorch-Pixel-Level-Domain-Transfer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15848654214","text":"# Converting between the str and bytes types\n# str to bytes: encoding, via encode\n#\n# bytes to str: decoding, via decode\n\ns1 = b\"hello\" # bytes\ns2 = \"hello\" # str\nprint( type(s1),type(s2))\n\n# Python str to a byte string\nres = s2.encode(\"utf-8\") # str to bytes\nprint(type(res),res)\n\n# byte string back to a Python str\nres = s1.decode(\"utf-8\")\nprint( type(res),res) # bytes to str\n\n# Binary file operations\nwith open(\"04.txt\",\"wb\") as fp:\n\tfp.write(\"中国\".encode(\"utf-8\"))\n\nwith open(\"04.txt\",\"rb\") as fp:\n\tdata = fp.read()\n\tprint(len(data),data)\n\tprint(data.decode('utf-8'))","repo_name":"zaoyuaner/Learning-materials","sub_path":"python1812/python_1/16_异常_文件读写/代码/14_字符串的编解码.py","file_name":"14_字符串的编解码.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"31451240713","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclasses = ['P', 'N']\nconfusion_matrix = np.array([(9, 1, ), (2, 13, ),],dtype=int)\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Oranges) # display the matrix as an image, one cell per entry\nplt.colorbar()\ntick_marks = np.arange(len(classes))\nplt.xticks(tick_marks, 
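In the GAN loop above, fakes are `.detach()`-ed for both discriminator updates so no generator gradients are built, and then regenerated without detaching for the generator step. The same three-step skeleton in miniature (toy 1-D tensors; illustrative only, not the record's models):

import torch
import torch.nn as nn

G = nn.Linear(4, 4)
D = nn.Sequential(nn.Linear(4, 1))
optG = torch.optim.Adam(G.parameters(), lr=2e-4)
optD = torch.optim.Adam(D.parameters(), lr=2e-4)
crit = nn.BCEWithLogitsLoss()
z, real = torch.randn(8, 4), torch.randn(8, 4)
ones, zeros = torch.ones(8, 1), torch.zeros(8, 1)

optD.zero_grad()
lossD = crit(D(real), ones) + crit(D(G(z).detach()), zeros)  # detach: D-only grads
lossD.backward()
optD.step()

optG.zero_grad()
lossG = crit(D(G(z)), ones)   # fresh forward pass, gradients reach G
lossG.backward()
optG.step()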
classes)\nplt.yticks(tick_marks, classes)\nthresh = confusion_matrix.max() / 2.\nfontsize = 15\n\niters = np.reshape([[[i, j] for j in range(2)] for i in range(2)], (confusion_matrix.size, 2))\nfor i, j in iters:\n    plt.text(j, i, format(confusion_matrix[i, j]),fontsize=fontsize) # write the count into each cell\n\nplt.ylabel('True label',fontsize=fontsize)\nplt.xlabel('Predicted label',fontsize=fontsize)\nplt.title('Confusion matrix',fontsize=15)\nplt.tight_layout()\nplt.show()","repo_name":"jm199504/Financial-Time-Series","sub_path":"Financial-Time-Others/code/confuse_matrix.py","file_name":"confuse_matrix.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"12"} +{"seq_id":"5690805328","text":"import re, random\nfrom typing import Optional\nimport ast\nfrom fastapi import (\n    FastAPI,\n    Request,\n    Response,\n    Cookie,\n    WebSocket,\n    Form,\n    WebSocketDisconnect,\n)\nfrom fastapi.staticfiles import StaticFiles\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.responses import RedirectResponse\n\nfrom utils.ConnectionManager import ConnectionManager\nfrom utils.Message import Message\nfrom utils.Ratelimits import RatelimitManager\n\n\nVALIDURL = re.compile(r\"^([a-z\\-_0-9\\/\\:\\.]*\\.(jpg|jpeg|png|gif|webp))\", re.IGNORECASE)\nURL = re.compile(\n    \"(https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s]{2,})\",\n    re.IGNORECASE,\n)\nDEFAULT_PFP = \"https://cdn.discordapp.com/attachments/830269354649452564/863169775646670888/unknown.png\"\nCOLORS = (\"purple\", \"pink\", \"red\", \"yellow\", \"green\", \"blue\", \"indigo\")\nSLOWDOWN = \"Slow Down! 
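From the 2x2 matrix plotted in the confusion-matrix record above ([[9, 1], [2, 13]], rows = true class, columns = predicted class, positive class 'P' first), the usual summary metrics follow directly:

import numpy as np

cm = np.array([[9, 1], [2, 13]])
tp, fn, fp, tn = cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]
accuracy = (tp + tn) / cm.sum()          # 22/25 = 0.88
precision = tp / (tp + fp)               # 9/11 ~ 0.818
recall = tp / (tp + fn)                  # 9/10 = 0.90
f1 = 2 * precision * recall / (precision + recall)  # ~0.857
print(accuracy, precision, recall, f1)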
5s (Only you can see this message)\"\nignore = []\n\napp = FastAPI()\napp.mount(\"/src/static\", StaticFiles(directory=\"src/static\"), name=\"static\")\ntemplates = Jinja2Templates(directory=\"src/templates\")\n\nmanager = ConnectionManager()\n\nchatlimiter = RatelimitManager(rate=5, per=5.0)\nratelimiter = RatelimitManager(rate=1, per=5.0)\n\n\ndef get_response(\n template: str,\n request: Request,\n response: Response,\n data: dict = {},\n) -> Response:\n data[\"request\"] = request\n res = templates.TemplateResponse(template, data)\n\n return res\n\n\n@app.get(\"/\")\nasync def read_root(\n request: Request, response: Response, group: Optional[str] = \"\", error: str = \"\"\n):\n return get_response(\n \"index.html\", request, response, {error: \"border-red-500\", \"group_id\": group}\n )\n\n\n@app.get(\"/newuser\")\nasync def new_user(\n request: Request,\n response: Response,\n username: str = \"\",\n avatar: str = \"\",\n color: str = \"\",\n group: str = None,\n):\n if ratelimiter.check_ratelimit(str(request.client.host)):\n r = f\"&group={group}\" if group else \"\"\n if (username.strip() == \"\") or (not username.isalnum()) or (len(username) > 24):\n return RedirectResponse(f\"/?error=username{r}\")\n if avatar and not VALIDURL.match(avatar):\n return RedirectResponse(f\"/?error=avatar{r}\")\n if color not in COLORS:\n color = random.choice(COLORS)\n\n if group and not manager.group_exists(group):\n return RedirectResponse(f\"/?error=group\")\n client_uuid, group_id = manager.wait_user(\n username=username,\n avatar=avatar or DEFAULT_PFP,\n color=color,\n group_id=group,\n )\n\n return RedirectResponse(f\"/chat?user={client_uuid}&group={group_id}\")\n else:\n return RedirectResponse(\"/?error=limited\")\n\n\n@app.get(\"/chat\")\nasync def chat(request: Request, response: Response, user: str = \"\"):\n if not manager.is_waiting(user):\n return RedirectResponse(\"/?error=failed\")\n\n user = manager.waiting_users.get(user)\n return get_response(\n \"chat.html\",\n request,\n response,\n {\n \"username\": user.username,\n \"avatar\": user.avatar,\n \"color\": user.color,\n \"default_avatar\": DEFAULT_PFP,\n },\n )\n\n\n@app.websocket(\"/ws/{group_id}/{client_uuid}\")\nasync def websocket_endpoint(websocket: WebSocket, group_id: str, client_uuid: str):\n if (not manager.group_exists(group_id)) or (not manager.is_waiting(client_uuid)):\n return\n\n await manager.connect(client_uuid, websocket)\n try:\n await manager.broadcast(\n client_uuid, group_id, f\"just joined the chat!\", Message.EVENT\n )\n while True:\n data = await websocket.receive_text()\n if data.strip() != \"\":\n if chatlimiter.check_ratelimit(client_uuid):\n await manager.broadcast(\n client_uuid, group_id, data, Message.MESSAGE\n )\n if client_uuid in ignore:\n ignore.remove(client_uuid)\n else:\n if client_uuid not in ignore:\n ignore.append(client_uuid)\n await manager.send_ephemeral_event(client_uuid, SLOWDOWN)\n\n except WebSocketDisconnect:\n user = await manager.disconnect(client_uuid, group_id, websocket)\n if manager.group_exists(group_id):\n await manager.broadcast(\n \"_\", group_id, f\"{user.username} has left the chat\", Message.EVENT\n )\n","repo_name":"nathanielfernandes/Ephemeral-Chat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4627,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"12"} +{"seq_id":"40597891693","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, 
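The chat app above uses `RatelimitManager(rate, per)` as "allow `rate` hits per `per` seconds per key". The project-internal `utils.Ratelimits` is not shown in the source, so this sliding-window sketch only illustrates one way that interface could be implemented:

import time
from collections import defaultdict, deque

class RatelimitManager:
    def __init__(self, rate: int, per: float):
        self.rate, self.per = rate, per
        self.hits = defaultdict(deque)   # key -> timestamps of recent hits

    def check_ratelimit(self, key: str) -> bool:
        now = time.monotonic()
        q = self.hits[key]
        while q and now - q[0] > self.per:
            q.popleft()                  # drop hits outside the window
        if len(q) < self.rate:
            q.append(now)
            return True                  # allowed
        return False                     # limited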
models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('server', '0003_error'),\n    ]\n\n    operations = [\n        migrations.RenameField(\n            model_name='error',\n            old_name='errorid',\n            new_name='error_id',\n        ),\n    ]\n","repo_name":"PerminovMA/slide_lock","sub_path":"slide_server/server/migrations/0004_auto_20151121_1800.py","file_name":"0004_auto_20151121_1800.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2737613046","text":"import pandas as pd\nimport xlwings as xw\nimport numpy as np\npd.set_option('display.precision',100)\n\ncounter = 0\nlist1 = []\nlist2 = []\nlist3 = []\n\n\nPATH = 'F:\Programmieren\mdm.xlsx'\nwb = xw.Book(PATH)\nsheetGeräte = wb.sheets['Geräte Pool-Vergabe']\nsheetBank = wb.sheets['MDM Bank']\nsheetService = wb.sheets['MDM Service']\n\n#print(type(sheetGeräte))\n\n\ndf = sheetGeräte['A1:T1520'].options(pd.DataFrame, index=True, header = True).value\naf = sheetBank['A8:D1256'].options(pd.DataFrame, index=True, header = True).value\n\ndf = df.set_index([\"Nachname\"])\n\ndf = df.drop(\"Pool\",0)\n\ndf = df.reset_index()\n\npf = df.loc[:, \"Nachname\"]\ncf = df.loc[:, 'IMEI-Nummer']\ngf = af.loc[:, 'IMEI']\n\ndef search(list1):\n    for item in list1:\n        if '354200072240011' != item:\n            print('not present')\n\n\n\ndef saveAsExcel(list, Name):\n\n    name = pd.DataFrame(list)\n\n    writer = pd.ExcelWriter(Name, engine='xlsxwriter')\n\n    name.to_excel(writer, sheet_name='Sheet1')\n\n    writer.save()\n\ndef checkImei(cf,gf):\n    global counter\n    for rowD in cf:\n        for rowA in gf:\n            if (rowD == rowA and len(str(rowD)) > 5):\n                list1.append(rowD)\n                counter += 1\n    c = np.array(list1)\n    return c\n\n\ndef nameToList(pf):\n    for row in pf:\n        list2.append(row)\n    c = np.array(list2)\n    return c\n\ndef imeiToList(cf):\n    for row in cf:\n        list3.append(row)\n    c = np.array(list3)\n    return c\n\ndef check(list):\n    for index, row in enumerate(list):\n        for index, cell in enumerate(row):\n            print(cell)\nNamen = []\n\n#print(len(checkImei(cf,gf)))\nnameToList(pf)\nimeiToList(cf)\nlist4 = np.array([list(a) for a in zip(list3, list2)])\n\n\nprint(list4)\n","repo_name":"ChrisPasda/crispyclean","sub_path":"Excel-Datenbank_Vergleich/MDM.py","file_name":"MDM.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5690221776","text":"# Find contiguous subarrays with a given sum\n## Input: a list of length N; output: the number of subarrays summing to M\nimport sys\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nlist = list(map(int, input().split()))\n\nanswer = 0\ninterval_sum = 0\nend = 0\n\nfor start in range(N):\n\n    # While the interval sum is still below M and end has not passed N,\n    # accumulate the value at index end into interval_sum\n    # and move end forward by 1.\n    while interval_sum < M and end < N:\n        interval_sum += list[end]\n        end += 1\n    \n    # If the interval sum equals M, increment answer\n    if interval_sum == M:\n        answer +=1\n    \n    # Once the checks above are done, start has to move on,\n    ## so subtract the value at start from interval_sum.\n    interval_sum -= list[start]\n\nprint(answer)","repo_name":"7unho/TIL_algo_solution","sub_path":"src/BOJ/python/2Pointers_test.py","file_name":"2Pointers_test.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15280327153","text":"# -*- coding: utf-8 -*-\n\nimport urllib.request\nimport json\n\n# Fetch the data from the web\nsrc = 
'http://weather.livedoor.com/forecast/webservice/json/v1?city=400040'\nf = urllib.request.urlopen(src)\nstrdat = f.read()\nf.close()\n\n# Parse the JSON record\nq = json.loads(strdat)\n\n# For now, just print the title of the fetched data\nprint(q['title'])\n","repo_name":"KyosukeKamoda/Pr1","sub_path":"weather1.py","file_name":"weather1.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15137004128","text":"# Python imports :\nimport sys, os, bpy, socket, shutil\nfrom importlib import import_module\nfrom os.path import dirname, join, realpath, abspath, exists\nfrom subprocess import call\n\n\n#############################################################\ndef ShowMessageBox(message=[], title=\"INFO\", icon=\"INFO\"):\n    def draw(self, context):\n        for txtLine in message:\n            self.layout.label(text=txtLine)\n\n    bpy.context.window_manager.popup_menu(draw, title=title, icon=icon)\n\n#############################################################\ndef isConnected():\n    try:\n        sock = socket.create_connection((\"www.google.com\", 80))\n        if sock is not None:\n            print(\"Closing socket\")\n            sock.close()\n        return True\n    except OSError:\n        pass\n    return False\n    \n#############################################################\ndef ImportReq(REQ_DICT):\n    Pkgs = []\n    for mod, pkg in REQ_DICT.items():\n        try:\n            import_module(mod)\n        except ImportError:\n            Pkgs.append(pkg)\n\n    return Pkgs\n#############################################################\ndef ReqInternetInstall(path, modules):\n    # Download and install requirements if not AddonPacked version:\n    if sys.platform in ['darwin', 'linux'] :\n\n        PythonPath = join(sys.base_exec_prefix, \"bin\", \"python3.7m\")\n\n        call(\n            f'\"{PythonPath}\" -m ensurepip ',\n            shell=True)\n\n        call(\n            f'\"{PythonPath}\" -m pip install -U pip==21.0 ',\n            shell=True)\n\n        for module in modules:\n            command = f' \"{PythonPath}\" -m pip install {module} --target \"{path}\" '\n            call(command, shell=True)\n\n    if sys.platform in ['win32'] :\n\n        PythonBin = dirname(sys.executable)\n\n        call(\n            f'cd \"{PythonBin}\" && \".\\\\python.exe\" -m ensurepip ',\n            shell=True)\n\n        call(\n            f'cd \"{PythonBin}\" && \".\\\\python.exe\" -m pip install -U pip==21.0 ',\n            shell=True)\n\n        for module in modules:\n            command = f'cd \"{PythonBin}\" && \".\\\\python.exe\" -m pip install \"{module}\" --target \"{path}\" '\n            call(command, shell=True)\n\n#############################################################\ndef ReqInstall(REQ_DICT, REQ_ARCHIVE, REQ_DIR):\n    \n    Pkgs = list(REQ_DICT.values())\n    \n    if exists(REQ_ARCHIVE):\n\n        shutil.unpack_archive(REQ_ARCHIVE, REQ_DIR)\n        os.remove(REQ_ARCHIVE)\n\n        print(\"Requirements installed from ARCHIVE!\")\n        print(\"Please Restart Blender\")\n        message = [\"Required Modules installation completed! \",\n                   \"Please Restart Blender\"]\n        ShowMessageBox(message=message, icon=\"COLORSET_03_VEC\")\n        \n    else :\n        if isConnected():\n            \n            ReqInternetInstall(path=REQ_DIR, modules=Pkgs)\n\n            ##########################\n            print(\"requirements Internet installation completed.\")\n            print(\"Please Restart Blender\")\n            message = [\"Required Modules installation completed! \",\n                       \"Please Restart Blender\"]\n            ShowMessageBox(message=message, icon=\"COLORSET_03_VEC\")\n\n        else :\n            message = [\"Please check your Internet connection and retry! 
\"]\n ShowMessageBox(message=message, icon=\"COLORSET_02_VEC\")\n print(message)\n\n \n#############################################################\n# Install Requirements Operators :\n#############################################################\n\nclass BDENTAL_OT_InstallRequirements(bpy.types.Operator):\n \"\"\" Requirement installer \"\"\"\n\n bl_idname = \"bdental.installreq\"\n bl_label = \"INSTALL BDENTAL MODULES\"\n\n def execute(self, context):\n\n REQ_DICT = {\n \"SimpleITK\": \"SimpleITK==2.0.2\",\n \"vtk\": \"vtk==9.0.1\",\n \"cv2\": \"opencv-contrib-python==4.4.0.46\", \n }\n ADDON_DIR = dirname(dirname(abspath(__file__)))\n REQ_DIR = join(ADDON_DIR, \"Resources\", \"Requirements\")\n\n if sys.platform == 'darwin' :\n REQ_ARCHIVE = join(REQ_DIR, \"BDENTAL_REQ_MAC.tar.xz\")\n if sys.platform == 'linux' :\n REQ_ARCHIVE = join(REQ_DIR, \"BDENTAL_REQ_LINUX.tar.xz\")\n if sys.platform == 'win32' :\n REQ_ARCHIVE = join(REQ_DIR, \"BDENTAL_REQ_WIN.zip\")\n\n ReqInstall(REQ_DICT, REQ_ARCHIVE, REQ_DIR)\n\n return {\"FINISHED\"}\n\nclass BDENTAL_PT_InstallReqPanel(bpy.types.Panel):\n \"\"\" Install Req Panel\"\"\"\n\n bl_idname = \"BDENTAL_PT_InstallReqPanel\"\n bl_space_type = \"VIEW_3D\"\n bl_region_type = \"UI\" # blender 2.7 and lower = TOOLS\n bl_category = \"BDENTAL\"\n bl_label = \"BDENTAL\"\n # bl_options = {\"DEFAULT_CLOSED\"}\n\n def draw(self, context):\n layout = self.layout\n row = layout.row()\n row.operator(\"bdental.installreq\")\n \n\n#################################################################################################\n# Registration :\n#################################################################################################\n\nclasses = [\n BDENTAL_OT_InstallRequirements,\n BDENTAL_PT_InstallReqPanel,\n]\n\n\ndef register():\n\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\ndef unregister():\n\n for cls in reversed(classes):\n bpy.utils.unregister_class(cls)\n","repo_name":"issamdakir/BDENTAL","sub_path":"Operators/BDENTAL_InstallReq.py","file_name":"BDENTAL_InstallReq.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"12"} +{"seq_id":"40202251874","text":"import sys\nsys.path.append(\"lib\")\nimport thinknode_worker as thinknode\nimport dosimetry_worker as dosimetry\nimport dicom_worker as dicom\nimport decimal_logging as dl\nimport rt_types as rt_types\nimport vtk_worker as vtk\nimport thinknode_id as tn_id\n\n\niam = thinknode.authenticate(thinknode.read_config('thinknode.cfg'))\n\n\ndef dose_to_vtk(dose_id):\n\timg_data = thinknode.get_immutable(iam, 'dicom', dose_id, False)\n\n\timg = rt_types.image_3d()\n\timg.from_json(img_data)\n\timg2 = img.expand_data()\n\n\tvtk.write_vtk_image3('dose_pbs.vtk', img2)\n\ndef run():\n\tstudy_id = dicom.make_rt_study_from_dir(iam, 'C:/Users/abrown/data/proton/prostate')\n\n\tstudy_calc = \\\n\t\tthinknode.function(iam[\"account_name\"], 'dicom', \"merge_ct_image_slices\",\n\t\t\t[\n\t\t\t\tthinknode.reference(study_id)\n\t\t\t])\n\tstudy_id = thinknode.post_calculation(iam, study_calc)\n\t# study_id = tn_id.prostate_patient_study()\n\n\tbeam_index = 0\n\tpbs_machine = tn_id.pbs_machine_procure()\n\tbeam_id = dicom.get_beam_from_study(iam, study_id, beam_index)\n\tspots = dicom.get_spots_from_beam(iam, beam_id)\n\n\t# Dose calc data\n\tfluences = dicom.get_fluences_from_beam(iam, beam_id)\n\tfluences_res = thinknode.get_immutable(iam, 'dosimetry', fluences)\n\n\tfluences_res2 = []\n\tfor f in 
fluences_res:\n\t\tfluences_res2.append(1.0)\n\n\tprint(\"fluences: \" + fluences)\n\tstopping_img = dicom.get_stopping_power_img(iam, study_id)\n\tdose_grid = dosimetry.get_dose_grid(iam, stopping_img, 4.0)\n\tbeam_geometry = dicom.get_beam_geometry(iam, study_id, beam_index)\n\tbixel_grid = dosimetry.get_pbs_bixel_grid(iam, spots, 2.0)\n\tlayers = dicom.get_pbs_layers(iam, pbs_machine, spots, beam_id)\n\n\tcalc = \\\n\t\tthinknode.function(iam[\"account_name\"], \"dosimetry\", \"compute_pbs_pb_dose_to_grid\",\n\t\t\t[\n\t\t\t\tthinknode.reference(fluences),\n\t\t\t\tthinknode.reference(stopping_img),\n\t\t\t\tthinknode.reference(dose_grid),\n\t\t\t\tthinknode.reference(beam_geometry),\n\t\t\t\tthinknode.reference(bixel_grid),\n\t\t\t\tthinknode.reference(layers),\n\t\t\t\tthinknode.none,\n\t\t\t\tthinknode.value([])\n\t\t\t])\n\n\t# dl.debug('Dose Calc Command: ' + str(calc))\n\n\t# Perform pbs dose calculation request\n\tres = thinknode.do_calculation(iam, calc, False)\n\t# Write dose results to vtk image for viewing in Paraview\n\tdose_to_vtk(res)\t\n\tdl.event(\"Done!\")\t\n\n# Work is performed from here:\nrun()","repo_name":"dotdecimal/astroid-script-library","sub_path":"python/pbs_dose_calc_from_dicom.py","file_name":"pbs_dose_calc_from_dicom.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"26299309679","text":"from IMLearn.learners.regressors import LinearRegression\nfrom IMLearn.learners.regressors import PolynomialFitting\nfrom IMLearn.utils import split_train_test\n\nimport numpy as np\nimport pandas as pd\nimport plotly.express as px\nimport plotly.io as pio\nimport plotly.graph_objects as go\n\npio.templates.default = \"simple_white\"\n\n\ndef load_data(filename: str) -> (pd.DataFrame, pd.Series):\n \"\"\"\n Load city daily temperature dataset and preprocess data.\n Parameters\n ----------\n filename: str\n Path to house prices dataset\n\n Returns\n -------\n Design matrix and response vector (Temp)\n \"\"\"\n full_data = pd.read_csv(filename, parse_dates=[\"Date\"]). \\\n drop_duplicates().dropna()\n full_data = full_data[full_data[\"Temp\"] > -70]\n features = full_data[[\"Country\",\n \"City\",\n # \"Date\",\n \"Year\",\n \"Month\",\n # \"Day\",\n # \"Temp\"\n ]]\n features[\"DayOfYear\"] = full_data[\"Date\"].dt.dayofyear\n # features = pd.get_dummies(data=features, columns=[\"Country\"], drop_first=True)\n # features = pd.get_dummies(data=features, columns=[\"City\"], drop_first=True)\n\n labels = full_data[\"Temp\"]\n\n return features, labels\n\n\ndef explore_data_for_country(X, y, country):\n X_Israel, y_Israel = get_samples_for_country(X, y, country)\n X_Israel[\"Temp\"] = y_Israel\n X_Israel[\"Year\"] = X_Israel[\"Year\"].map(str)\n fig = px.scatter(X_Israel, x=\"DayOfYear\", y=\"Temp\", color=\"Year\")\n fig.show()\n\n X_Israel[\"Temp\"] = y_Israel\n temp_std_by_month = X_Israel.groupby(\"Month\").agg('std')[\"Temp\"]\n plot = go.Figure(data=[go.Bar(x=list(range(12)), y=temp_std_by_month)])\n plot.update_layout(\n title=\"Month vs. 
Standard Deviation of Temperature in Israel\",\n xaxis_title=\"Month\",\n yaxis_title=\"Standard Deviation of Temperature\",\n )\n plot.show()\n\n\ndef get_samples_for_country(X, y, country):\n X_c = X[X[\"Country\"] == country]\n y_c = y[X[\"Country\"] == country]\n return X_c, y_c\n\n\ndef explore_data_all_countries(X, y):\n df = X.copy()\n df[\"Temp\"] = y.copy()\n df_std_dev = df.groupby([\"Country\", \"Month\"]).std().reset_index()\n df = df.groupby([\"Country\", \"Month\"]).mean().reset_index()\n fig = px.line(df, x=\"Month\", y=\"Temp\", error_y=df_std_dev[\"Temp\"],\n color=\"Country\")\n fig.show()\n\n\nif __name__ == '__main__':\n np.random.seed(0)\n # Question 1 - Load and preprocessing of city temperature dataset\n X, y = load_data(\"..\\datasets\\City_Temperature.csv\")\n\n # Question 2 - Exploring data for specific country\n explore_data_for_country(X, y, \"Israel\")\n\n # Question 3 - Exploring differences between countries\n explore_data_all_countries(X, y)\n\n # Question 4 - Fitting model for different values of `k`\n # Get samples from Israel\n X_Israel, y_Israel = get_samples_for_country(X, y, \"Israel\")\n\n # Split to train and test\n train_X, train_y, test_X, test_y = split_train_test(X_Israel, y_Israel)\n train_X = train_X[\"DayOfYear\"]\n test_X = test_X[\"DayOfYear\"]\n\n # Fit regression\n loss_k = np.zeros(10)\n for k in range(10):\n # Create and train model\n pr = PolynomialFitting(k + 1)\n pr.fit(train_X.to_numpy(), train_y.to_numpy())\n\n # Calculate loss\n loss = pr.loss(test_X.to_numpy(), test_y.to_numpy())\n\n # Print and save loss\n print(k + 1, \": \", round(loss, 2))\n loss_k[k] = round(loss, 2)\n\n # Draw loss graph\n plot = go.Figure(data=[go.Bar(x=list(range(1, 11)), y=loss_k)])\n plot.update_layout(\n title=\"Loss vs. Degree for Polynomial Fit\",\n xaxis_title=\"Degree for Polynomial Fit\",\n yaxis_title=\"Loss\",\n )\n plot.show()\n\n # Question 5 - Evaluating fitted model on different countries\n # Fit model\n pr = PolynomialFitting(5)\n pr.fit(X_Israel[\"DayOfYear\"].to_numpy(), y_Israel.to_numpy())\n\n # Check loss for each country\n Countries = [\"The Netherlands\", \"South Africa\", \"Jordan\"]\n loss_countries = np.zeros(len(Countries))\n for i in range(len(Countries)):\n # Get samples for country c\n X_c, y_c = get_samples_for_country(X, y, Countries[i])\n X_c = X_c[\"DayOfYear\"]\n \n # Calculate loss\n loss_countries[i] = pr.loss(X_c.to_numpy(), y_c.to_numpy())\n\n # Create graph\n plot = go.Figure(data=[go.Bar(x=Countries, y=loss_countries)])\n plot.update_layout(\n title=\"Countries vs. 
Loss\",\n xaxis_title=\"Countries\",\n yaxis_title=\"Loss\",\n )\n plot.show()\n\n","repo_name":"inbarmada/IML.HUJI","sub_path":"exercises/city_temperature_prediction.py","file_name":"city_temperature_prediction.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"12"} +{"seq_id":"19839869084","text":"import sys, collections\nfrom .settings import Settings\n\nclass Names(Settings):\n '''Manage the names of compiled LAF data items.\n\n Data items are stored in a dictionary with keys that tell a lot about the kind of data stored under that key.\n Keys have the following format::\n\n origin group kind direction ( item )\n\n and **item** is a comma separated list of a variable number of components, possibly zero.\n\n **Group**:\n\n * ``P``: primary data items,\n * ``G``: items for regions, nodes, edges, \n * ``X``: xml identifiers,\n * ``F``: features,\n * ``C``: connectivity,\n * ``T``: temporary during compiling.\n\n **Origin**: ``m`` or ``a`` meaning *main* and *annox* resp. Indicates the source data.\n The value ``z`` indicates that this data is not prepared by Laf-Fabric but by auxiliary modules.\n\n **Kind**: ``n`` or ``e`` meaning *node* and *edge* resp.\n\n **Direction**: ``f`` or ``b`` meaning *forward* and *backward* resp.\n\n The direction can mean the direction in which edges are followed, or the direction in which a mapping goes.\n\n **Components**:\n\n Features are items, with three components: (*namespace*, *label*, *name*).\n\n In group ``P``, ``G``, ``T`` there are one-component items, such as (``edges_from``,) and (``edges_to``).\n\n In group ``X`` there is only one item, and it has no components: ().\n\n For each data item we have to know the conditions under which it has to be loaded and its data type.\n\n The **condition** is a key in a dictionary of conditions.\n The loader determines the condition dictionary by filling in its slots with relevant components.\n \n The **data type** is either array, or dict, or string.\n\n **Class methods**\n The class methods ``comp`` and ``decomp`` and ``decompfull`` take care of the composition and decomposition of keys in meaningful bits.\n\n **Instance data and methods**\n The instance data contains a list of datakeys, adapted to the present environment, which is based\n on the source, annox-es and task chosen by the user.\n The previous list is also remembered, so that the loader can load/unload the difference.\n\n The instance method ``request_files`` determines the difference between previously and presently requested data items.\n It uses an instance method ``dinfo`` that provides all relevant information associated with a datakey,\n including the location and name of the corresponding data file on disk. 
This method is an instance method because it \n needs values from the current environment.\n '''\n _data_items_tpl = (( \n ('mP00 node_anchor', (False, 'arr')),\n ('mP00 node_anchor_items', (False, 'arr')),\n ('mG00 node_anchor_min', (True, 'arr')),\n ('mG00 node_anchor_max', (True, 'arr')),\n ('mP00 node_events', (False, 'arr')),\n ('mP00 node_events_items', (False, 'arr')),\n ('mP00 node_events_k', (False, 'arr')),\n ('mP00 node_events_n', (False, 'arr')),\n ('mG00 node_sort', (True, 'arr')),\n ('mG00 node_sort_inv', (True, 'dct')),\n ('mG00 edges_from', (True, 'arr')),\n ('mG00 edges_to', (True, 'arr')),\n ('mP00 primary_data', (False, 'str')),\n ('mXnf', ([], 'dct')),\n ('mXef', ([], 'dct')),\n ('mXnb', ([], 'dct')),\n ('mXeb', ([], 'dct')),\n ('mFn0', ([], 'dct')),\n ('mFe0', ([], 'dct')),\n ('mC0f', ([], 'dct')),\n ('mC0b', ([], 'dct')),\n ('zG00 node_sort', (None, 'arr')),\n ('zG00 node_sort_inv', (None, 'dct')),\n ('zL00 node_up', (None, 'dct')),\n ('zL00 node_down', (None, 'dct')),\n ('zV00 verses', (None, 'dct')),\n ('zV00 books_la', (None, 'dct')),\n ))\n _data_items_tpl_a = ((\n ('Xnf', ([], 'dct')),\n ('Xef', ([], 'dct')),\n ('Xnb', ([], 'dct')),\n ('Xeb', ([], 'dct')),\n ('Fn0', ([], 'dct')),\n ('Fe0', ([], 'dct')),\n ('C0f', ([], 'dct')),\n ('C0b', ([], 'dct')),\n ))\n _data_items_def = collections.OrderedDict()\n\n E_ANNOT_YES = ('laf','','y')\n E_ANNOT_NON = ('laf','','x')\n DCOMP_SEP = ','\n\n load_spec_keys = {'features', 'xmlids', 'primary', 'prepare'}\n load_spec_subkeys = {'node', 'edge'}\n kind_types = {False, True}\n\n def __init__(self, data_dir, laf_dir, output_dir, save, verbose):\n if not Settings.__init__(self, data_dir, laf_dir, output_dir, save, verbose): sys.exit(-1)\n self.req_data_items = collections.OrderedDict()\n self._old_data_items = collections.OrderedDict()\n for ((dkey_raw, dbits)) in Names._data_items_tpl:\n parts = dkey_raw.split(' ')\n dkey = '{}({})'.format(parts[0], Names.DCOMP_SEP.join(parts[1:])) if len(parts) > 1 else dkey_raw\n Names._data_items_def[dkey] = dbits\n\n def set_annox(self):\n for anx in self.env['annox']:\n for ((dkey_raw, dbits)) in Names._data_items_tpl_a:\n parts = dkey_raw.split(' ')\n dkey = 'a{}:'.format(anx)+('{}({})'.format(parts[0], Names.DCOMP_SEP.join(parts[1:])) if len(parts) > 1 else dkey_raw)\n Names._data_items_def[dkey] = dbits\n\n def comp(dkeymin, dcomps): return '{}({})'.format(dkeymin, Names.DCOMP_SEP.join(dcomps))\n def comp_file(dgroup, dkind, ddir, dcomps):\n return'{}{}{}({})'.format(dgroup, dkind, ddir, Names.DCOMP_SEP.join(dcomps))\n\n def decomp(dkey):\n parts = dkey.split('(', 1)\n return (parts[0], '({}'.format(parts[1])) if len(parts) == 2 else (dkey, '')\n\n def decomp_full(dkey):\n parts = dkey.split('(')\n kparts = parts[0].split(':')\n if len(kparts) == 2:\n rparts = (kparts[0],)+tuple(kparts[1])\n else:\n rparts = tuple(parts[0])\n return rparts + (tuple(parts[1].rstrip(')').split(Names.DCOMP_SEP)),)\n \n def apiname(dcomps): return \"_\".join(dcomps)\n def orig_key(dkey): return dkey.replace('z', 'm', 1) if dkey.startswith('z') else dkey\n\n def maingroup(dgroup):\n return [dkey for dkey in Names._data_items_def if dkey[0] == 'm' and dkey[1] == dgroup]\n\n def deliver(computed_data, dest, data_items):\n if computed_data: data_items[Names.comp(*dest)] = computed_data\n\n def dmsg(dkey):\n (dorigin, dgroup, dkind, ddir, dcomps) = Names.decomp_full(dkey)\n return '{}: {}{}{}{}'.format(\n 'main' if dorigin == 'm' else 'annox {}'.format(dorigin[1:]) if dorigin[0] == 'a' else 'prep',\n dgroup,\n 
'.' + Names.apiname(dcomps) if len(dcomps) else '',\n ' [' + ('node' if dkind == 'n' else 'e') + '] ' if dkind != '0' else '',\n ' ' + ('->' if ddir == 'f' else '<-') + ' ' if ddir != '0' else '',\n )\n\n def request_init(self, req_items):\n req_items.clear()\n for dkey in Names._data_items_def:\n (docc_def, dtype) = Names._data_items_def[dkey]\n docc = Names.decomp(dkey)[0]\n req_items[docc] = docc_def.copy() if type(docc_def) == list or type(docc_def) == dict else docc_def\n\n def request_files(self, req_items, prepare_dict):\n self._old_data_items = self.req_data_items\n self.req_data_items = collections.OrderedDict()\n dkeys = {'clear': [], 'keep': [], 'load': [], 'prep': set()}\n for dkey in Names._data_items_def:\n (docc_def, dtype) = Names._data_items_def[dkey]\n docc = Names.decomp(dkey)[0]\n if docc not in req_items and dkey not in prepare_dict: continue\n if dkey in prepare_dict:\n self.setenv(zspace=prepare_dict[dkey][-1])\n self.req_data_items[dkey] = self.dinfo(dkey)\n dkeys['prep'].add(dkey)\n elif docc in req_items and req_items[docc] == True:\n self.req_data_items[dkey] = self.dinfo(dkey)\n elif req_items[docc] == False: continue\n elif req_items[docc] == None: continue\n else:\n for dcomps in sorted(req_items[docc]):\n dkeyfull = Names.comp(dkey, dcomps)\n self.req_data_items[dkeyfull] = self.dinfo(dkeyfull)\n old_data_items = self._old_data_items\n new_data_items = self.req_data_items\n for dkey in old_data_items:\n if dkey not in new_data_items or new_data_items[dkey] != old_data_items[dkey]: dkeys['clear'].append(dkey)\n for dkey in new_data_items:\n if dkey in old_data_items and new_data_items[dkey] == old_data_items[dkey]: dkeys['keep'].append(dkey)\n else:\n dkeys['load'].append(dkey)\n # if not new_data_items[dkey][-1]: dkeys['load'].append(dkey)\n return dkeys\n\n def dinfo(self, dkey):\n if dkey in Names._data_items_def: (docc_def, dtype) = Names._data_items_def[dkey]\n else:\n dkeymin = Names.decomp(dkey)[0]\n (docc_def, dtype) = Names._data_items_def[dkeymin]\n (dorigin, dgroup, dkind, ddir, dcomps) = Names.decomp_full(dkey)\n if dgroup == 'T': return (None, None, None, None, None)\n if dorigin[0] == 'a':\n dloc = self.env['annox'][dorigin[1:]]['{}_compiled_dir'.format(dorigin[0])]\n else:\n dloc = self.env['{}_compiled_dir'.format(dorigin)]\n dfile = Names.comp_file(dgroup, dkind, ddir, dcomps)\n return (dgroup not in 'FC', dloc, dfile, dtype, dorigin == 'z')\n\n def check_load_spec(load_spec, stamp):\n errors = []\n for key in load_spec:\n if key not in Names.load_spec_keys:\n errors.append('only these keys are allowed: {}, not {}'.format(Names.load_spec_keys, key))\n elif key == 'xmlids':\n for subkey in load_spec[key]:\n if subkey not in Names.load_spec_subkeys:\n errors.append('under {} only these keys are allowed: {}, not {}'.format(key, Names.load_spec_subkeys, subkey))\n else:\n val = load_spec[key][subkey]\n if val not in {False, True}:\n errors.append('under {} and then {} only these values are allowed: {}, not {}'.format(key, subkey, Names.kind_types, val))\n elif key == 'primary':\n val = load_spec[key]\n if val not in {False, True}:\n errors.append('under {} only these values are allowed: {}, not {}'.format(key, Names.kind_types, val))\n elif key == 'features':\n val = load_spec[key]\n if type(val) == dict:\n for namespace in val:\n for subkey in val[namespace]:\n if subkey not in Names.load_spec_subkeys:\n errors.append('under {} and then {} only these keys are allowed: {}, not {}'.format(key, namespace, Names.load_spec_subkeys, subkey))\n else:\n 
valsub = val[namespace][subkey]\n if type(valsub) != list:\n errors.append('under {} and then {} and then {} the value should be a list, not {}'.format(key, namespace, subkey, type(valsub)))\n elif type(val) == tuple:\n nelem = len(val)\n if nelem != 2:\n errors.append('under {} the value should be a tuple with exactly two elements (for nodes and edges), not {}'.format(key, nelem))\n else:\n for (e, elem) in enumerate(val):\n if type(elem) != str: \n errors.append('under {}, item {} the value should be a string, not {}'.format(key, e, type(elem)))\n else:\n errors.append('under {} the value should be either a tuple with exactly two elements (for nodes and edges) or a dictionary, not {}'.format(key, type(val)))\n elif key == 'prepare':\n val = load_spec[key]\n if type(val) != tuple:\n errors.append('the value of {} should be a tuple, not {}'.format(key, type(val)))\n else:\n if len(val) != 2: \n errors.append('the value of {} should be a 2-tuple, not a {}-tuple'.format(key, len(val)))\n else:\n if type(val[0]) != collections.OrderedDict:\n errors.append('the value of {}[0] should be a collections.OrderedDict, not {}'.format(key, type(val[0])))\n if errors:\n raise FabricError(\"Your load instructions have the following errors:\\n{}\".format('\\n'.join(errors)), stamp, None)\n\n\nclass FabricError(Exception):\n def __init__(self, message, stamp, cause=None):\n Exception.__init__(self, message)\n stamp.Emsg(message)\n if cause: stamp.Dmsg(\"{}: {}\".format(type(cause), str(cause)))\n\n","repo_name":"dirkroorda/laf-fabric","sub_path":"laf/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":13379,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"12"} +{"seq_id":"41057273841","text":"import numpy as np\n\n\nclass ExperienceReplayBuffer:\n def __init__(self, buffer_size, state_shape):\n self.buffer_size = buffer_size\n self.items_present = 0\n self.curr_index = 0\n self.state_buffer = np.zeros((buffer_size, *state_shape), dtype=np.float32)\n self.next_state_buffer = np.zeros((buffer_size, *state_shape), dtype=np.float32)\n self.reward_buffer = np.zeros((buffer_size), dtype=np.float32)\n self.action_buffer = np.zeros((buffer_size), dtype=np.int32)\n\n def add(self, experience):\n state, action, reward, next_state = experience\n self.state_buffer[self.curr_index] = state\n self.action_buffer[self.curr_index] = action\n self.reward_buffer[self.curr_index] = reward\n self.next_state_buffer[self.curr_index] = next_state\n self.curr_index = (self.curr_index + 1) % self.buffer_size\n self.items_present = min(self.items_present + 1, self.buffer_size)\n\n def sample(self, batch_size):\n inds = np.random.choice(np.arange(self.items_present),\n size=batch_size,\n replace=False)\n\n return self.state_buffer[inds], self.action_buffer[inds], \\\n self.reward_buffer[inds], self.next_state_buffer[inds]","repo_name":"internaccounts123/internship.ai","sub_path":"reinforcement/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"12588425405","text":"# -*- encoding: utf-8 -*-\nimport odoo\nfrom odoo import http\n\n\nclass px(odoo.addons.web.controllers.main.Home):\n @http.route(['/px'], type='http', auth='user')\n def px2(self, *args, **kargs):\n teachers = []\n for p in http.request.env['res.partner'].search([('is_teacher','=',1)]):\n teachers.append(p.name)\n\n return http.request.render('osbzr_training.index', 
{'teachers':teachers})\n\n","repo_name":"GoodERPJeff/training_class_1","sub_path":"osbzr_training/test_route.py","file_name":"test_route.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"39071717505","text":"import os\nimport logging\nimport redis\nimport random\nimport psycopg2\nimport jsonpickle\nimport sys\nimport json\nfrom sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom datetime import datetime, timedelta\nfrom flask import Flask, request, render_template, jsonify\nfrom pymongo import MongoClient\nfrom bson import json_util\n\nimport cache\nimport mongo\nimport postgres\n\nimport salesforce\nimport requests\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.DEBUG)\n\ncaching_enabled = os.environ[\"CACHING_ENABLED\"]\nriskUrl = os.environ[\"RISK_URL\"]\n\ndef uniqueName(name):\n    newName = name.replace(\" \", \"\").lower()\n    print(newName)\n    return newName\n\ndef combineData(providerData, bloodData):\n    combined = []\n    aoneDict = []\n\n    for y in bloodData:\n        aoneDict.append(y.a1c)\n\n    risks = getRisks(aoneDict)\n\n    for x in providerData:\n        for index, y in enumerate(bloodData):\n            if x[\"nameid\"] == y.nameid:\n                risk = risks[index] \n                user = { 'name': x[\"name\"], 'provider': x[\"provider\"], 'a1c': round(float(y.a1c / 100),1), 'risk': risk }\n                combined.append(user)\n    return combined\n\ndef getRisks(risks):\n    risksJson = json.dumps(risks)\n    r = requests.post(riskUrl, data=risksJson)\n    return r.json()\n\ndef getData():\n    data = \"\"\n    logging.debug(\"is_caching_enabled:\" + caching_enabled)\n\n    if caching_enabled == \"true\": \n        data = cache.get()\n    \n    if data == \"\":\n        providerData = mongo.getData()\n        bloodData = postgres.getData()\n        data = combineData(providerData, bloodData)\n    \n        if caching_enabled == \"true\":\n            cache.set(data)\n    return data\n    \ndef insertInitData():\n    names = salesforce.getData()\n    providers = [\"Aetna\", \"Blue Cross\", \"Kaiser\", \"Oscar\", \"United\", \"Humana\"]\n\n    for x in names:\n        insertProviderData(x, random.choice(providers))\n        insertA1C(x, random.randint(450, 1000))\n\ndef insertProviderData(name, provider):\n    data = mongo.insertHealth(uniqueName(name), name, provider)\n    \ndef insertA1C(name, a1c):\n    data = postgres.insertHealth(uniqueName(name), a1c)\n    \ndef init():\n    if caching_enabled == \"true\":\n        cache.clear()\n\n    # Create the DB schema if it doesn't exist\n    postgres.createSchema()\n    \n    # Add init data if it doesn't exist\n    data = getData()\n    if len(data) == 0:\n        insertInitData()\n","repo_name":"sowjumn/health-app","sub_path":"appModel.py","file_name":"appModel.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"32667551203","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom __future__ import unicode_literals\n\nfrom sklearn import metrics\nfrom sklearn import manifold\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.impute import SimpleImputer\nfrom sklearn import preprocessing\n\nimport math\nimport random\nimport time\n\nfrom hazm import *\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nimport csv, re, pickle\n\nfrom colorama import Back, Fore, Style\n\nimport matplotlib.pyplot as plt\n
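# hazm (star-imported above) supplies the Persian NLP pieces used in this script:\n# Normalizer for orthographic cleanup and sent_tokenize for sentence splitting\n# (Stemmer/Lemmatizer usage appears further down, commented out).\nfrom matplotlib import pyplot as 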
plt\n\nimport pyclustering as pyclus\n\nimport seaborn as sns\nsns.set()\n\n\n# In[2]:\n\n\ndata = pd.read_excel(\"dataset.xlsx\")\nreviews = data['question']\n# rate = data['Score']\nlabels = list(data['subject'])\n\n\n# In[3]:\n\n\n# stopwords_list(reviews,\"dataset1.txt\")\n\n\n# In[4]:\n\n\n# labels\n\n\n# In[5]:\n\n\n\n\nRE_USELESS = r'[^\\w]' # remove useless characters\nRE_DIGIT = r\"^\\d+\\s|\\s\\d+\\s|\\s\\d+$\" # remove digits\nRE_SPACE = r'\\s+' # remove space\nRE_EMAILS = r'[\\w\\.-]+@[\\w\\.-]+'\nRE_URLS = r'http\\S+'\nRE_WWW = r'www\\S+'\n\n\ndef clean_all_save(document, save_file_path):\n \"\"\"\n this function generate raw persian text, it remove non-persian character\n and all numbers and symbols\n :param document:\n :param save_file_path:\n :return:\n \"\"\"\n with open(save_file_path, 'w') as output:\n for sentence in document:\n sentence = clean_sentence(sentence)\n output.write(sentence + '\\n')\n return None\n\n\ndef clean_all(document, doc_pattern=r'(.*?)'):\n \"\"\"\n clean text like hamshahri, irBlogs, and other Treck format\n :param document:\n :param doc_pattern:\n :return:\n \"\"\"\n clean = ''\n document = re.findall(doc_pattern, document, re.DOTALL)\n for sentence in document:\n sentence = clean_sentence(sentence)\n clean += ' \\n' + sentence\n return clean\n\n\ndef clean_sentence(sentence):\n sentence = re.sub(r'[^\\u0621-\\u06ff]', ' ', sentence)\n sentence = arToPersianChar(sentence)\n sentence = arToPersianNumb(sentence)\n sentence = faToEnglishNumb(sentence)\n sentence = re.sub(r'[a-zA-Z]', ' ', sentence)\n sentence = re.sub(r'[0-9]', ' ', sentence)\n sentence = re.sub(RE_WWW, r' ', sentence)\n sentence = re.sub(RE_URLS, r' ', sentence)\n sentence = re.sub(RE_EMAILS, r' ', sentence)\n sentence = re.sub(RE_USELESS, r' ', sentence)\n sentence = re.sub(RE_DIGIT, r' ', sentence)\n sentence = re.sub(RE_SPACE, r' ', sentence)\n return sentence\n\n\ndef arToPersianNumb(number):\n dic = {\n '١': '۱',\n '٢': '۲',\n '٣': '۳',\n '٤': '۴',\n '٥': '۵',\n '٦': '۶',\n '٧': '۷',\n '٨': '۸',\n '٩': '۹',\n '٠': '۰',\n }\n return multiple_replace(dic, number)\n\n\ndef arToPersianChar(userInput):\n dic = {\n 'ك': 'ک',\n 'دِ': 'د',\n 'بِ': 'ب',\n 'زِ': 'ز',\n 'ذِ': 'ذ',\n 'شِ': 'ش',\n 'سِ': 'س',\n 'ى': 'ی',\n 'ي': 'ی'\n }\n return multiple_replace(dic, userInput)\n\n\ndef faToEnglishNumb(number):\n dic = {\n '۰': '0',\n '۱': '1',\n '۲': '2',\n '۳': '3',\n '۴': '4',\n '۵': '5',\n '۶': '6',\n '۷': '7',\n '۸': '8',\n '۹': '9',\n }\n return multiple_replace(dic, number)\n\n\ndef multiple_replace(dic, text):\n pattern = \"|\".join(map(re.escape, dic.keys()))\n return re.sub(pattern, lambda m: dic[m.group()], str(text))\n\n\n# In[6]:\n\n\ndef clean_all(document):\n clean = ''\n for sentence in document:\n sentence = clean_sentence(sentence)\n clean += sentence\n return clean\n\n\n# In[7]:\n\n\nj = k = i = 0\nreviews1 = []\nlabels1 = []\n# labels1 = list(labels.copy())\nnormalizer = Normalizer()\nfor review in reviews:\n sentences = sent_tokenize(normalizer.normalize(clean_all(review)))\n reviews1.extend(sentences)\n for j in range(len(sentences)):\n labels1.insert(i + k, labels[i])\n k += 1\n i += 1\n\n\n# In[8]:\n\n\nprint(len(reviews1),len(labels1))\n\n\n# In[9]:\n\n\nreviews[4]\n\n\n# In[10]:\n\n\n#cleaning dataset\nwords=[]\nall_text = ''\n# stemmer = Stemmer()\nfor t in range (len(reviews1)):\n text = reviews1[t]\n text = text.replace('\\u200c',' ')\n text = text.replace('\\u200f',' ')\n text = re.sub(r'[^a-zA-Z0-9آ-ی۰-۹ ]', ' ', text)\n all_text += text\n all_text += ' '\n 
wordsInText = text.split()\n for word in wordsInText:\n# word = stemmer.stem(word)\n if word != ' ' or word != '':\n words.append(word)\nlen(words)\n\n\n# In[11]:\n\n\nlen(all_text)\n\n\n# In[12]:\n\n\ncounts = Counter(words)\nvocab = sorted(counts, key=counts.get, reverse=True)\nvocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}\n\nwith open(\"mySavedDict.txt\", \"wb\") as myFile:\n pickle.dump(vocab_to_int, myFile)\n\n'''\nwith open(\"mySavedDict.txt\", \"rb\") as myFile:\n myNewPulledInDictionary = pickle.load(myFile)\n'''\n\n\n# In[13]:\n\n\n# vocab\n\n\n# In[14]:\n\n\nreviews_ints = []\nfor each in reviews1:\n #print (each)\n each = each.replace('\\u200c',' ')\n each = each.replace('\\u200f',' ')\n each = re.sub(r'[^a-zA-Z0-9آ-ی۰-۹ ]', ' ', each)\n reviews_ints.append([vocab_to_int[word] for word in each.split()])\n\n\nreview_lens = Counter([len(x) for x in reviews_ints])\nprint(\"Zero-length reviews: {}\".format(review_lens[0]))\nprint(\"Maximum review length: {}\".format(max(review_lens)))\n\n\n# In[15]:\n\n\n# reviews_ints[1]\n\n\n# In[16]:\n\n\nmi = 15000\nsu = ma = 0\ni = 0\nlow = []\nfor each in reviews_ints:\n if len(each) == 2 or len(each) == 1:\n low.append(i)\n if len(each) <= mi:\n# print(each,i,len(each))\n mi = len(each)\n if len(each) > ma:\n ma = len(each)\n su += len(each)\n i += 1\nprint('min lenght: '+str(mi),' and max lenght: '+str(ma),' and mean lenght: '+str(su/len(reviews_ints)))\n\n\n# In[17]:\n\n\nreviews_ints22 = reviews_ints.copy()\nfor i in range(len(low)):\n print(reviews_ints22.pop(low[len(low)- i -1]),low[len(low)- i -1])\nlen(reviews_ints22)\n\n\n# In[18]:\n\n\nreviews_ints = reviews_ints22.copy()\n\n\n# In[19]:\n\n\n# lstm_size = 256\n# lstm_layers = 1\n# batch_size = 200\n# learning_rate = 0.001\n\n# data_dim = 16\n# timesteps = 25\n# num_classes = 2\n\nn_words = len(vocab)\nprint (n_words)\n\n\n# In[54]:\n\n\nseq_len = 30\nfeatures = np.zeros((len(reviews_ints), seq_len), dtype=int)\n\nfor i, row in enumerate(reviews_ints):\n# print (i , row)\n# print (i )\n# print ('****')\n features[i, -len(row):] = np.array(row)[:seq_len]\npd.DataFrame(features)\n\n\n# In[55]:\n\n\nimport configparser\nimport numpy as np\nimport pandas as pd\n\nfrom cluster import Clustering\nfrom genetic import Genetic\nfrom generation import Generation\n\n\n# In[56]:\n\n\n\n\nNORMALIZATION = True\n\n\ndef readVars(config_file):\n config = configparser.ConfigParser()\n config.read(config_file)\n budget = int(config.get(\"vars\", \"budget\"))\n kmax = int(config.get(\"vars\", \"kmax\")) # Maximum number of Clusters\n numOfInd = int(config.get(\"vars\", \"numOfInd\")) # number of individual\n Ps = float(config.get(\"vars\", \"Ps\"))\n Pm = float(config.get(\"vars\", \"Pm\"))\n Pc = float(config.get(\"vars\", \"Pc\"))\n\n return budget, kmax, Ps, Pm, Pc, numOfInd\n\n\n# minmax normalization\ndef minmax(data):\n normData = data\n data = data.astype(float)\n normData = normData.astype(float)\n for i in range(0, data.shape[1]):\n tmp = data.iloc[:, i]\n # max of each column\n maxElement = np.amax(tmp)\n # min of each column\n minElement = np.amin(tmp)\n\n # norm_dat.shape[0] : size of row\n for j in range(0, normData.shape[0]):\n normData[i][j] = float(\n data[i][j] - minElement) / (maxElement - minElement)\n\n normData.to_csv('result/norm_data.csv', index=None, header=None)\n return normData\ndata = pd.DataFrame(features)\ndata = minmax(data) # normalize\n\n\n# In[30]:\n\n\n\nif __name__ == '__main__':\n config_file = \"config.txt\"\n# if(NORMALIZATION):\n# data = 
pd.read_csv('data/iris.csv', header=None)\n\n# data = minmax(data) # normalize\n# else:\n# data = pd.read_csv('result/norm_data.csv', header=None)\n\n # size of column\n dim = data.shape[1]\n\n # kmeans parameters & GA parameters\n generationCount = 0\n budget, kmax, Ps, Pm, Pc, numOfInd = readVars(config_file)\n\n budget = 30\n kmax = 8\n# numOfInd =20\n# Ps =0.4\n# Pm =0.05\n# Pc =0.8\n \n print(\"-------------GA Info-------------------\")\n print(\"budget\", budget)\n print(\"kmax\", kmax)\n print(\"numOfInd\", numOfInd)\n print(\"Ps\", Ps)\n print(\"Pm\", Pm)\n print(\"Pc\", Pc)\n print(\"---------------------------------------\")\n\n # dim or pattern id \n chromosome_length = kmax * dim\n\n #-------------------------------------------------------#\n # \t\t\t\t\t\t\tmain \t\t\t\t\t\t#\n #-------------------------------------------------------#\n initial = Generation(numOfInd, 0)\n initial.randomGenerateChromosomes(\n chromosome_length) # initial generate chromosome\n\n clustering = Clustering(initial, data, kmax) # eval fit of chromosomes\n\n # ------------------calc fitness------------------#\n generation = clustering.calcChromosomesFit()\n\n # ------------------------GA----------------------#\n while generationCount <= budget:\n GA = Genetic(numOfInd, Ps, Pm, Pc, budget, data, generationCount, kmax)\n generation, generationCount = GA.geneticProcess(\n generation)\n iBest = generation.chromosomes[0]\n clustering.printIBest(iBest)\n\n # ------------------output result-------------------#\n# clustering.output_result(iBest, data)\n\n\n# In[23]:\n\n\n# clustering.getLabels()\na = clustering.getLabels()\nlen(a)\n\n\n# In[24]:\n\n\nGAKMeans_Sil = metrics.silhouette_score(X, a, metric='euclidean')\nGAKMeans_Sil\n\n\n# In[67]:\n\n\nX = data\ncolors = np.array(['g', 'r', 'b', 'c', 'k', 'y','royalblue', 'maroon', 'forestgreen',\n 'mediumorchid', 'tan', 'deeppink', 'olive', 'goldenrod', 'lightcyan', 'navy'])\n\n\n# In[26]:\n\n\n\n########## PCA of features for GA_Kmeans\nfrom sklearn.decomposition import PCA\npca_model = PCA(n_components=2)\nX_PCA = pca_model.fit_transform(X)\n\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(121)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1],c='green', marker='o', s=10)\nax = fig.add_subplot(122)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1], c=colors[a], marker='*')\n\n\n# In[27]:\n\n\n\n#### kmeans algorithm\nfrom sklearn.cluster import KMeans\nstart = time.time()\nkmean = KMeans(n_clusters=8, max_iter=500)\nkmean.fit(X)\nend = time.time()\nprint(Fore.BLUE + \"k-mean algorithm time is :\", end - start)\nprint(Fore.RESET)\n\n\n\n# In[28]:\n\n\n########## PCA of features for Kmeans\nfrom sklearn.decomposition import PCA\npca_model = PCA(n_components=2)\nX_PCA = pca_model.fit_transform(X)\n\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(121)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1],c='green', marker='o', s=10)\nax = fig.add_subplot(122)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1], c=colors[kmean.labels_], marker='*')\n\n\n# In[29]:\n\n\nfrom sklearn.cluster import Birch\n\nbrc = Birch(branching_factor=50, n_clusters=4, threshold=0.5, compute_labels=True)\nbrc.fit(X) \n# Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None,\n# threshold=0.5)\nClusterBirch = brc.predict(X)\n\n\n# In[30]:\n\n\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(121)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1],c='green', marker='o', s=10)\nax = fig.add_subplot(122)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1], c=colors[ClusterBirch], marker='*')\n\n\n# In[369]:\n\n\n\ndef 
WOA_clustering(X, numberOfCluster=3,iterations=100, numberOfWhale=20):\n# X must be numpy.ndarray\n    dataPoints=X.shape[0]\n    features=X.shape[1]\n    # initialise centres randomly within each feature's observed range\n\n    centresOfwhale=np.zeros((numberOfWhale,numberOfCluster,features))\n\n\n    for whale in range(numberOfWhale):\n        for cluster in range(numberOfCluster):\n            for feature in range(features):\n                centresOfwhale[whale,cluster,feature]=float(random.randint(np.min(X[:,feature]),np.max(X[:,feature])))\n    bestWhale=0\n    for iteration in range(iterations):\n#         print(iteration)\n        #dataPointsInCluster=[[[] for cluster in range(numberOfCluster)] for whale in range(numberOfWhale)]\n        dataPointsInCluster=np.zeros((numberOfWhale,numberOfCluster))\n        bestWhale=0\n        bestDist=np.infty\n        startTime=time.time()\n        for whale in range(numberOfWhale):\n            dist=0.00\n            clusi = []\n            for i in range(numberOfCluster):\n                clusi.append(np.zeros((features)))\n\n            for dataPoint in range(dataPoints):\n                bestEuclidianDist=np.infty\n                bestCluster=0\n                for cluster in range(numberOfCluster):\n                    euclidDist=np.linalg.norm(centresOfwhale[whale,cluster]-X[dataPoint,:])\n                    # nearest-centre assignment (restored from the standard WOA clustering scheme)\n                    if euclidDist<bestEuclidianDist:\n                        bestEuclidianDist=euclidDist\n                        bestCluster=cluster\n                dist+=bestEuclidianDist\n                clusi[bestCluster]+=X[dataPoint,:]\n                dataPointsInCluster[whale,bestCluster]+=1\n            # a whale's fitness is its summed point-to-centre distance; keep the best whale\n            if dist<bestDist:\n                bestDist=dist\n                bestWhale=whale\n        # WOA coefficients: a decays linearly from 2 to 0 over the iterations\n        a=2-iteration*(2.0/iterations)\n        a2=-1+iteration*(-1.0/iterations)\n        for whale in range(numberOfWhale):\n            r1=random.random()\n            r2=random.random()\n            A=2*a*r1-a  # Eq. (2.3)\n            C=2*r2      # Eq. (2.4)\n            b=1\n            l=(a2-1)*random.random()+1\n            p=random.random()\n            for cluster in range(numberOfCluster):\n                if p<0.5 :\n                    if abs(A)>=1 :\n                        rand_leader_index = int(math.floor((numberOfWhale-1)*random.random()+1))\n                        X_rand = centresOfwhale[rand_leader_index]\n                        D_X_rand=abs(C*X_rand[cluster]-centresOfwhale[whale,cluster]) # Eq. (2.7)\n                        centresOfwhale[whale,cluster]=X_rand[cluster]-A*D_X_rand      # Eq. (2.8)\n                    elif abs(A)<1 :\n                        D_Leader=abs(C*centresOfwhale[bestWhale,cluster]-centresOfwhale[whale,cluster]) # Eq. (2.1)\n                        centresOfwhale[whale,cluster]=centresOfwhale[bestWhale,cluster]-A*D_Leader      # Eq. (2.2)\n                elif p>=0.5 :\n                    distance2Leader=abs(centresOfwhale[bestWhale,cluster]-centresOfwhale[whale,cluster])  # Eq. (2.5)\n                    centresOfwhale[whale,cluster]=distance2Leader*math.exp(b*l)*math.cos(l*2*3.14)+centresOfwhale[bestWhale,cluster]\n#         print(time.time()-startTime)\n        startTime=time.time()\n    mins = []\n    for i in range(numberOfCluster):\n        mins.append([np.infty])\n    WOACluster=[0 for dataPoint in range(dataPoints)]\n    j = 0\n    for dataPoint in range(dataPoints):\n        d = []\n        for i in range(numberOfCluster):\n            d.append(np.linalg.norm(centresOfwhale[bestWhale,i]-X[dataPoint,:]))\n            if d[i] <= mins[i][0]:\n                mins[i][0] = d[i]\n                mins[i].append(j)\n        j += 1\n        WOACluster[dataPoint] = d.index(min(d))\n    j = 0\n    for i in mins:\n#         print(i[1:],j)\n        WOACluster = pd.DataFrame(WOACluster)\n        WOACluster.iloc[i[1:]] = j\n        WOACluster = list(WOACluster[0])\n        j += 1\n\n    return WOACluster\n\n\n# In[396]:\n\n\n# centresOfwhale=[]\n# bestWhale = 0\nsill = -10\nfor i in range(25):\n    labeles = WOA_clustering(X.values, numberOfCluster=8,iterations=100, numberOfWhale=30)\n    try:\n        sill1 = metrics.silhouette_score(X.values, labeles, metric='euclidean')\n        if sill1 > sill:\n            sill = sill1\n            WOACluster = labeles\n    except:\n        pass\n\n# print(WOACluster)\n# centresOfwhale\n\n\n# In[397]:\n\n\nWOAKMeans_Sil = metrics.silhouette_score(X.values, WOACluster, metric='euclidean')\nWOAKMeans_Sil\n\n\n# In[398]:\n\n\nfig = plt.figure(figsize=(16, 8))\nax = fig.add_subplot(121)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1],c='green', marker='o', s=10)\nax = fig.add_subplot(122)\nax.scatter(X_PCA[:, 0], X_PCA[:, 1], c=colors[WOACluster], marker='*')\n\n\n# In[34]:\n\n\nGAKMeans_Sil = metrics.silhouette_score(X, a, metric='euclidean')\nGAKMeans_Sil\n\n\n# In[35]:\n\n\nlabels = kmean.labels_\nKMeans_Sil = metrics.silhouette_score(X, kmean.labels_, metric='euclidean')\nprint('Kmeans silhouette ',KMeans_Sil)\n\n\n# In[36]:\n\n\n
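# For reference, a point's silhouette is s = (b - a) / max(a, b), where a is its mean\n# intra-cluster distance and b is its mean distance to the nearest other cluster; scores\n# near 1 indicate tight, well-separated clusters and scores near 0 overlapping ones.\n\nprint(\"Birch Silhouette Coefficient: %0.3f\"\n      % metrics.silhouette_score(X, ClusterBirch, 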
metric='sqeuclidean'))\n\n\n# In[37]:\n\n\nWOAKMeans_Sil = metrics.silhouette_score(X, WOACluster, metric='euclidean')\nWOAKMeans_Sil\n\n\n# In[ ]:\n\n\n\n\n\n# In[309]:\n\n\n# normalizer = Normalizer()\n# normalizer.normalize('اصلاح نويسه ها و استفاده از نیم‌فاصله \\n پردازش را آسان مي كند')\n# # 'اصلاح نویسه‌ها و استفاده از نیم‌فاصله پردازش را آسان می‌کند'\n\n\n# In[310]:\n\n\n# sent_tokenize('ما هم برای وصل کردن آمدیم! ولی برای پردازش، جدا بهتر نیست؟')\n# # ['ما هم برای وصل کردن آمدیم!', 'ولی برای پردازش، جدا بهتر نیست؟']\n\n\n# In[107]:\n\n\n# word_tokenize('ولی برای پردازش، جدا بهتر نیست؟')\n# # ['ولی', 'برای', 'پردازش', '،', 'جدا', 'بهتر', 'نیست', '؟']\n\n\n# In[108]:\n\n\n# stemmer = Stemmer()\n# print(stemmer.stem(stemmer.stem('پردازش‌ها')))\n# # 'کتاب'\n\n\n# In[109]:\n\n\n# lemmatizer = Lemmatizer()\n# lemmatizer.lemmatize('می‌روم')\n# 'رفت#رو'\n\n\n# In[110]:\n\n\n# tagger = POSTagger(model='resources/postagger.model')\n# tagger.tag(word_tokenize('ما بسیار کتاب می‌خوانیم'))\n# [('ما', 'PRO'), ('بسیار', 'ADV'), ('کتاب', 'N'), ('می‌خوانیم', 'V')]\n\n\n# In[111]:\n\n\n# chunker = Chunker(model='resources/chunker.model')\n# tagged = tagger.tag(word_tokenize('کتاب خواندن را دوست داریم'))\n# tree2brackets(chunker.parse(tagged))\n# '[کتاب خواندن NP] [را POSTP] [دوست داریم VP]'\n\n\n# In[34]:\n\n\n# parser = DependencyParser(tagger=tagger, lemmatizer=lemmatizer)\n# parser.parse(word_tokenize('زنگ‌ها برای که به صدا درمی‌آید؟'))\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"aminkhod/PersainTextClusteringWithHazm","sub_path":"TextPsychology.py","file_name":"TextPsychology.py","file_ext":"py","file_size_in_byte":19773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"36766782933","text":"\"\"\"Functions made to combine spreadsheets to a clear, organised, standard format.\"\"\"\n\nimport pandas as pd\n\n\n# Functions that combine paired and individual bet spreadsheets \n\ndef remove_wanted_columns(spreadsheet):\n \"\"\"Removes unwanted columns from a preprocessed individual bets spreadsheet.\"\"\"\n \n spreadsheet.drop(['BookieID', 'BetOutcomeID', 'BettingTypeID', 'ItemID'], \n axis=1, inplace=True)\n \n return spreadsheet\n\n\ndef tidy_profit_id(spreadsheet):\n \"\"\"Renames preprocessed individual bets spreadsheets' profitid to ProfitID.\n This allowed paired and individual bets to be merged on ProfitID.\"\"\"\n \n spreadsheet.rename(columns={'profitid':'ProfitID'}, inplace=True)\n \n return spreadsheet\n\n\ndef merge_on_profit_id(individual_spreadsheet, paired_spreadsheet):\n \"\"\"Combines paired and individual spreadsheets to make a new spreadsheet.\n \n It does this by taking columns exclusive to paired bet spreadsheets and \n merges to individual bets spreadsheets based on Profit ID.\"\"\"\n \n for column in ['sport', 'Event', 'EventTime', 'datecreated ', 'BetType', 'Note']:\n partially_merged = pd.merge(individual_spreadsheet, \n paired_spreadsheet.loc[:,['ProfitID',column]], \n on='ProfitID')\n individual_spreadsheet = partially_merged\n \n return individual_spreadsheet\n\n\n# Functions that better organise and tidy up the newly combined spreadsheet \n\ndef rename_colums(spreadsheet):\n \"\"\"Renames columns on newly combined spreadsheet.\"\"\"\n \n spreadsheet.columns = ['Profit ID', 'Bookie', 'Bet Result', 'Type', \n 'Outcome', 'Stake','Odds', 'Fee (%)', 'Liability', \n 'Return', 'Potential Profit', 'Sport', 'Event', \n 'Event Time', 'Date Created', 'Bet Type', 'Note']\n \n return spreadsheet\n\n\ndef 
reorder_columns(spreadsheet):\n    \"\"\"Reorders columns on newly combined spreadsheet.\"\"\"\n    \n    columns_reordered = ['Date Created', 'Sport', 'Event', 'Event Time', 'Bookie',\n                         'Bet Type', 'Type', 'Outcome', 'Stake', 'Odds', 'Fee (%)', \n                         'Liability', 'Return', 'Potential Profit', 'Bet Result',\n                         'Note', 'Profit ID']\n    \n    spreadsheet = spreadsheet.loc[:, columns_reordered]\n    \n    return spreadsheet\n\n\ndef rename_type_normal(spreadsheet):\n    \"\"\"Renames Bet Type 'Normal' to 'Qualifying' to better represent what the\n    bet does (qualify for a free bet).\"\"\"\n    \n    spreadsheet.loc[:,'Bet Type'].replace('Normal', 'Qualifying', inplace=True)\n    \n    return spreadsheet\n\n\ndef fill_note_NaN(spreadsheet):\n    \"\"\"Replace NaNs in notes with blank space.\"\"\"\n    \n    spreadsheet.loc[:, 'Note'].fillna(' ', inplace=True)\n    \n    return spreadsheet\n\n\ndef win_lose_indices(winning_indices_list):\n    \"\"\"Input a list of indices of winning bets.\n    Returns a tuple (list1, list2, list3).\n    \n    list1: indices of winning bets\n    list2: indices of the corresponding losing bets\n    list3: indices of winning and losing bets combined\n    \n    If the smallest winning index is odd, then its paired bet (the preceding index)\n    will be that of a losing bet and must be included in the losing index list.\n    \n    If the largest winning index is even, then its paired bet (the following index)\n    will be that of a losing bet and must be included in the losing index list.\"\"\"\n    \n    if min(winning_indices_list) % 2 == 0:\n        range_min = min(winning_indices_list)\n    else:\n        range_min = min(winning_indices_list) - 1\n    \n    if max(winning_indices_list) % 2 == 0:\n        range_max = max(winning_indices_list) + 2\n    else:\n        range_max = max(winning_indices_list) + 1\n    \n    bets_to_be_updated = range(range_min, range_max)\n    losing_indices_list = list(set(bets_to_be_updated) - set(winning_indices_list))\n    \n    return (winning_indices_list, losing_indices_list, list(bets_to_be_updated))\n\n\ndef update_bet_results(spreadsheet, winning_indices_list):\n    \"\"\"Updates the spreadsheet with which bets won and lost.\"\"\"\n    \n    win_lose_tuple = win_lose_indices(winning_indices_list)\n\n    spreadsheet.loc[win_lose_tuple[0], 'Bet Result'] = \"Win\"\n    spreadsheet.loc[win_lose_tuple[1], \"Bet Result\"] = \"Lose\"\n    \n    return spreadsheet\n\n\ndef losing_paired_indices(spreadsheet, winning_indices_list):\n    \"\"\"Returns the indices of the losing bets that share a Profit ID with the\n    given winning bets.\"\"\"\n    \n    profit_ids = list(spreadsheet.loc[winning_indices_list, 'Profit ID'])\n    win_lose_idx = list(spreadsheet[spreadsheet['Profit ID'].isin(profit_ids)].index)\n    lose_indices = [index for index in win_lose_idx if index not in winning_indices_list]\n    \n    return lose_indices\n\n\ndef update_paired_results(spreadsheet, winning_indices_list):\n    \"\"\"Updates the spreadsheet with which bets won and lost, pairing winners\n    with their losing counterparts by Profit ID.\"\"\"\n    \n    losing_indices_list = losing_paired_indices(spreadsheet, winning_indices_list)\n    \n    spreadsheet.loc[winning_indices_list, 'Bet Result'] = \"Win\"\n    spreadsheet.loc[losing_indices_list, 'Bet Result'] = \"Lose\"\n    \n    return spreadsheet","repo_name":"andrewhwest/Matched-Betting","sub_path":"Script /processing_functions.py","file_name":"processing_functions.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15595110952","text":"from deepspeech import Model\r\nfrom globals import config\r\n\r\n# These constants control the beam search decoder\r\n\r\n# Beam width used in the CTC decoder when 
building candidate transcriptions\r\nBEAM_WIDTH = 500\r\n\r\n# The alpha hyperparameter of the CTC decoder. Language Model weight\r\nLM_ALPHA = 0.75\r\n\r\n# The beta hyperparameter of the CTC decoder. Word insertion bonus.\r\nLM_BETA = 1.85\r\n\r\n\r\n# These constants are tied to the shape of the graph used (changing them changes\r\n# the geometry of the first layer), so make sure you use the same constants that\r\n# were used during training\r\n\r\n# Number of MFCC features to use\r\n# N_FEATURES = 26\r\n\r\n# Size of the context window used for producing timesteps in the input vector\r\n# N_CONTEXT = 9\r\n\r\n\r\nclass SpeechToTextService:\r\n    def transcribe(self, audio):\r\n\r\n        name = 'speech_server_main'\r\n        conf = config.ConfigDeepSpeech()\r\n        model = conf.get_config('model')\r\n        print(model)\r\n        # alphabet = conf.get_config('alphabet')\r\n        # print(alphabet)\r\n        lm = conf.get_config('lm')\r\n        trie = conf.get_config('trie')\r\n        print(trie)\r\n        ds = Model(model, BEAM_WIDTH)\r\n        if lm and trie:\r\n            ds.enableDecoderWithLM(lm, trie, LM_ALPHA, LM_BETA)\r\n        text = ds.stt(audio)\r\n        return text\r\n\r\n    def ready(self):\r\n        print(\"Deepspeech Server Initialization\")","repo_name":"Atris-tech/deepspeech_usage","sub_path":"DeepSpeechEngine/Transcribe/Services/SpeechToTextService.py","file_name":"SpeechToTextService.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"15689879823","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('', views.DashboardView.as_view(), name='doctor_dashboard'),\n    path('/', views.DashboardView.as_view(), name='doctor_dashboard'),\n    path('add_prescription/', views.addSuggesions.as_view(), name='add_prescription'),\n    path('create_appointment/', views.createAppointment.as_view(), name='create_appointment'),\n    path('checkup_report_status/', views.checkup_report_status_view.as_view(), name='checkup_report_status'),\n    path('member_condition//', views.member_condition_view.as_view(), name='member_condition'),\n    path('nurse_schedule//', views.nurse_schedule_view.as_view(), name='nurse_schedule'),\n    path('prescription/', views.prescription_view.as_view(), name='prescription'),\n    path('resident_status/', views.resident_status_view.as_view(), name='resident_status'),\n]\n","repo_name":"sheikhDipta003/CSE-326-BUET","sub_path":"presentations/Implementation/ISD_demo/doctors/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28249925435","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymysql\nimport pymysql.cursors\nfrom suning.items import SuningUrlLogItem\nfrom suning.Models.urlLogModel import UrlLogModel\n\nclass SuningPipeline(object):\n\n    def __init__(self):\n        # Connect to the database\n        self.connect = pymysql.connect(\n            host='127.0.0.1',\n            port=3306,\n            db='suning',\n            user='root',\n            passwd='root',\n            charset='utf8',\n            use_unicode=True\n        )\n        # Execute SQL through a cursor\n        self.cursor = self.connect.cursor()\n\n    def process_item(self, item, spider):\n        if isinstance(item, SuningUrlLogItem):\n            # Save the item info to the database\n            UrlLogModel().insertUrlLog(item)\n
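            # For this pipeline to actually run, it must be enabled in the project\n            # settings, e.g. ITEM_PIPELINES = {'suning.pipelines.SuningPipeline': 300}\n            # (300 is an arbitrary priority value used here for illustration).\n            return 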
item\n\n","repo_name":"zheng-sun/scrapy","sub_path":"suning/suning/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73384618260","text":"import torch\nimport torch.nn as nn\nfrom torchvision.transforms import Resize\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('Norm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass ConvBlock(nn.Sequential):\n def __init__(self, in_channel, out_channel, ker_size, padd, stride):\n super(ConvBlock, self).__init__()\n self.add_module('conv', nn.Conv2d(in_channel, out_channel, kernel_size=ker_size, stride=stride, padding=padd)),\n self.add_module('norm', nn.BatchNorm2d(out_channel)),\n self.add_module('LeakyRelu', nn.LeakyReLU(0.2, inplace=False))\n\n\nclass WDiscriminator(nn.Module):\n def __init__(self, opt):\n super(WDiscriminator, self).__init__()\n ker_size = 3\n padd_size = 0\n N = int(opt.nfc)\n self.head = ConvBlock(3, N, ker_size, padd_size, 1)\n self.body = nn.Sequential()\n for i in range(opt.num_model_blocks - 2):\n N = int(opt.nfc / pow(2, (i + 1)))\n block = ConvBlock(max(2 * N, opt.nfc), max(N, opt.nfc), ker_size, padd_size, 1)\n self.body.add_module('block%d' % (i + 1), block)\n # self.body.add_module('MP%d' % (i + 1), torch.nn.MaxPool2d(2))\n self.tail = nn.Conv2d(max(N, opt.nfc), 1, kernel_size=ker_size, stride=1, padding=padd_size)\n\n def forward(self, x):\n x = self.head(x)\n x = self.body(x)\n x = self.tail(x)\n return x\n\n\nclass Generator(nn.Module):\n def __init__(self, opt):\n super(Generator, self).__init__()\n self.is_cuda = torch.cuda.is_available()\n ker_size = 3\n # padd_size = 1\n padd_size = 0\n self.padding = (ker_size // 2) * opt.num_model_blocks\n\n N = opt.nfc\n self.head = ConvBlock(3, N, ker_size, padd_size, 1)\n\n self.body = nn.Sequential()\n for i in range(opt.num_model_blocks - 2):\n N = int(opt.nfc / pow(2, (i + 1)))\n block = ConvBlock(max(2 * N, opt.nfc), max(N, opt.nfc), ker_size, padd_size, 1)\n self.body.add_module('block%d' % (i + 1), block)\n\n self.tail = nn.Sequential(\n nn.Conv2d(max(N, opt.nfc), 3, kernel_size=ker_size, stride=1, padding=padd_size),\n nn.Tanh()\n )\n\n def forward(self, x):\n import torch.nn.functional as F\n x = F.pad(x, [self.padding, self.padding, self.padding, self.padding])\n x = self.head(x)\n x = self.body(x)\n x = self.tail(x)\n\n return x\n\n\ndef reset_grads(model, require_grad):\n for p in model.parameters():\n p.requires_grad_(require_grad)\n return model\n\n\nclass ArrayOFGenerators:\n def __init__(self):\n self.Gs = []\n self.shapes = []\n self.noise_amps = []\n\n def append(self, netG, shape, noise_amp):\n self.Gs.append(netG)\n self.shapes.append(shape)\n self.noise_amps.append(noise_amp)\n\n def sample_zs(self, n_samples):\n \"\"\"Sample spacial noise in the correct shapes for the trained generators and with the right amplitudes\"\"\"\n device = next(self.Gs[0].parameters()).device\n zs = []\n for shape, amp in zip(self.shapes, self.noise_amps):\n zs += [torch.randn(n_samples, 3, shape[0], shape[1], device=device) * amp]\n return zs\n\n def sample_images(self, zs=None):\n \"\"\"\n Gradualy synthesize an image by running the generators on the supplied spacial latents.\n If zs are not supplied they are sampled.\n \"\"\"\n if not self.Gs:\n return 0\n\n if zs is None:\n zs = self.sample_zs(1)\n\n 
output = 0\n for i, (z, G) in enumerate(zip(zs, self.Gs)):\n output = G(z + output) + output\n if i != len(self.Gs) - 1:\n output = Resize(zs[i+1].shape[-2:], antialias=True)(output)\n return output\n\n def __len__(self):\n return len(self.Gs)\n\n\nif __name__ == '__main__':\n from argparse import Namespace\n opt = Namespace()\n opt.nfc = 32\n opt.num_model_blocks = 5\n\n D = WDiscriminator(opt)\n x = torch.ones(1,3,40,40)\n\n print(D)\n print(D(x).shape)","repo_name":"ariel415el/Simple-SinGAN","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4237,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"35664431784","text":"import urllib.request, urllib.parse, urllib.error\r\nimport json\r\nimport ssl\r\n#E:\\py4e\\assignment_15.py\r\n#Ignore SSL certificate errors\r\nctx = ssl.create_default_context()\r\nctx.check_hostname = False\r\nctx.verify_mode = ssl.CERT_NONE\r\n\r\ncount = 0\r\ns = 0\r\n\r\nurl = input('Enter location: ')\r\n\t\r\nprint('Retrieving ' + url)\r\ndata = urllib.request.urlopen(url, context=ctx).read().decode()\r\n\r\njs = json.loads(data)\r\n\r\nfor i in js['comments']:\r\n\r\n\tcount += 1\r\n\ts += int(i['count'])\r\nprint(\"count:\",count)\r\nprint(\"sum:\",s)\t","repo_name":"Anoop0712/Python-for-Everybody-Offered-by-University-of-Michigan","sub_path":"py4e/assignment_15.py","file_name":"assignment_15.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"37571934903","text":"import pandas as pd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n\ndata = pd.read_csv(r\"C:\\Users\\Sarthak\\Downloads\\winequality.csv\")\nx = data.iloc[:,data.columns!=\"quality\"]\ny = data.iloc[:,11]\nreg = LinearRegression().fit(x,y)\n# Find Most Optimal coefficient of all attributes\ncoeff_df = pd.DataFrame(reg.coef_, x.columns, columns=['Coefficient'])\nprint(coeff_df)\ny_predict = reg.predict(x)\nprint(r2_score(y,y_predict))","repo_name":"sarthakdixit/Machine-Learning","sub_path":"Machine Learning Models/Multiple Linear Regression/Wine.py","file_name":"Wine.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"24708502773","text":"import concurrent.futures\nimport pickle\nimport sys\n\nimport matplotlib.pyplot as plt\nimport moments\nimport numpy as np\nimport seaborn as sns\n\nimport fwdpy11\nimport testutils.analysis_tools\nimport testutils.two_deme_IM_argument_parser\n\n\ndef runsim(model, num_subsamples, nsam, seed):\n rng = fwdpy11.GSLrng(seed)\n pop = fwdpy11.DiploidPopulation(model[\"Nref\"], model[\"genome_length\"])\n fwdpy11.evolvets(rng, pop, fwdpy11.ModelParams(**model[\"pdict\"]), 100)\n if model[\"mutations_are_neutral\"] is True:\n fwdpy11.infinite_sites(rng, pop, model[\"theta\"] / 4 / model[\"Nref\"])\n mean_fst = 0.0\n deme_zero_fs = np.zeros(2 * args.nsam - 1)\n deme_one_fs = np.zeros(2 * args.nsam - 1)\n for _ in range(num_subsamples):\n fs = testutils.analysis_tools.tskit_fs(pop, nsam)\n fs = moments.Spectrum(fs)\n mean_fst += fs.Fst()\n deme_zero_fs += fs.marginalize([1]).data[1:-1]\n deme_one_fs += fs.marginalize([0]).data[1:-1]\n\n mean_fst /= num_subsamples\n deme_zero_fs /= num_subsamples\n deme_one_fs /= num_subsamples\n\n return mean_fst, deme_zero_fs, deme_one_fs\n\n\nif __name__ == \"__main__\":\n parser = 
testutils.two_deme_IM_argument_parser.make_model_runner_parser()\n parser.add_argument(\n \"--num_subsamples\", type=int, default=None, help=\"Number of subsamples to take\"\n )\n args = parser.parse_args(sys.argv[1:])\n\n with open(args.infile, \"rb\") as f:\n model = pickle.load(f)\n\n initial_seed = np.random.randint(0, np.iinfo(np.uint32).max, 1)[0]\n np.random.seed(initial_seed)\n\n seeds = np.random.randint(0, np.iinfo(np.uint32).max, args.nreps)\n\n fsta = np.zeros(args.nreps)\n mean_deme_zero_fs = np.zeros(2 * args.nsam - 1)\n mean_deme_one_fs = np.zeros(2 * args.nsam - 1)\n idx = 0\n with concurrent.futures.ProcessPoolExecutor(max_workers=args.nworkers) as e:\n futures = {\n e.submit(runsim, model, args.num_subsamples, args.nsam, i) for i in seeds\n }\n for fut in concurrent.futures.as_completed(futures):\n fst, d0, d1 = fut.result()\n fsta[idx] = fst\n idx += 1\n mean_deme_zero_fs += d0\n mean_deme_one_fs += d1\n\n mean_deme_zero_fs /= args.nreps\n mean_deme_one_fs /= args.nreps\n\n with open(args.outdir + \"/caption.rst\", \"w\") as f:\n f.write(f\"The initial_seed was {initial_seed}.\\n\")\n f.write(\"The model details are:\\n\\n::\\n\\n\")\n mp = fwdpy11.ModelParams(**model[\"pdict\"])\n for i in mp.asblack().split(\"\\n\"):\n f.write(f\"\\t{i}\\n\")\n f.write(\"\\n\")\n\n with open(args.outdir + \"/fst.np\", \"wb\") as f:\n fsta.tofile(f)\n with open(args.outdir + \"/deme0.np\", \"wb\") as f:\n mean_deme_zero_fs.tofile(f)\n with open(args.outdir + \"/deme1.np\", \"wb\") as f:\n mean_deme_one_fs.tofile(f)\n","repo_name":"molpopgen/fwdpy11_statistical_tests","sub_path":"IM_lowlevel/testcode/run_two_deme_IM_model.py","file_name":"run_two_deme_IM_model.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36118912490","text":"import shutil, os\nfrom pathlib import Path\n\ndef main():\n print()\n path = '/workspaces/115507760/'\n destination = os.path.join(os.path.abspath('.'), 'copyofallnonpyfiles')\n for folder_name, sub_folders, file_names in os.walk(path):\n sub_folders[:] = [sub_folder\n for sub_folder in sub_folders\n if sub_folder[0] != '.']\n for file_name in file_names:\n source = os.path.join(folder_name, file_name)\n if not file_name.endswith(\".py\"):\n try:\n shutil.copy(source, destination)\n\n except shutil.SameFileError:\n print(\"File already exists in source and destination.\")\n\n except PermissionError:\n print(\"Permission Denied.\")\n\n except:\n print(\"Error occurred while copying file.\")\n\nmain()","repo_name":"rohingoyal25/AtBS","sub_path":"CH10_fileorg/selectivecopy.py","file_name":"selectivecopy.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"13167264903","text":"import sys\nfrom collections import deque\n\ndef input():\n return sys.stdin.readline().rstrip()\n\nn,m,k,x=map(int,input().split())\n\ngraph=[[] for _ in range(n+1)]\n\nvisited=[0]*(n+1)\n\nfor _ in range(m):\n a,b=map(int,input().split())\n graph[a].append(b)\n \nq=deque([(0,x)])\nvisited[x]=True\n\nans=[]\n\nwhile q:\n dist,now=q.popleft()\n if dist==k:\n ans.append(now)\n for node in graph[now]:\n if visited[node]==False:\n visited[node]=True\n q.append((dist+1,node))\n\nans.sort()\nif len(ans)==0:\n print(-1)\nfor value in ans:\n print(value)\n 
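# Editor note (added): BFS explores cities in nondecreasing distance from x, so\n# each city is visited at most once and the run time is O(n + m) for n cities and\n# m roads; cities at distance exactly k are collected in ans and printed sorted.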
","repo_name":"Sonjeongbeom/AlgorithmOfToday","sub_path":"BJ/DFS.BFS/특정거리의도시찾기.py","file_name":"특정거리의도시찾기.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34630272042","text":"from Rapids_Classes.KDG import *\nimport re\n\nCONT_SECTION = \":\"\nDISC_SECTION = \":\"\nDEP_SECTION = \":\"\nMETRIC_SECTION = \":\"\n\ndef is_float(str):\n try:\n float(str)\n return True\n except ValueError:\n return False\n\n\n# read in a fact and generate a dictionary where the key is a config set,\n# and the value is the cost\ndef readFact(fact_file, knobs, gt, COST=True):\n fact = open(fact_file, 'r')\n if fact is None:\n print\n \"RAPID-C / STAGE-4 : reading trained profile failed\"\n return\n for line in fact:\n col = line.split()\n knob_name = \"\"\n knob_val = 0.0\n configuration = Configuration()\n is_digit = False\n vals = []\n for i in range(len(col)):\n if col[i].isdigit() or is_float(col[i]):\n if is_digit:\n # this is the start of values\n for j in range(i, len(col)):\n vals.append(col[j])\n break\n is_digit = True\n knob_val = int(col[i])\n configuration.addConfig(\n [Config(knobs.getKnob(knob_name), knob_val)])\n continue\n else:\n is_digit = False\n knob_name = col[i]\n if not gt.hasEntry(configuration):\n print\n \"cant find key:\" + knob_name + str(knob_val)\n if COST:\n gt.setCost(configuration, float(vals[0]))\n else:\n gt.setMV(configuration, list(map(lambda x: float(x), vals)))\n print\n \"RAPID-C / STAGE-4 : trained profile constructed\"\n return\n\n\n# read in a description file\ndef readDesc(desc_file):\n knobs = set()\n and_constraints = set()\n or_constraints = set()\n desc = open(desc_file, 'r')\n section = 0\n for line in desc:\n print(line)\n if CONT_SECTION in line:\n section = 0\n continue\n elif DISC_SECTION in line:\n section = 1\n continue\n elif DEP_SECTION in line:\n section = 2\n continue\n elif METRIC_SECTION in line:\n section = 3\n continue\n else:\n # section lines\n line = line.strip()\n col = line.split(' ')\n if section == 0: # continuous knobs\n setting_name = col[0]\n knob_name = setting_name + \"Num\"\n regex = re.compile('[0-9]+')\n setting_min, setting_max = regex.findall(col[1])\n ktype = col[2]\n knobs.add(Knob(knob_name, setting_name, setting_min, setting_max))\n elif section == 1: # discrete knobs\n setting_name = col[0]\n knob_name = setting_name + \"Num\"\n regex = re.compile('[0-9]+')\n vals = regex.findall(col[1])\n ktype = col[2]\n k = Knob(knob_name, setting_name, min(vals), max(vals))\n k.setValues(vals)\n knobs.add(k)\n elif section == 2: # deps\n dtype = col[0][0:-1].lower()\n sink = col[1]\n # sink\n regex = re.compile('[0-9]+')\n sink_vals = regex.findall(col[2])\n if len(sink_vals) == 2:\n # cont sink\n sink_min = sink_vals[0]\n sink_max = sink_vals[1]\n else:\n sink_min = sink_vals[0]\n sink_max = sink_vals[0]\n # source\n sources = \" \".join(col[4:])\n sources_col = sources.split(',')\n if len(sources_col) == 1:\n dtype = \"and\"\n for source_col in sources_col:\n source_col = source_col.strip()\n source = source_col.split(\" \")[0]\n if '{' in source_col.split(\" \")[1]:\n # discrete\n vals = regex.findall(source_col.split(\" \")[1])\n for val in vals:\n if dtype == \"and\":\n and_constraints.add(Constraint(dtype, source, sink, val, val,\n sink_min, sink_max))\n else:\n or_constraints.add(Constraint(dtype, source, sink, val, val,\n sink_min, sink_max))\n else:\n # continue\n vals = regex.findall(source_col.split(\" \")[1])\n if dtype == 
\"and\":\n and_constraints.add(Constraint(dtype, source, sink, vals[0], vals[1],\n sink_min, sink_max))\n else:\n or_constraints.add(Constraint(dtype, source, sink, vals[0], vals[1],\n sink_min, sink_max))\n\n return knobs, and_constraints, or_constraints\n","repo_name":"niuye8911/rapidlib-linux","sub_path":"modelConstr/Rapids/Parsing_Util/readFact.py","file_name":"readFact.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"73450786900","text":"import binascii\n\nclass Convert:\n\n @staticmethod\n def string_to_binary(string: str) -> str:\n bin_array = [bin(ord(x))[2:].zfill(8) for x in string]\n bin_str = \"\"\n i = 0\n while i < len(bin_array):\n bin_str += bin_array[int(i)]\n i += 1\n return bin_str\n\n @staticmethod\n def binary_to_string(bin_str: str) -> str:\n raw_str = binascii.unhexlify('%x' % (int('0b'+bin_str, 2)))\n display_str = (str(raw_str).lstrip('b').replace(\"'\", \"\").strip('\"'))\n return display_str\n\n @staticmethod\n def rgb_to_hex(r, g, b):\n return '#{:02x}{:02x}{:02x}'.format(r,g,b)\n\n @staticmethod\n def hex_to_rgb(hexcode: str):\n hexcode = hexcode.lstrip('#')\n rgb_values = tuple(int(hexcode[i:i+2], 16) for i in (0,2,4))\n return rgb_values[0], rgb_values[1], rgb_values[2]\n","repo_name":"Th3-F00L/Stegafun","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"14889650379","text":"import os\nfrom dotenv import load_dotenv\nfrom http.server import BaseHTTPRequestHandler\nfrom urllib.parse import urlparse\nfrom urllib.parse import parse_qs\nimport requests\nimport json\n\n# Environment variables\nload_dotenv()\nCOVALENT_API_KEY = os.getenv(\"COVALENT_API_KEY\")\n\nclass handler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n query = urlparse(self.path).query\n chainId = parse_qs(query)[\"chainId\"][0]\n contractId = parse_qs(query)[\"contractId\"][0]\n\n response = requests.get(f\"https://api.covalenthq.com/v1/{chainId}/address/{contractId}/transactions_v2/?page-size=250&key={COVALENT_API_KEY}\")\n transactions_v2 = response.json()[\"data\"][\"items\"]\n\n transactions = []\n for transaction in transactions_v2:\n date = transaction[\"block_signed_at\"].split(\"T\")[0].replace(\"2021-\", \"\").replace(\"-\", \"/\")\n\n if not any(d[\"date\"] == date for d in transactions):\n transactions.insert(0, {\"date\": date, \"transactions\": 1})\n else:\n ex_transaction_date = next(d for d in transactions if d[\"date\"] == date)\n ex_transaction_date[\"transactions\"] += 1\n \n self.wfile.write(json.dumps({\"transactions\": transactions}).encode())\n return","repo_name":"Seth-McKilla/covalent-dao","sub_path":"api/v1/get-transactions.py","file_name":"get-transactions.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} +{"seq_id":"38873389185","text":"# Uses Heath Nutrition and Population statistics, avalaible at\n# http://datacatalog.worldbank.org, stored in the file HNP_Data.csv,\n# assumed to be stored in the working directory.\n# Prompts the user for an Indicator Name. 
If it exists and is associated with\n# a numerical value for some countries or categories, for some of the years 1960-2015,\n# then finds out the maximum value, and outputs:\n# - that value;\n# - the years when that value was reached, from oldest to more recent years;\n# - for each such year, the countries or categories for which that value was reached,\n# listed in lexicographic order.\n# \n# Written by *** and Eric Martin for COMP9021\n\nimport sys\nimport os\nimport csv\n\ndef if_a_number(number):\n    try:\n        number = float(number)\n    except ValueError:\n        return False\n    else:\n        return True\n\nfilename = 'HNP_Data.csv'\nif not os.path.exists(filename):\n    print('There is no file named {} in the working directory, giving up...'.format(filename))\n    sys.exit()\n\nindicator_of_interest = input('Enter an Indicator Name: ')\n\nfirst_year = 1960\nnumber_of_years = 56\nmax_value = None\ncountries_for_max_value_per_year = {}\nhas_i_n = []\nbest_value = 0\nhas_best_value = []\n\nwith open(filename) as csvfile:\n    for line in csvfile:\n        if indicator_of_interest in line:\n            has_i_n.append(line)\n    for information in has_i_n:\n        information_set = set(information.split(','))\n        for something in information_set:\n            if if_a_number(something):\n                if eval(something) > best_value:\n                    best_value = eval(something) \n    if not best_value == 0:\n        max_value = best_value\n    while has_i_n != []:\n        pop_ = has_i_n.pop()\n        if str(best_value) in pop_:\n            has_best_value.append(pop_)\n    \n    for contents in has_best_value:\n        n = 0\n        if contents.find('\"') == 0:\n            country = contents[1 : contents.find('\"', 1)]\n            contents = contents.split('\"')\n            contents = contents[-1].split(',')[2: ]\n            while contents != []:\n                pop_contents = contents.pop()\n                if pop_contents == str(max_value):\n                    if 2015 - n in countries_for_max_value_per_year:\n                        if country not in countries_for_max_value_per_year[2015 - n]: \n                            countries_for_max_value_per_year[2015 - n].append(country)\n                    else:\n                        countries_for_max_value_per_year[2015 - n] = [country]\n                n += 1\n        \n        elif ',' in indicator_of_interest:\n            contents = contents.replace('\"' + indicator_of_interest + '\"' + ',', '')\n            country, cc, ic, values = contents.split(',', 3)\n            values = values.split(',')\n            while values != []:\n                pop_values = values.pop()\n                if pop_values == str(max_value):\n                    if 2015 - n in countries_for_max_value_per_year:\n                        if country not in countries_for_max_value_per_year[2015 - n]: \n                            countries_for_max_value_per_year[2015 - n].append(country)\n                    else:\n                        countries_for_max_value_per_year[2015 - n] = [country]\n                n += 1\n\n    \n        else:\n            country, cc, i_n, ic, values = contents.split(',', 4)\n            values = values.split(',')\n            while values != []:\n                pop_values = values.pop()\n                if pop_values == str(max_value):\n                    if 2015 - n in countries_for_max_value_per_year:\n                        if country not in countries_for_max_value_per_year[2015 - n]: \n                            countries_for_max_value_per_year[2015 - n].append(country)\n                    else:\n                        countries_for_max_value_per_year[2015 - n] = [country]\n                n += 1\n    \nfor a_year in countries_for_max_value_per_year:\n    countries_for_max_value_per_year[a_year].sort()\n    \nif max_value == None:\n    print('Sorry, either the indicator of interest does not exist or it has no data.')\nelse:\n    print('The maximum value is:', max_value)\n    print('It was reached in these years, for these countries or categories:')\n    for year in sorted(countries_for_max_value_per_year):\n        print('    {}: {}'.format(year, 
countries_for_max_value_per_year[year]))\n\n\n","repo_name":"goya1020/UNSW-Assignments","sub_path":"Other-Python-Related-Files/COMP9021/COMP9021_Quizzes/quiz_4.py","file_name":"quiz_4.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"16120193742","text":"\"\"\"\n========================================================================\n@file tree_traverse.py\n\nTraversal of a Binary Search Tree.\n\nBig O:\n Space Complexity:\n Time Complexity:\n\n: zach wolpe\n: zach.wolpe@medibio.com.au\n: 18 July 2023\n========================================================================\n\"\"\"\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n \nclass BinarySearchTree:\n def __init__(self) -> None:\n self.root = None\n \n def insert(self, value):\n node = Node(value)\n if self.root == None:\n self.root = node\n return self\n temp = self.root\n while True:\n if node.value == temp.value:\n return None\n if node.value < temp.value:\n if temp.left is None:\n temp.left = node\n return self\n temp = temp.left\n else:\n if temp.right is None:\n temp.right = node\n return self\n temp = temp.right\n \n def find(self, value):\n if self.root == None:\n return False\n temp = self.root\n while True:\n if temp == None:\n return False\n if temp.value == value:\n return True\n if value < temp.value:\n temp = temp.left\n else:\n temp = temp.right\n \n\n\nclass TreeTraversal(BinarySearchTree):\n def __init__(self) -> None:\n super().__init__()\n\n def BFS(self):\n \"\"\"\n Breadth First Search\n \"\"\"\n node = self.root\n queue = []\n visited = []\n queue.append(node)\n while len(queue) > 0:\n node = queue.pop(0)\n visited.append(node.value)\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return visited\n \n @staticmethod\n def traverse(node, visited, method):\n if method == 'preorder': visited.append(node.value)\n if node.left:\n TreeTraversal.traverse(node.left, visited, method)\n if method == 'inorder': visited.append(node.value)\n if node.right:\n TreeTraversal.traverse(node.right, visited, method)\n if method == 'postorder': visited.append(node.value)\n return visited\n \n def DFS(self, method='preorder'):\n if method not in ['preorder', 'postorder', 'inorder']:\n raise Exception('Invalid method specified')\n return TreeTraversal.traverse(self.root, [], method)\n\n\nif __name__ == '__main__':\n tree = TreeTraversal()\n tree.insert(47)\\\n .insert(21)\\\n .insert(76)\\\n .insert(18)\\\n .insert(27)\\\n .insert(52)\\\n .insert(82)\n\n print(tree.BFS())\n print(tree.DFS('preorder'))\n print(tree.DFS('inorder'))\n print(tree.DFS('postorder'))\n\n\n","repo_name":"ZachWolpe/Data-Structures-and-Algorithms","sub_path":"Algorithms/python/tree_traverse.py","file_name":"tree_traverse.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27424345600","text":"from PIL import ImageColor\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom _profile.forms import LayoutForm, ProfileForm\nfrom _profile.models import Profile, Layout\nfrom blog.models import Post\nfrom project.forms import ProjectForm, ProjectItemsForm\nfrom project.models import Project, 
ProjectItem\nfrom resume.models import Resume\nfrom service.models import Service\nfrom skill.forms import SkillsForm\nfrom skill.models import Skill\nfrom testimonial.forms import TestimonialForm\nfrom testimonial.models import Testimonial\nfrom users.forms import ContactUserForm\nfrom users.models import User\n\nDEFAULT_REDIRECT_URL = settings.DEFAULT_REDIRECT_URL\n\n\ndef handler404(request, exception):\n    response = render(request, '404.html', context={})\n    response.status_code = 404\n    return response\n\n\ndef handler500(request, exception):\n    response = render(request, '500.html', context={})\n    response.status_code = 500\n    return response\n\ndef my_portfolio(request, username):\n    print('the request get_raw_uri', request.get_raw_uri())\n    print('this is the username', username)\n    _username = User.objects.filter(username=username).first()\n    if _username:\n        project = Project.objects.filter(user__username=username)\n        project_items = ProjectItem.objects.filter(user__username=username)\n        profile = Profile.objects.filter(user__username=username).first()\n        skills = Skill.objects.filter(user__username=username)\n        testimonial = Testimonial.objects.filter(user__username=username)\n        layout = Layout.objects.filter(user__username=username).first()\n        service = Service.objects.filter(user__username=username)\n        resume = Resume.objects.filter(user__username=username)\n        post = Post.objects.filter(user__username=username)\n        if post.count() >= 6:\n            post = Post.objects.filter(user__username=username)\n        print('this is the post', post)\n        try:\n            primary_color_nums = ImageColor.getrgb(layout.primary_color)\n            secondary_color_nums = ImageColor.getrgb(layout.secondary_color)\n            primary_color_1 = primary_color_nums[0]\n            primary_color_2 = primary_color_nums[1]\n            primary_color_3 = primary_color_nums[2]\n            secondary_color_1 = secondary_color_nums[0]\n            secondary_color_2 = secondary_color_nums[1]\n            secondary_color_3 = secondary_color_nums[2]\n            print('this is the project items', project_items)\n            host_url = f\"{profile.user.username}{settings.PARENT_HOST}\"\n\n\n        except Exception:\n            primary_color_nums = None\n            secondary_color_nums = None\n            host_url = None\n            primary_color_1 = None\n            primary_color_2 = None\n            primary_color_3 = None\n            secondary_color_1 = None\n            secondary_color_2 = None\n            secondary_color_3 = None\n    else:\n        try:\n            messages.warning(request, \"the site does not exist\")\n        except:\n            pass\n        return HttpResponseRedirect(DEFAULT_REDIRECT_URL)\n    print(f'{primary_color_1},{primary_color_2},{primary_color_3}')\n    context = {\n        'project': project,\n        'ProjectItem': project_items,\n        'profile': profile,\n        'skills': skills,\n        'testimonial': testimonial,\n        'layout': layout,\n        'post': post,\n        'service': service,\n        'resume': resume,\n        'media_url': DEFAULT_REDIRECT_URL,\n        'host_url': host_url,\n        'primary_color_rgba': f'{primary_color_1},{primary_color_2},{primary_color_3}',\n        'secondary_color': {\n            'secondary_color_1': secondary_color_1,\n            'secondary_color_2': secondary_color_2,\n            'secondary_color_3': secondary_color_3,\n        },\n\n        'contact_user_form': ContactUserForm(),\n        'user': 'user',\n        'layout_form': LayoutForm(),\n        'profile_form': ProfileForm(),\n        'testimonial_form': TestimonialForm(),\n        'skills_form': SkillsForm(),\n        'Project_form': ProjectForm(),\n        'Project_items_form': ProjectItemsForm(),\n        'domain': DEFAULT_REDIRECT_URL,\n\n    }\n    print(DEFAULT_REDIRECT_URL)\n    if profile:\n        try:\n            return render(request,\n                          f'portfolio/{layout.portfolio_version.portfolio_version}/{layout.portfolio_version.portfolio_version}.html',\n                          context)\n        
except Exception as a:\n            print(a)\n            return render(request, 'portfolio/portfolio_v1/portfolio_v1.html', context)\n    else:\n        messages.warning(request, \"the site does not exist\")\n        return HttpResponseRedirect(DEFAULT_REDIRECT_URL)\n\n\n","repo_name":"Codertjay-projects/dynamic-portfolio","sub_path":"portfolio_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"13096813976","text":"class Solution:\n    def eventualSafeNodes(self, graph: List[List[int]]) -> List[int]:\n        visited = [0 for x in range(len(graph))]\n        state = [0 for x in range(len(graph))]\n        for x in range(len(graph)):\n            if len(graph[x]) == 0:\n                state[x] = 1\n\n        def dfs(graph, curr, visited, state):\n            visited[curr] = 1\n            for x in graph[curr]:\n                if state[x] == 1:\n                    continue\n                if state[x] == -1:\n                    state[curr] = -1\n                    continue\n                if visited[x] == 1:\n                    state[x] = -1\n                    state[curr] = -1\n                    return\n                dfs(graph, x, visited, state)\n            for x in graph[curr]:\n                if state[x] != 1:\n                    return\n            state[curr] = 1\n\n        for m in range(len(graph)):\n            dfs(graph, m, visited, state)\n        print(state)\n        for z in range(len(state)):\n            if state[z] == 1:\n                state[z] = z\n            else:\n                state[z] = -1\n        state = list(filter(lambda l: l != -1, state))\n        print(state)\n        return state\n","repo_name":"varshajayaraman/SheCodesInPython","sub_path":"src/M802_FindEventualSafeStates.py","file_name":"M802_FindEventualSafeStates.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"6356058647","text":"\"\"\" SciPy \n    A well-known open-source scientific computing library built on top of NumPy; it adds numerical integration, optimization, statistics, and some special functions\n\"\"\"\n\"\"\" Summary \n    .mat(MATLAB,Octave) \n    Statistics module scipy.stats: discrete distributions, continuous distributions, statistical tests\n    Normal distribution, skewness, kurtosis, normality tests\n\"\"\"\n\nimport numpy as np\nfrom scipy import io\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\ncurrentDir = \"NumPy_Beginner_Guide/\"\n\ndef demo_mat():\n    \"\"\" Save and load .mat files \"\"\"\n    a = np.arange(7)\n    io.savemat(currentDir+\"a.mat\", {\"array\":a})\n\n#demo_mat()\n\"\"\" What do\n    The generated .mat file is a binary file, used for interacting with MATLAB.\n\"\"\"\n\ndef demo_stats_normal():\n    \"\"\" Generate normally distributed random numbers, analyze them, and plot \"\"\"\n    # Generate random numbers from a normal distribution\n    generated = stats.norm.rvs(size=900)\n    # Fit a normal distribution to the generated data, getting its mean and standard deviation\n    print(stats.norm.fit(generated))\n    # Skewness describes how skewed (asymmetric) a probability distribution is; run a skew\n    # test of whether the data set follows a normal distribution, p-value in range 0-1\n    print(stats.skewtest(generated)) #pvalue=0.9195164327603584: 91% consistent with a normal distribution\n    # Kurtosis measures how steep (peaked) the probability distribution curve is\n    print(stats.kurtosistest(generated))\n    # A normality test checks the degree to which a data set follows a normal distribution.\n    print(stats.normaltest(generated))\n    # Get the value at a given percentile of the data:\n    print(stats.scoreatpercentile(generated, 95))\n    # Conversely, starting from the value 1, find the corresponding percentile\n    print(stats.percentileofscore(generated, 1))\n\n    plt.hist(generated)\n    plt.show()\n\n#demo_stats_normal()\n\"\"\"what do\n    Test whether a data set follows a normal distribution by checking its kurtosis, skewness, and normality\n\"\"\"\n\n","repo_name":"laoxian423/learn_notes","sub_path":"NumPy_Beginner_Guide/SciPy_study.py","file_name":"SciPy_study.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43017622105","text":"def count_one(L, R, C, r, c, CS, RS) :\n    East, South, West, North, a, b = 0, 0, 0, 0, 0, 0\n    \n    for Lst in CS :\n        if Lst[0] <= c and Lst[0] + Lst[1] - 1 >= c :\n            a = 1\n            East = Lst[0] + Lst[1] - c\n            West = c - Lst[0] + 1\n            if West == Lst[1] : CS.remove(Lst)\n            break \n\n    if a == 0 :\n        for e in range(c, C) :\n            if L[r][e] == 1 :\n                East += 1\n            else : break\n        \n        West = 1\n        if East > 1 : 
CS.append([c, East])\n \n for Lst in RS[c] :\n if Lst[0] <= r and Lst[0] + Lst[1] - 1 >= r :\n b = 1\n South = Lst[0] + Lst[1] - r\n North = r - Lst[0] + 1\n if North == Lst[1] : RS[c].remove(Lst)\n break \n \n if b == 0 :\n for s in range(r, R) :\n if L[s][c] == 1 :\n South += 1\n else : break\n \n North = 1\n if South > 1 : RS[c].append([r, South])\n \n return East, South, West, North\n \ndef count_LShape(a, b) :\n if a == 1 or b == 1 : return 0\n else :\n count = 0 \n count += min(b, a // 2) - 1\n count += min(a, b // 2) - 1\n return count \n \ndef count_total_LShape(A, B, C, D) :\n Count = 0\n Count += count_LShape(A, B)\n Count += count_LShape(B, C)\n Count += count_LShape(C, D)\n Count += count_LShape(D, A)\n return Count\n \nT = int(input())\ncount = []\n\nfor t in range(0, T) :\n count.append(0)\n\n R, C = input().split()\n R, C = int(R), int(C)\n \n List, Row, CS, RS = [], [], [], []\n \n for l in range(0, R) :\n Row = list(input().split())\n Row = list(map(int, Row))\n List.append(Row)\n Row = []\n \n for p in range(0, C) : RS.append([])\n \n for i in range(0, R) :\n for j in range(0, C) :\n if List[i][j] == 1 :\n E, S, W, N = count_one(List, R, C, i, j, CS, RS)\n count[t] += count_total_LShape(E, S, W, N)\n \nfor t in range(0, T) :\n print(\"Case #%d: %d\" % (t+1, count[t]))","repo_name":"Jenix8/baekjoon","sub_path":"심화문제/23000/L_shape.py","file_name":"L_shape.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"37652755875","text":"from math import prod, sqrt\n\ndef vec_dis(v):\n\tt = 0\n\tfor i in v:\n\t\tt += i*i\n\treturn sqrt(t)\n\ndef mat_dis(m):\n\tt = []\n\tfor i in m:\n\t\tt.append(vec_dis(i))\n\treturn vec_dis(t)\n\ndef data_improved(data, upper_lim):\n\ttmp = data\n\twhile (max(tmp) > upper_lim):\n\t\ttmp.remove(max(tmp))\n\treturn tmp\n\ndef align_list(l1, l2):\n\tt1 = l1\n\tt2 = l2\n\tif len(t1) < len(t2):\n\t\tt2 = t2[:len(t1)]\n\telse:\n\t\tt1 = t1[:len(t2)]\n\treturn t1, t2\n","repo_name":"YunHsiuLu/YunSideChannel","sub_path":"LWE_preTEST/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"10337097076","text":"\"\"\" This is the same code as example 1, however it uses unified memory\n\"\"\"\nimport numpy as np\nimport time\nimport cupy\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom scipy.signal import convolve2d\nfrom collections import namedtuple\nfrom string import Template\nimport py3nvml\npy3nvml.grab_gpus(1)\nimport os\nos.environ['CUDA_LAUNCH_BLOCKING'] = '1'\nStream = namedtuple('Stream', ['ptr'])\n\nCUDA_NUM_THREADS = 1024\n\n\nrowfilter_kernel = \"\"\"\nextern \"C\"\n__global__ void rowfilter(\nfloat* dest, const float* src, const float *w, int N, int C, int H, int W, int M) {\n/* dest - output array. should be same shape as input\n src - input array\n w - input kernel. Should be a 1d array\n N, C, H, W - input tensor sizes\n M - weight size\n zero_m - position of the 'zero' in the weight. 
As Python doesn't support\n   negative indexing.\n*/\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x;\n         i < N*C*H*W; i += blockDim.x * gridDim.x) {\n        const int n = i / C / H / W;\n        const int c = (i / H / W) % C;\n        const int y = (i / W) % H;\n        const int x = i % W;\n\n        // The stride of the input assuming correct padding - if the filter has\n        // even length, we will pad M/2 on either side, so the stride will be\n        // W + M. If it has odd length, we will pad (M-1)/2 either side, so\n        // the stride will be W+M-1.\n        const int S = W + M - (M % 2);\n        float value = 0;\n#pragma unroll\n        for (int kw = 0; kw < M; kw++) {\n            const int offset = n*C*H*S + c*H*S + y*S + x + kw;\n            value += w[M-1-kw]*src[offset];\n        }\n        dest[i] = value;\n    }\n}\n\"\"\"\nrowfilter_pad_kernel2 = \"\"\"\nextern \"C\"\n__global__ void rowfilter_pad(float *dest, const float* src, const float* w,\n                              int N, int C, int H, int Wmin, int Wmax,\n                              int W_in, int Mlow, int Mhigh, int rev,\n                              int stride)\n/*\n  We want to avoid costly padding operations, but instead just pull from x\n  modulo. We also allow the output to be an arbitrary width, not necessarily\n  going from 0 to W_in. For support outside [0, W_in) symmetric padding is\n  used on the input.\n\n  Wmin - low index of the output x (can be negative)\n  Wmax - high index of the output x (can be larger than W_in)\n  W_in - the width of the input.\n  Mlow - low index of the filter (can be negative)\n  Mhigh - high index of the filter\n*/\n{\n    const int W = Wmax - Wmin + 1;\n    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);\n         i < N*C*H*W; i += stride*(blockDim.x * gridDim.x)) {\n        const int n = i / C / H / W;\n        const int c = (i / H / W) % C;\n        const int y = (i / W) % H;\n        const int x = i % W;\n        float value = 0;\n        for (int k = Mlow; k <= Mhigh; k++) {\n            int x_in = x + Wmin + k;\n\n            // handle padding - the below complicated equation\n            // simply makes sure that the correct index input is used\n            // for symmetric padding. I.e. it should result in x_in going from:\n            //     -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9\n            // to:\n            //      2 1 0 | 0 1 2 3 4 5 6 | 6 5 4\n            // It also allows padding by more than the input length.\n            // The group variable will be:\n            //      1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...\n            const int group = x_in >= 0 ? (x_in / W_in) % 2 :\n                                          1 - (((-x_in-1)/W_in) % 2);\n            const int res = (x_in % W_in + W_in) % W_in;\n            x_in = (group == 1) ? (W_in-1) - res : res;\n\n            const int offset = n*C*H*W_in + c*H*W_in + y*W_in + x_in;\n            value += rev ? w[Mhigh-k] * src[offset] : w[k-Mlow] * src[offset];\n        }\n        dest[i / stride] = value;\n    }\n}\n\"\"\"\nrowfilter_pad_kernel = \"\"\"\nextern \"C\"\n__global__ void rowfilter_pad(\n    float* dest, const float* src, const float *w, int N, int C, int H, int W,\n    int Win, int Mlow, int Mhigh, int rev, int stride) {\n/* dest - output array. should be same shape as input\n   src - input array\n   w - input kernel. 
Should be a 1d array\n   N, C, H, W - input tensor sizes\n   Mlow - idx of most negative filter tap\n   Mhigh - idx of most positive filter tap\n   rev - used for calculating gradients - need to do correlation, and\n         some funny things with the filter.\n*/\n    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);\n         i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {\n        const int n = i / C / H / W;\n        const int c = (i / H / W) % C;\n        const int y = (i / W) % H;\n        const int x = i % W;\n        float value = 0;\n        // Use convolution formula: y[n] = sum h[k]*x[n-k]\n#pragma unroll\n        for (int k = Mlow; k <= Mhigh; k++) {\n            int x_in = x - k;\n\n            // handle padding - the above complicated equation\n            // simply makes sure that the correct index input is used\n            // for symmetric padding. I.e. it should result in x_in going from:\n            //     -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9\n            // to:\n            //      2 1 0 | 0 1 2 3 4 5 6 | 6 5 4\n            // It also allows padding by more than the input length.\n            // The group variable will be:\n            //      1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...\n            const int group = x_in >= 0 ? ((x_in / Win) % 2)\n                                        : 1-(((-x_in-1)/Win) % 2);\n\n            // This does a modulo operation but allowing for negative numbers,\n            // i.e. we want -2 % 5 = 3. In Python this works, but in C it\n            // gives -2.\n            // On top of reflecting the signal, we also need to reflect the\n            // filter around the boundary (unlike with the forward pass).\n            const int res = (x_in % Win + Win) % Win;\n            x_in = (group == 1) ? (Win-1) - res : res;\n\n            const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;\n            if (rev) {\n                value += w[Mhigh-k] * src[offset];\n            } else {\n                value += w[k-Mlow] * src[offset];\n            }\n        }\n        dest[i/stride] = value;\n    }\n}\n\"\"\"\n\nrowfilter_pad_kernel_bwd = \"\"\"\nextern \"C\"\n__global__ void rowfilter_pad_bwd(\n    float* dest, const float* src, const float *w, int N, int C, int H, int W,\n    int Win, int Mlow, int Mhigh, int stride) {\n/* dest - output array. should be same shape as input\n   src - input array\n   w - input kernel. Should be a 1d array\n   N, C, H, W - input tensor sizes\n   Mlow - idx of most negative filter tap\n   Mhigh - idx of most positive filter tap\n   rev - used for calculating gradients - need to do correlation, and\n         some funny things with the filter.\n*/\n    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);\n         i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {\n        const int n = i / C / H / W;\n        const int c = (i / H / W) % C;\n        const int y = (i / W) % H;\n        const int x = i % W;\n        float value = 0;\n        // Use correlation formula: y[n] = sum h[k]*x[n+k]\n#pragma unroll\n        for (int k = Mlow; k <= Mhigh; k++) {\n            int x_in = x + k;\n            int k_in = (x_in < 0 || x_in >= Win) ? -k : k;\n\n            // handle padding - the above complicated equation\n            // simply makes sure that the correct index input is used\n            // for symmetric padding. I.e. it should result in x_in going from:\n            //     -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9\n            // to:\n            //      2 1 0 | 0 1 2 3 4 5 6 | 6 5 4\n            // It also allows padding by more than the input length.\n            // The group variable will be:\n            //      1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...\n            const int group = x_in >= 0 ? ((x_in / Win) % 2)\n                                        : 1-(((-x_in-1)/Win) % 2);\n\n            // This does a modulo operation but allowing for negative numbers,\n            // i.e. we want -2 % 5 = 3. In Python this works, but in C it\n            // gives -2.\n            // On top of reflecting the signal, we also need to reflect the\n            // filter around the boundary (unlike with the forward pass).\n            const int res = (x_in % Win + Win) % Win;\n            x_in = (group == 1) ? 
(Win-1) - res : res;\n\n const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;\n value += w[k_in - Mlow] * src[offset];\n }\n dest[i/stride] = value;\n }\n}\n\"\"\"\n\n@cupy.util.memoize(for_each_device=True)\ndef load_kernel(kernel_name, code, **kwargs):\n code = Template(code).substitute(**kwargs)\n kernel_code = cupy.cuda.compile_with_cache(code)\n return kernel_code.get_function(kernel_name)\n\ndef reflect(x, minx, maxx):\n \"\"\"Reflect the values in matrix *x* about the scalar values *minx* and\n *maxx*. Hence a vector *x* containing a long linearly increasing series is\n converted into a waveform which ramps linearly up and down between *minx*\n and *maxx*. If *x* contains integers and *minx* and *maxx* are (integers +\n 0.5), the ramps will have repeated max and min samples.\n\n .. codeauthor:: Rich Wareham , Aug 2013\n .. codeauthor:: Nick Kingsbury, Cambridge University, January 1999.\n\n \"\"\"\n x = np.asanyarray(x)\n rng = maxx - minx\n rng_by_2 = 2 * rng\n mod = np.fmod(x - minx, rng_by_2)\n normed_mod = np.where(mod < 0, mod + rng_by_2, mod)\n out = np.where(normed_mod >= rng, rng_by_2 - normed_mod, normed_mod) + minx\n return np.array(out, dtype=x.dtype)\n\n\nclass RowFilter(Function):\n\n def __init__(self):\n super(RowFilter, self).__init__()\n\n def forward(self, input, weight):\n assert input.dim() == 4 and input.is_cuda and weight.is_cuda\n n, ch, h, w = input.shape\n kh, kw = weight.shape\n m2 = kw // 2\n output = torch.zeros_like(input)\n xe = reflect(np.arange(-m2, w+m2, dtype='int32'), -0.5, w-0.5)\n input = input[:,:,:,xe]\n\n with torch.cuda.device_of(input):\n f = load_kernel('rowfilter', rowfilter_kernel)\n f(block=(CUDA_NUM_THREADS,1,1),\n grid=(128,1,1),\n args=[output.data_ptr(), input.data_ptr(), weight.data_ptr(),\n np.int32(n), np.int32(ch), np.int32(h), np.int32(w),\n np.int32(kw)],\n stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))\n return output\n\nclass RowFilter_pad(Function):\n\n def __init__(self, weight, klow, khigh):\n super(RowFilter_pad, self).__init__()\n self.weight = weight\n self.klow = klow\n self.khigh = khigh\n assert abs(klow) == khigh\n self.delay = (khigh + klow) / 2\n self.pad_end = 1 - ((khigh - 1 - klow) % 2)\n self.f = load_kernel('rowfilter_pad', rowfilter_pad_kernel)\n self.fbwd = load_kernel('rowfilter_pad_bwd', rowfilter_pad_kernel_bwd)\n\n # @staticmethod\n def forward(ctx, input):\n assert input.dim() == 4 and input.is_cuda and ctx.weight.is_cuda\n n, ch, h, w = input.shape\n ctx.in_shape = (n, ch, h, w)\n output = torch.zeros((n, ch, h, w + ctx.pad_end),\n dtype=torch.float32,\n requires_grad=input.requires_grad).cuda()\n\n with torch.cuda.device_of(input):\n ctx.f(block=(CUDA_NUM_THREADS,1,1),\n grid=(128,1,1),\n args=[output.data_ptr(), input.data_ptr(),\n ctx.weight.data_ptr(), np.int32(n), np.int32(ch),\n np.int32(h), np.int32(w+ctx.pad_end), np.int32(w),\n np.int32(ctx.klow), np.int32(ctx.khigh), np.int32(0),\n np.int32(1)],\n stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))\n return output\n\n # @staticmethod\n def backward(ctx, grad_out):\n in_shape = ctx.in_shape\n n, ch, h, w = grad_out.shape\n grad_input = torch.zeros(in_shape, dtype=torch.float32).cuda()\n with torch.cuda.device_of(grad_out):\n ctx.fbwd(block=(CUDA_NUM_THREADS,1,1),\n grid=(128,1,1),\n args=[grad_input.data_ptr(), grad_out.data_ptr(),\n ctx.weight.data_ptr(), np.int32(n), np.int32(ch),\n np.int32(h), np.int32(w-ctx.pad_end), np.int32(w),\n np.int32(ctx.klow), np.int32(ctx.khigh), np.int32(1)],\n 
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))\n return grad_input\n\n\nif __name__ == '__main__':\n # It is important to use float32 as most GPUs still work on single precision\n # floating points. These arrays need 64 MiB of memory each.\n # a = np.random.randn(16, 3, 64, 64).astype('float32')\n w = np.random.randn(1, 11).astype('float32')\n\n # Define an input 'tensor' a and a rowfilter w.\n a = np.repeat(np.expand_dims(np.arange(10),axis=0), repeats=5,\n axis=0).astype('float32')\n a = np.repeat(np.expand_dims(a, axis=0), repeats=3, axis=0)\n a = np.repeat(np.expand_dims(a, axis=0), repeats=2, axis=0)\n # w = np.array([[1,2,3,4,5]]).astype('float32')\n\n # Push a to the gpu and make w a 4-dimensional tensor, ready for torch's\n # conv2d function (i.e. need to add some dimensions and transpose it)\n a_t = torch.tensor(a, requires_grad=True)\n a_t_gpu = a_t.cuda()\n w_t = np.reshape(w[:,::-1], [1, 1, *w.shape])\n w_t = np.repeat(w_t, repeats=3, axis=0)\n w_t = np.copy(w_t)\n w_t = torch.tensor(w_t, dtype=torch.float32).cuda()\n\n # Torch doesn't do symmetric padding, so do that manually by making a new\n # tensor that is larger\n m = w_t.shape[3]\n m2 = m // 2\n c = a.shape[3]\n xe = reflect(np.arange(-m2, c+m2, dtype='int32'), -0.5, c-0.5)\n\n # Now do torch's convolution\n # Run once to 'burn in'\n y_t = F.conv2d(a_t_gpu[:,:,:,xe], w_t, groups=3)\n start = time.time()\n for i in range(10):\n y_t = F.conv2d(a_t_gpu[:,:,:,xe], w_t, groups=3)\n print('Torch implementation took on avg (10 runs):\\t{}'.format((time.time()-start)/10))\n y_t.backward(torch.ones_like(y_t))\n grad1 = np.array(a_t.grad.data)\n a_t.grad.data.zero_()\n\n # In comparison, do the convolution directly with our above code.\n #\n # The first of these also manually extends a to be larger. 
The second does\n    # the symmetric padding in the low-level cuda code and it turns out this is\n    # faster.\n    w_t2 = torch.tensor(w, dtype=torch.float32).cuda()\n    mod = RowFilter()\n    y_t2 = mod(a_t_gpu, w_t2)\n    start = time.time()\n    for i in range(10):\n        y_t2 = mod(a_t_gpu, w_t2)\n    print('My implementation took on avg (10 runs):\\t{}'.format((time.time()-start)/10))\n\n\n    mod = RowFilter_pad(w_t2, -m2+(1-m%2), m2)\n    y_t3 = mod(a_t_gpu)\n    start = time.time()\n    for i in range(10):\n        y_t3 = mod(a_t_gpu)\n    print('My implementation took on avg (10 runs):\\t{}'.format((time.time()-start)/10))\n    y_t3.backward(torch.ones_like(y_t3))\n    grad2 = np.array(a_t.grad.data)\n    a_t.grad.data.zero_()\n\n    y_fwd = mod(torch.ones_like(a_t_gpu))\n    # np.testing.assert_array_almost_equal(y_t.data.detach().numpy(), y_t3.data.detach().numpy(), decimal=4)\n    np.testing.assert_array_almost_equal(y_t.cpu().detach().numpy(), y_t3.cpu().detach().numpy(), decimal=4)\n    np.testing.assert_array_almost_equal(grad1, grad2, decimal=4)\n    from torch.autograd import gradcheck\n    gradcheck(mod, (a_t_gpu,), eps=1e-2, atol=1e-3)\n","repo_name":"fbcotter/cuda_examples","sub_path":"pad_convolve/example6.py","file_name":"example6.py","file_ext":"py","file_size_in_byte":15719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"27398720658","text":"# Scrape each stock's intraday chart once a day\n\nfrom selenium import webdriver\nimport time\nimport json\nimport requests\nfrom scrapy.selector import Selector\nfrom requests import RequestException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.support import expected_conditions as EC\nimport re\nfrom PIL import Image\nimport threading\n\n\ndef extract_date(s):\n    '''Match the date in string s and return it in xxxx-xx-xx format'''\n    m = re.search(r'(\\d{4})[-/年](\\d{1,2})[-/月](\\d{1,2})日?', s)\n    return None if m is None else '-'.join(m.groups())\n\n\nstock_error = []\nstock_not_exist = []\nstock_id_array = []\ndriver = webdriver.PhantomJS(executable_path='C:\\python\\phantomjs-2.1.1-windows\\phantomjs.exe',\n                             service_args=['--ignore-ssl-errors=true', '--ssl-protocol=TLSv1'])\nstock_headers = {\n    'User-Agent': 'Mozilla/5.0(Windows NT 6.2; WOW64) AppleWebKit/'\n                  '537.36(KHTML, likeGecko)Chrome/'\n                  '65.0.3325.146Safari/537.36 ',\n}\ndriver.set_page_load_timeout(10)\ndriver.set_script_timeout(10)\nif_black = Image.open('if_black.png')\nif_white_blue1 = Image.open('if_white_blue1.png')\nif_white_blue2 = Image.open('if_white_blue2.png')\nfile = open('stock_code.json', 'r', encoding='utf-16')\nstock = json.load(file)\n\n\ndef if_stop(stock_id, a):\n    while (1):\n        try:\n            html = requests.get('https://gupiao.baidu.com/stock/' + stock_id + '.html', headers=stock_headers,\n                                timeout=20)\n            html.encoding = html.apparent_encoding\n            r = Selector(text=html.text)\n        except (RequestException, TimeoutException) as e:\n            continue\n        else:\n            try:\n                situation = r.xpath('//*[@id=\"app-wrap\"]/div[2]/div/h1/span/text()').extract()[0]\n            except IndexError:\n                continue\n            else:\n                if a == 2:\n                    stop = situation[:2]\n                    break\n                elif a == 3:\n                    stop = situation[:3]\n                    break\n    return stop\n\n\ndef get_stock_image(stock_id):\n    try:\n        driver.get('https://gupiao.baidu.com/stock/' + stock_id + '.html')\n    except:\n        print(stock_id + 'timeout')\n        return 'error'\n    else:\n        driver.execute_script(\"\"\"\n            (function () {\n                var y = 0;\n                var step = 100;\n
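                // Editor note (added): scrolling down in 100px steps forces\n                // lazily rendered chart content to load before the screenshot.\n                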
window.scroll(0, 0);\n function f() {\n if (y < document.body.scrollHeight) {\n y += step;\n window.scroll(0, y);\n setTimeout(f, 50);\n } else {\n window.scroll(0, 0);\n document.title += \"scroll-done\";\n }\n }setTimeout(f, 1000);\n })();\n \"\"\")\n for i in range(30):\n if \"scroll-done\" in driver.title:\n break\n time.sleep(2)\n try:\n element = WebDriverWait(driver, 10).until(\n EC.presence_of_element_located((By.ID, \"d-chart\")))\n except:\n return 'error'\n try:\n situation = driver.find_element_by_xpath('//*[@id=\"app-wrap\"]/div[2]/div/h1/span').text\n except:\n try:\n driver.find_element_by_xpath('/html/body/div[2]/div/h2')\n except NoSuchElementException:\n return 'error'\n else:\n print(stock_id + '404')\n return 'error_404'\n else:\n if situation[:3] == '已退市' or situation[:3] == '未上市':\n if situation[:3] == if_stop(stock_id, 3):\n print(stock_id + situation[:3])\n if stock_id in stock_error:\n stock_error.remove(stock_id)\n return 'error_404'\n else:\n return 'error'\n if situation[:2] != if_stop(stock_id, 2):\n return 'error'\n print(stock_id + situation[:2])\n date = extract_date(situation)\n driver.save_screenshot('../../Public/daily_stock_img/' + stock_id[2:] + '_' + date + '.png')\n try:\n p = driver.find_element_by_id('d-chart')\n left = p.location['x']\n top = p.location['y']\n elementWidth = p.location['x'] + p.size['width']\n elementHeight = p.location['y'] + p.size['height']\n picture = Image.open('../../Public/daily_stock_img/' + stock_id[2:] + '_' + date + '.png')\n picture = picture.crop((left, top, elementWidth, elementHeight))\n picture = picture.crop((88, 5, 520, 230))\n picture.save('../../Public/daily_stock_img/' + stock_id[2:] + '_' + date + '.png')\n image = Image.open('../../Public/daily_stock_img/' + stock_id[2:] + '_' + date + '.png')\n except:\n return 'error'\n else:\n for i in range(3):\n if if_black == image:\n print(stock_id + 'black_same')\n stop = 'error'\n break\n elif if_white_blue1 == image and situation[:2] != '停牌':\n print(stock_id + 'white_blue_same')\n stop = 'error'\n break\n elif if_white_blue2 == image and situation[:2] != '停牌':\n print(stock_id + 'white_blue_same')\n stop = 'error'\n break\n else:\n if image == if_white_blue1 and situation[:2] != '停牌':\n print(stock_id + 'white_blue_same')\n stop = 'error'\n break\n else:\n image.save('../../Public/daily_stock_img/' + stock_id[2:] + '_' + date + '.png')\n print(stock_id + 'success')\n time.sleep(1)\n stop = 'true'\n return stop\n\n\ndef get_true_image(stock, a, b):\n print('start_thread:' + str(a))\n i = a\n while i < b:\n if_success = get_stock_image(stock[i])\n if if_success == 'error':\n if stock[i] not in stock_error:\n stock_error.append(stock[i])\n elif if_success == 'error_404':\n stock_not_exist.append(stock[i])\n i += 1\n elif if_success == 'true':\n if stock[i] in stock_error:\n stock_error.remove(stock[i])\n stock_id_array.append(stock[i])\n i += 1\n i = 0\n while i < len(stock_error):\n if_success = get_stock_image(stock_error[i])\n if if_success == 'error':\n continue\n elif if_success == 'error_404':\n stock_not_exist.append(stock_error[i])\n i += 1\n elif if_success == 'true':\n stock_id_array.append(stock_error[i])\n i += 1\n print(len(stock_not_exist) + len(stock_id_array))\n\n\nthreads = []\nt1 = threading.Thread(target=get_true_image, args=(stock, 0, 400))\nthreads.append(t1)\nt2 = threading.Thread(target=get_true_image, args=(stock, 400, 800))\nthreads.append(t2)\nt3 = threading.Thread(target=get_true_image, args=(stock, 800, 1200))\nthreads.append(t3)\nt4 = 
threading.Thread(target=get_true_image, args=(stock, 1200, 1600))\nthreads.append(t4)\nt5 = threading.Thread(target=get_true_image, args=(stock, 1600, 2000))\nthreads.append(t5)\nt6 = threading.Thread(target=get_true_image, args=(stock, 2000, 2400))\nthreads.append(t6)\nt7 = threading.Thread(target=get_true_image, args=(stock, 2400, len(stock)))\nthreads.append(t7)\n\nif __name__ == '__main__':\n print(len(stock))\n start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n print(start_time)\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n driver.quit()\n","repo_name":"chocoford/Stockphet-Server-Side","sub_path":"Resource/exc/get_image.py","file_name":"get_image.py","file_ext":"py","file_size_in_byte":8136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"9183918367","text":"import cv2\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nimport joblib\n\n\ndef dilate_erode(img, k1, i1, k2, i2):\n kernel = np.ones(k1)\n ans = cv2.erode(img, kernel, iterations=i1)\n kernel = np.ones(k2)\n ans = cv2.dilate(ans, kernel, iterations=i2)\n return ans\n\n\ndef erode_dilate(img, k1, i1, k2, i2):\n kernel = np.ones(k2)\n ans = cv2.dilate(img, kernel, iterations=i2)\n kernel = np.ones(k1)\n ans = cv2.erode(ans, kernel, iterations=i1)\n return ans\n\n\n\nimg_ = cv2.imread('../outputs/placa.png',1)\nh, w, d = img_.shape\nnn_numeros = joblib.load('../model/saved/nn_numbers.joblib')\nnn_letras = joblib.load('../model/saved/nn_letters.joblib')\nsc_numeros = joblib.load('../model/saved/scaler_numbers.joblib')\nsc_letras = joblib.load('../model/saved/scaler_letters.joblib')\nletter_dig = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',\n 18:'S', 19:'T', 20:'U', 21:'V', 22:'W', 23: 'X', 24:'Y', 25:'Z'}\n\n\ncopy = np.reshape(img_, (h*w, d))\n\nkm = KMeans(n_clusters=2)\ny_pred = km.fit_predict(copy)\nif ((y_pred==0).sum()<(y_pred==1).sum()):\n y_pred = 1-y_pred\n\n\narr = np.zeros((h,w,d), dtype=np.uint8)\narr[:,:,0] = y_pred.reshape((h,w))*255\narr[:,:,1] = y_pred.reshape((h,w))*255\narr[:,:,2] = y_pred.reshape((h,w))*255\n\narr = dilate_erode(arr, (2,1),1,(2,1),1)\n#arr = erode_dilate(arr, (1,2),1,(1,2),1)\n\nimg_cont = arr[:,:,0].astype(np.uint8)\n#print(img_cont, type(img_cont[0,0]))\ncv2.imshow('cluster',arr)\ncv2.waitKey(0)\ncnt, hie = cv2.findContours(img_cont, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]\n\ncharacter_array = []\n\nfor i, cont in enumerate(cnt):\n x,y,wc,hc=cv2.boundingRect(cont)\n u1 = (w*h)/25\n u2 = (w/12)\n u3 = (h/3)\n u4 = (w*h/2)\n if (wc*hc > u1) and (wc>u2) and (hc>u3) and (wc*hc 0:\n len_t -= 1\n need[item] -= 1\n if len_t == 0:\n while True:\n item = s[i]\n if need[item] == 0:\n break\n need[item] += 1\n i += 1\n if j - i < result[1] - result[0]:\n result = (i, j)\n need[s[i]] += 1\n len_t += 1\n i += 1\n return \" \" if result[1] > len(s) else s[result[0]: result[1]+1]\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.minWindow(\"adfasdfsafsagagassa\", \"ssg\"))\n","repo_name":"xinleiw/leetcode","sub_path":"difficulty/min_window.py","file_name":"min_window.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} 
+{"seq_id":"16140664554","text":"#Program 10\n\n'''\nWrite a program to display those strings which are starting with \"A\" or \"a\" of a given string of list\n\nName: Vikhyat Jagini\nClass: 12\nDate of Execution: 6/7/2021\n\n'''\n\ndef chk(l):\n for i in l:\n if(i[0] in \"Aa\"):\n print(i)\n\nprint(\"Enter a list of strings to see which elements start with A or a:\")\nl=eval(input(\"Enter a list:\"))\nchk(l)\n","repo_name":"Vikhyat04/Grade-12-Python-programs","sub_path":"LabProgram_10_6.7.21.py","file_name":"LabProgram_10_6.7.21.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19598304566","text":"import json\nimport logging\nimport re\nfrom typing import Any, List, Optional\n\nfrom qtpy import QtCore, QtDesigner, QtWidgets\n\nfrom ..utilities import copy_to_clipboard, get_clipboard_text\nfrom ..utilities.macro import parse_macro_string\n\nlogger = logging.getLogger(__name__)\n\n\ndef update_property_for_widget(widget: QtWidgets.QWidget, name: str, value):\n \"\"\"Update a Property for the given widget in the designer.\"\"\"\n formWindow = QtDesigner.QDesignerFormWindowInterface.findFormWindow(widget)\n logger.info(\"Updating %s.%s = %s\", widget.objectName(), name, value)\n if formWindow:\n formWindow.cursor().setProperty(name, value)\n else:\n setattr(widget, name, value)\n\n\nclass DictionaryTable(QtWidgets.QTableWidget):\n def __init__(self, dictionary=None, *args, parent=None, **kwargs):\n super().__init__(*args, parent=parent, **kwargs)\n\n self.setColumnCount(2)\n self.setMinimumSize(300, 150)\n self.setHorizontalHeaderLabels([\"Key\", \"Value\"])\n\n self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(self._context_menu)\n self.dictionary = dictionary\n\n def _context_menu(self, pos):\n self.menu = QtWidgets.QMenu(self)\n item = self.itemAt(pos)\n if item is not None:\n\n def copy(*_):\n copy_to_clipboard(item.text())\n\n copy_action = self.menu.addAction(f\"&Copy: {item.text()}\")\n copy_action.triggered.connect(copy)\n\n clipboard_text = get_clipboard_text() or \"\"\n\n def paste(*_):\n item.setText(clipboard_text)\n\n paste_action = self.menu.addAction(f\"&Paste: {clipboard_text[:100]}\")\n paste_action.triggered.connect(paste)\n\n def delete_row(*_):\n self.removeRow(item.row())\n\n delete_row_action = self.menu.addAction(\"&Delete row...\")\n delete_row_action.triggered.connect(delete_row)\n\n self.menu.addSeparator()\n\n def add_row(*_):\n row = self.rowCount()\n self.setRowCount(row + 1)\n self.setItem(row, 0, QtWidgets.QTableWidgetItem(\"\"))\n self.setItem(row, 1, QtWidgets.QTableWidgetItem(\"\"))\n\n add_row_action = self.menu.addAction(\"&Add row...\")\n add_row_action.triggered.connect(add_row)\n self.menu.exec_(self.mapToGlobal(pos))\n\n @property\n def dictionary(self) -> dict:\n items = [(self.item(row, 0), self.item(row, 1)) for row in range(self.rowCount())]\n key_value_pairs = [(key.text() if key else \"\", value.text() if value else \"\") for key, value in items]\n return {key.strip(): value for key, value in key_value_pairs}\n\n @dictionary.setter\n def dictionary(self, dct):\n dct = dct or {}\n self.setRowCount(len(dct))\n for row, (key, value) in enumerate(dct.items()):\n self.setItem(row, 0, QtWidgets.QTableWidgetItem(key))\n self.setItem(row, 1, QtWidgets.QTableWidgetItem(value))\n\n self.resizeColumnsToContents()\n self.resizeRowsToContents()\n\n\nclass StringListTable(QtWidgets.QTableWidget):\n def 
__init__(self, values=None, *args, parent=None, **kwargs):\n        super().__init__(*args, parent=parent, **kwargs)\n\n        self.setColumnCount(1)\n        self.setMinimumSize(300, 150)\n        self.setHorizontalHeaderLabels([\"Value\"])\n\n        self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n        self.customContextMenuRequested.connect(self._context_menu)\n        self.values = values\n\n        # we let user swap points by dragging one on-top of the other\n        self.setDragEnabled(True)\n        self.setAcceptDrops(True)\n        self.drag_source_row = None\n\n    def startDrag(self, event):\n        self.drag_source_row = self.currentRow()\n        # call super() since we use the default dragging functionality\n        super().startDrag(event)\n\n    def dropEvent(self, event):\n        # don't call super() here, functionality messes with our swapping!\n        source_row = self.drag_source_row\n        target_row = self.indexAt(event.pos()).row()\n        if target_row >= 0 and source_row != target_row:\n            self.swap_rows(source_row, target_row)\n\n    def swap_rows(self, row1, row2):\n        item1 = self.takeItem(row1, 0)\n        item2 = self.takeItem(row2, 0)\n        self.setItem(row1, 0, item2)\n        self.setItem(row2, 0, item1)\n\n    def _context_menu(self, pos):\n        self.menu = QtWidgets.QMenu(self)\n        item = self.itemAt(pos)\n        if item is not None:\n\n            def copy(*_):\n                copy_to_clipboard(item.text())\n\n            copy_action = self.menu.addAction(f\"&Copy: {item.text()}\")\n            copy_action.triggered.connect(copy)\n\n            clipboard_text = get_clipboard_text()\n\n            def paste(*_):\n                item.setText(clipboard_text)\n\n            paste_action = self.menu.addAction(f\"&Paste: {clipboard_text}\")\n            paste_action.triggered.connect(paste)\n\n            def delete_row(*_):\n                self.removeRow(item.row())\n\n            delete_row_action = self.menu.addAction(\"&Delete row...\")\n            delete_row_action.triggered.connect(delete_row)\n\n            self.menu.addSeparator()\n\n            def add_row(*_):\n                row = self.rowCount()\n                self.setRowCount(row + 1)\n                self.setItem(row, 0, QtWidgets.QTableWidgetItem(\"\"))\n\n            add_row_action = self.menu.addAction(\"&Add row...\")\n            add_row_action.triggered.connect(add_row)\n        self.menu.exec_(self.mapToGlobal(pos))\n\n    @property\n    def values(self) -> list:\n        items = [self.item(row, 0) for row in range(self.rowCount())]\n        return [item.text().strip() for item in items if item is not None]\n\n    @values.setter\n    def values(self, values):\n        values = values or []\n        self.setRowCount(len(values))\n        for row, value in enumerate(values):\n            self.setItem(row, 0, QtWidgets.QTableWidgetItem(str(value)))\n\n        self.resizeColumnsToContents()\n        self.resizeRowsToContents()\n\n\nclass _PropertyHelper:\n    def __init__(self, *args, property_widget, property_name, **kwargs):\n        super().__init__(*args, **kwargs)\n        self._property_name = property_name\n        self._property_widget = property_widget\n\n        value = None\n        try:\n            value = self.value_from_widget\n            self.set_value_from_widget(\n                widget=self._property_widget,\n                attr=self._property_name,\n                value=value,\n            )\n        except Exception:\n            logger.exception(\n                \"Failed to set helper widget %s state from %s=%s\",\n                type(self).__name__,\n                self._property_name,\n                value,\n            )\n\n    def set_value_from_widget(self, widget, attr, value):\n        \"\"\"For subclasses.\"\"\"\n        ...\n\n    @property\n    def value_from_widget(self):\n        return getattr(self._property_widget, self._property_name, None)\n\n    @property\n    def saved_value(self) -> Optional[Any]:\n        return None\n\n    def save_settings(self):\n        value = self.saved_value\n        if value is not None:\n            update_property_for_widget(self._property_widget, self._property_name, value)\n\n\nclass PropertyRuleEditor(_PropertyHelper, QtWidgets.QPushButton):\n    def 
__init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setAutoDefault(False)\n self.setDefault(False)\n self.clicked.connect(self._open_rules_editor)\n self.setText(\"&Rules...\")\n\n def _open_rules_editor(self):\n from .rules_editor import RulesEditor\n\n self._rules_editor = RulesEditor(self._property_widget, parent=self)\n self._rules_editor.exec_()\n\n @property\n def saved_value(self) -> Optional[str]:\n return None\n\n\nclass PropertyCheckbox(_PropertyHelper, QtWidgets.QCheckBox):\n def set_value_from_widget(self, widget, attr, value):\n self.setChecked(bool(value))\n\n @property\n def saved_value(self) -> bool:\n return self.isChecked()\n\n\nclass PropertyLineEdit(_PropertyHelper, QtWidgets.QLineEdit):\n def set_value_from_widget(self, widget, attr, value):\n self.setText(value or \"\")\n\n @property\n def saved_value(self) -> Optional[str]:\n return self.text().strip()\n\n\nclass PropertyIntSpinBox(_PropertyHelper, QtWidgets.QSpinBox):\n def set_value_from_widget(self, widget, attr, value):\n self.setValue(value)\n\n @property\n def saved_value(self) -> int:\n return self.value()\n\n\nclass PropertyMacroTable(_PropertyHelper, DictionaryTable):\n def set_value_from_widget(self, widget, attr, value):\n try:\n macros = parse_macro_string(value or \"\")\n except Exception:\n logger.exception(\"Failed to parse macro string: %r\", value)\n else:\n self.dictionary = macros\n\n @property\n def saved_value(self) -> Optional[str]:\n return json.dumps(self.dictionary)\n\n\nclass PropertyStringList(_PropertyHelper, StringListTable):\n def set_value_from_widget(self, widget, attr, value):\n self.values = value\n\n @property\n def saved_value(self) -> Optional[List[str]]:\n return self.values\n\n\ndef get_qt_properties(cls):\n \"\"\"Yields all QMetaProperty instances from a given class.\"\"\"\n meta_obj = cls.staticMetaObject\n for prop_idx in range(meta_obj.propertyCount()):\n prop = meta_obj.property(prop_idx)\n if prop is not None and prop.isDesignable():\n yield prop.name()\n\n\ndef get_helper_label_text(attr: str) -> str:\n spaced = re.sub(\"(PyDM|.)([A-Z])\", r\"\\1 \\2\", attr)\n return spaced.strip().capitalize()\n\n\nclass BasicSettingsEditor(QtWidgets.QDialog):\n \"\"\"\n QDialog for user-friendly editing of essential PyDM properties in Designer.\n\n Parameters\n ----------\n widget : PyDMWidget\n The widget which we want to edit.\n \"\"\"\n\n _common_attributes_ = {\n \"channel\": PropertyLineEdit,\n \"display\": PropertyLineEdit,\n \"macros\": PropertyMacroTable,\n \"filenames\": PropertyStringList,\n \"rules\": PropertyRuleEditor,\n }\n\n _type_to_widget_ = {\n str: PropertyLineEdit,\n int: PropertyIntSpinBox,\n bool: PropertyCheckbox,\n \"QStringList\": PropertyStringList,\n }\n\n def __init__(self, widget, parent=None):\n super(BasicSettingsEditor, self).__init__(parent)\n\n self.widget = widget\n\n # PV names can be pretty wide...\n self.setMinimumSize(400, 150)\n\n self.setSizePolicy(\n QtWidgets.QSizePolicy.MinimumExpanding,\n QtWidgets.QSizePolicy.MinimumExpanding,\n )\n\n self.property_widgets = []\n self.setup_ui()\n\n def setup_ui(self):\n \"\"\"\n Create the required UI elements for the form.\n\n Returns\n -------\n None\n \"\"\"\n self.setWindowTitle(\"PyDM Widget Basic Settings Editor\")\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.setContentsMargins(5, 5, 5, 5)\n vlayout.setSpacing(5)\n self.setLayout(vlayout)\n\n settings_form = QtWidgets.QFormLayout()\n settings_form.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)\n 
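# NOTE (added): ExpandingFieldsGrow lets the wide editors (channel line edits,\n        # macro tables) use the full dialog width. Each row below is produced by\n        # _create_helper_widgets, which pairs a designable Qt property with a small\n        # editor widget; row labels come from get_helper_label_text, e.g. the\n        # (illustrative) property name \"alarmSensitiveContent\" becomes the label\n        # \"Alarm sensitive content\".\n        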
vlayout.addLayout(settings_form)\n\n for helper_widget in self._create_helper_widgets(settings_form):\n self.property_widgets.append(helper_widget)\n\n buttons_layout = QtWidgets.QHBoxLayout()\n self.save_btn = QtWidgets.QPushButton(\"&Save\", parent=self)\n self.save_btn.setAutoDefault(True)\n self.save_btn.setDefault(True)\n self.save_btn.clicked.connect(self.save_changes)\n self.update_btn = QtWidgets.QPushButton(\"&Update\", parent=self)\n self.update_btn.clicked.connect(self.save_changes)\n cancel_btn = QtWidgets.QPushButton(\"&Cancel\", parent=self)\n cancel_btn.clicked.connect(self.cancel_changes)\n buttons_layout.addStretch()\n buttons_layout.addWidget(cancel_btn)\n buttons_layout.addWidget(self.update_btn)\n buttons_layout.addWidget(self.save_btn)\n\n vlayout.addLayout(buttons_layout)\n\n def _create_helper_widgets(self, settings_form: QtWidgets.QFormLayout):\n other_attrs = []\n for attr in sorted(get_qt_properties(type(self.widget))):\n if attr not in self._common_attributes_ and attr not in other_attrs:\n other_attrs.append(attr)\n\n for attr in list(self._common_attributes_) + other_attrs:\n prop = getattr(type(self.widget), attr, None)\n if prop is None:\n continue\n\n prop_type = getattr(prop, \"type\", None)\n helper_widget_cls = self._common_attributes_.get(attr, self._type_to_widget_.get(prop_type, None))\n if helper_widget_cls is not None:\n helper_widget = helper_widget_cls(\n property_widget=self.widget,\n property_name=attr,\n )\n label_text = get_helper_label_text(attr)\n settings_form.addRow(f\"&{label_text}\", helper_widget)\n yield helper_widget\n\n @QtCore.Slot()\n def save_changes(self):\n \"\"\"Save the new settings on the widget properties.\"\"\"\n for helper in self.property_widgets:\n try:\n helper.save_settings()\n except Exception:\n logger.exception(\n \"Failed to save settings for %s.%s = %r\",\n self.widget.objectName(),\n helper._property_name,\n helper.saved_value,\n )\n\n if self.sender() == self.save_btn:\n self.accept()\n\n @QtCore.Slot()\n def cancel_changes(self):\n \"\"\"Abort the changes and close the dialog.\"\"\"\n self.close()\n","repo_name":"slaclab/pydm","sub_path":"pydm/widgets/designer_settings.py","file_name":"designer_settings.py","file_ext":"py","file_size_in_byte":13803,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"12"} +{"seq_id":"69852433941","text":"#import libraries\r\nimport serial\r\nimport matplotlib.pyplot as plt, mpld3\r\nimport numpy as np\r\nimport time\r\nfrom threading import Thread\r\nimport http.server\r\nimport socketserver\r\nfrom datetime import datetime\r\nfrom matplotlib.dates import DateFormatter\r\n\r\ndef update_line(l, ax, x, y, max_len):\r\n # Plot updating function\r\n if x.size > max_len:\r\n x = x[-max_len:-1]\r\n y = y[-max_len:-1]\r\n\r\n l.set_ydata(y)\r\n l.set_xdata(x)\r\n ax.relim()\r\n ax.autoscale_view()\r\n\r\ndef plot_data():\r\n #initialize serial connection \r\n s = serial.Serial('COM6',9600)\r\n\r\n # Initialize empty plots\r\n plt.ion()\r\n fig = plt.figure()\r\n axt1 = fig.add_subplot(221)\r\n axt2 = fig.add_subplot(222)\r\n axh1 = fig.add_subplot(223)\r\n axh2 = fig.add_subplot(224)\r\n\r\n t1, = axt1.plot([],[],'r-')\r\n t2, = axt2.plot([],[],'r-')\r\n h1, = axh1.plot([],[],'b-')\r\n h2, = axh2.plot([],[],'b-')\r\n\r\n # Add plot labels\r\n axt1.set_ylabel(\"Temperature [C]\")\r\n axh1.set_ylabel(\"Humidity [%]\")\r\n axh1.set_xlabel(\"Seconds Ago\")\r\n axh2.set_xlabel(\"Seconds Ago\")\r\n # Initialize 'time'\r\n t = np.array([0])\r\n\r\n 
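# NOTE (added): update_line() above mutates the existing Line2D objects via\r\n    # set_xdata()/set_ydata() and rescales with relim()/autoscale_view(), which is\r\n    # cheaper than redrawing the whole figure; np.append() below copies each array\r\n    # on every sample, so collections.deque(maxlen=...) would be a constant-time\r\n    # alternative for long runs.\r\n    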
# Initialize Data Arrays\r\n temp1 = np.array([])\r\n temp2 = np.array([])\r\n humidity1 = np.array([])\r\n humidity2 = np.array([])\r\n soilMoisture = np.array([])\r\n while True:\r\n\r\n #read data from serial port\r\n res_b = s.readline()\r\n res = res_b.decode(\"utf-8\")\r\n\r\n # split string into arrays of data\r\n rawData = res.split(' ')\r\n temp1 = np.append(temp1, float(rawData[0]))\r\n humidity1 = np.append(humidity1,float(rawData[1]))\r\n temp2 = np.append(temp2,float(rawData[2]))\r\n humidity2 = np.append(humidity2,float(rawData[3]))\r\n\r\n # update 'time'\r\n t_label = np.append(t, datetime.now())\r\n t = np.append(t, t[-1]+1)\r\n\r\n # update plots\r\n max_plot_len = 50\r\n update_line(t1,axt1,t[0:-1],temp1,max_plot_len)\r\n update_line(t2,axt2,t[0:-1],temp2,max_plot_len)\r\n update_line(h1,axh1,t[0:-1],humidity1,max_plot_len)\r\n update_line(h2,axh2,t[0:-1],humidity2,max_plot_len)\r\n\r\n #plt.draw()\r\n plt.savefig('data.png')\r\n plt.pause(1)\r\n #mpld3.save_html(fig,'test.html')\r\n #f=open(\"test.html\", \"w\")\r\n #f.write(\"\\n\")\r\n #f.write(\"\\\"some\")\r\n #f.close()\r\n #time.sleep(1)\r\n\r\ndef web_server():\r\n #set up web server\r\n PORT = 8000\r\n\r\n Handler = http.server.SimpleHTTPRequestHandler\r\n\r\n with socketserver.TCPServer((\"\", PORT), Handler) as httpd:\r\n print(\"serving at port\", PORT)\r\n httpd.serve_forever()\r\n\r\nif __name__ == \"__main__\":\r\n Thread(target = plot_data).start()\r\n Thread(target = web_server).start()","repo_name":"jdhuun/spacebucket","sub_path":"readSerial.py","file_name":"readSerial.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"5392427843","text":"# Third-party\nfrom django.db.models.deletion import Collector\n\n# First-party/Local\nfrom controlpanel.api import cluster\nfrom controlpanel.api.models import App, AppS3Bucket, S3Bucket, User, UserS3Bucket\nfrom controlpanel.api.models.access_to_s3bucket import AccessToS3Bucket\nfrom controlpanel.api.tasks.handlers.base import BaseModelTaskHandler, BaseTaskHandler\n\n\nclass CreateS3Bucket(BaseModelTaskHandler):\n model = S3Bucket\n name = \"create_s3bucket\"\n\n def handle(self, bucket_owner=None):\n bucket_owner = bucket_owner or \"USER\"\n self.object.cluster.create(owner=bucket_owner)\n self.complete()\n\n\nclass GrantAppS3BucketAccess(BaseModelTaskHandler):\n model = AppS3Bucket\n name = 'grant_app_s3bucket_access'\n\n def handle(self):\n cluster.App(self.object.app).grant_bucket_access(\n self.object.s3bucket.arn,\n self.object.access_level,\n self.object.resources,\n )\n self.complete()\n\n\nclass GrantUserS3BucketAccess(BaseModelTaskHandler):\n model = UserS3Bucket\n name = \"grant_user_s3bucket_access\"\n\n def handle(self):\n if self.object.s3bucket.is_folder:\n cluster.User(self.object.user).grant_folder_access(\n root_folder_path=self.object.s3bucket.name,\n access_level=self.object.access_level,\n paths=self.object.paths,\n )\n else:\n cluster.User(self.object.user).grant_bucket_access(\n bucket_arn=self.object.s3bucket.arn,\n access_level=self.object.access_level,\n path_arns=self.object.resources,\n )\n self.complete()\n\n\nclass S3BucketRevokeUserAccess(BaseTaskHandler):\n name = \"revoke_user_s3bucket_access\"\n\n def handle(self, bucket_identifier, bucket_user_pk, is_folder):\n bucket_user = User.objects.get(pk=bucket_user_pk)\n if is_folder:\n cluster.User(bucket_user).revoke_folder_access(bucket_identifier)\n else:\n 
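# (added) folder-backed buckets were revoked by root folder path above;\n            # plain buckets fall through to this bucket-level revoke instead.\n            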
cluster.User(bucket_user).revoke_bucket_access(bucket_identifier)\n        self.complete()\n\n\nclass S3BucketRevokeAppAccess(BaseTaskHandler):\n    name = \"revoke_app_s3bucket_access\"\n\n    def handle(self, bucket_arn, app_pk):\n        try:\n            app = App.objects.get(pk=app_pk)\n        except App.DoesNotExist:\n            # if the app doesn't exist, there is nothing to revoke, so mark completed and stop\n            return self.complete()\n        cluster.App(app).revoke_bucket_access(bucket_arn)\n        self.complete()\n\n\nclass S3BucketRevokeAllAccess(BaseModelTaskHandler):\n    model = S3Bucket\n    name = \"s3bucket_revoke_all_access\"\n\n    def handle(self, *args, **kwargs):\n        \"\"\"\n        When an S3Bucket is soft-deleted, the related objects that handle access will\n        remain in place. In order to keep IAM roles updated, this task collects objects\n        that would have been deleted by a cascade, and revokes access to deleted bucket\n        \"\"\"\n        task_user = User.objects.filter(pk=self.task_user_pk).first()\n        collector = Collector(using=\"default\")\n        collector.collect([self.object])\n        for model, instance in collector.instances_with_model():\n            if not issubclass(model, AccessToS3Bucket):\n                continue\n\n            instance.current_user = task_user\n            instance.revoke_bucket_access()\n\n        self.complete()\n","repo_name":"ministryofjustice/analytics-platform-control-panel-public","sub_path":"controlpanel/api/tasks/handlers/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"1351192507","text":"def checkNum(n):\n    \"\"\"Prompt for a number in the interval [-89, 90] and validate it\"\"\"\n    while True:\n        if n<-89 or n>90:\n            a = int(input('Re-enter n = '))\n            n = a\n        else:\n            return True\n\nprint(checkNum.__doc__)\nb = int(input('n = '))\nprint(checkNum(b))","repo_name":"TranLuan2608/pythonProject","sub_path":"Lab1_51.py","file_name":"Lab1_51.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"29875678042","text":"from PyQt5.QtWidgets import QWidget, QGridLayout\n\nclass BaseFrame(QWidget):\n    def __init__(self):\n        super(BaseFrame, self).__init__()\n        self.setWindowTitle(\"Who Wants to be a programmer???\")\n        self.setFixedWidth(1000)\n        self.setStyleSheet(\"background: #161219;\")\n        self.setLayout(QGridLayout())\n\n\n\n","repo_name":"SpinalSubset69/TriviaGame","sub_path":"base_frame.py","file_name":"base_frame.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"35259274652","text":"import requests\nfrom datetime import datetime\nimport os\n\nSHEETY_TOKEN = os.environ[\"SHEETY_TOKEN\"]\nAPP_ID = os.environ[\"APP_ID\"]\nAPI_KEY = os.environ[\"API_KEY\"]\nBASE_URL = os.environ[\"BASE_URL\"]\nSHEETY_URL = os.environ[\"SHEETY_URL\"]\nGENDER = \"male\"\nWEIGHT_KG = \"60\"\nHEIGHT_CM = \"167\"\nAGE = \"22\"\n\nauth_data = {\n    \"x-app-id\": APP_ID,\n    \"x-app-key\": API_KEY,\n    \"x-remote-user-id\": \"0\"\n}\n\nexercise = input(\"Tell me which exercise you did:\")\nquery_data = {\n    \"query\": exercise,\n    \"gender\": GENDER,\n    \"weight_kg\": WEIGHT_KG,\n    \"height_cm\": HEIGHT_CM,\n    \"age\": AGE\n}\nresponse = requests.post(url=f\"{BASE_URL}/natural/exercise\", json=query_data, headers=auth_data)\nexercise_data = response.json()[\"exercises\"]\n\n\ndate = datetime.now()\nformat_date = date.strftime(\"%d/%m/%Y\")\nformat_time = date.strftime(\"%H:%M:%S\")\nheaders = {\"Authorization\": f\"Bearer 
{SHEETY_TOKEN}\"}\nfor exercise in exercise_data:\n workout = {\n \"workout\":{\n \"date\": format_date,\n \"time\": format_time,\n \"exercise\": exercise[\"name\"].title(),\n \"duration\": exercise[\"duration_min\"],\n \"calories\": exercise[\"nf_calories\"]\n }\n }\n print(workout)\n response = requests.post(url=SHEETY_URL, json=workout,headers=headers)\n response.raise_for_status()\n print(response.json())\n","repo_name":"shay-cohen/100-Days-Of-Code-Python-Bootcamp","sub_path":"day 38 - Workout Tracking with Google Sheets/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6578147737","text":"import unittest\nfrom unittest.mock import patch, MagicMock\nimport ssl\n\nclass TestSSLContext(unittest.TestCase):\n @patch('ssl.SSLContext')\n def test_create_ssl_context(self, mock_ssl_context):\n from your_module import create_ssl_context # replace 'your_module' with the name of your module\n\n mock_context = MagicMock()\n mock_ssl_context.return_value = mock_context\n\n result = create_ssl_context()\n\n mock_ssl_context.assert_called_once_with(ssl.PROTOCOL_TLSv1_2)\n self.assertEqual(result, mock_context)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"Kamel773/LLM-code-refine","sub_path":"data/PythonSecurityEval/164/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"1196579290","text":"import numpy as np\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nimport math\nfrom ecdfEstimate import *\nfrom scipy.optimize import least_squares\nimport pandas as pd\nimport timeit\nfrom extentForPlot import *\nfrom scipy.io import loadmat\nimport seaborn as sns\n\nclass fit2:\n def __init__(self,path,parameter,combined,outputpath):\n self.path=path\n self.parameterpath=parameter\n \n ### parameters used\n filecontent=np.load(self.parameterpath)\n Polym_speed=filecontent['Polym_speed']\n TaillePreMarq=filecontent['TaillePreMarq']\n TailleSeqMarq=filecontent['TailleSeqMarq']\n TaillePostMarq=filecontent['TaillePostMarq']\n EspaceInterPolyMin=filecontent['EspaceInterPolyMin']\n FrameLen=filecontent['FrameLen']\n Intensity_for_1_Polym=filecontent['Intensity_for_1_Polym']\n FreqEchImg=filecontent['FreqEchImg']\n DureeSignal=filecontent['DureeSignal']\n \n FreqEchSimu = 1/(EspaceInterPolyMin/Polym_speed) # how many interval(possible poly start position) in 1s\n self.FreqEchSimu =FreqEchSimu \n \n ####### parameters for the plots\n fsz=16 #figure size\n lw = 2\n msz=10\n \n ## function needed to set the the parameters for the color map\n # cm_jet= plt.cm.get_cmap('jet') # set the colormap to jet array\n \n DataFilePath0 = outputpath+'python_Results2'\n if os.path.exists(DataFilePath0):\n shutil.rmtree(DataFilePath0, ignore_errors = True)\n\n os.mkdir(DataFilePath0)\n\n xlsfilename = DataFilePath0 + '/fit2_results.xls'\n\n # Setting the Names of the Data output in the excel sheet\n xls_cont = pd.DataFrame({'Data': [],'k1p': [], 'k1m': [], 'k2': [],'p1': [], 'p2': [],\n 'l1':[],'l2':[],'A1':[],'A2':[], 'Obj': [],'KS test':[],'Nuclei': [],'Frames': []}) \n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(xlsfilename, engine='xlsxwriter')\n\n # Convert the dataframe to an XlsxWriter Excel object.\n xls_cont.to_excel(writer, sheet_name='Sheet1', index=False)\n \n ## loading the result 
of the deconvolution\n        NPZFilePath = path\n        file_name_list = np.array(os.listdir(NPZFilePath)) # list of the data\n        self.file_name_list=file_name_list\n        nexp = len(file_name_list) # length of the list\n        nfiles=nexp\n        \n        print(file_name_list)\n        #######################################################################\n        pooled = combined #### if this is 1, pool all the result files from NPZfilePath\n        ############### if not, use each file separately########################\n        \n        if not pooled:\n            nfiles=nexp\n        else:\n            nfiles=1\n        \n        ### starting the fit for each file\n        for ifile in range(nfiles):\n\n            \n            if pooled: \n                \n                ### first compute max dimension\n                nmax=1\n                nmaxpos=1\n\n                for iii in range(nexp):\n                    fname=file_name_list[iii]\n                    ffname=NPZFilePath+fname\n                    if '.npz' in ffname:\n                        fnameContent=np.load(ffname)\n                    else:\n                        fnameContent=loadmat(ffname)\n                    DataPred = fnameContent['DataPred']\n                    DataExp=fnameContent['DataExp']\n                    Fit = fnameContent['Fit']\n                    PosPred=fnameContent['PosPred']\n                    n2 =DataExp.shape\n                    n3= PosPred.shape\n                    \n                    if n2[0] >nmax:\n                        nmax = n2[0]\n                    \n                    if n3[0]> nmaxpos:\n                        nmaxpos = n3[0]\n                \n                #### lump files, Procrustes method \n                dataExp=np.empty((nmax, 0), int)\n                dataPred=np.empty((nmax,0), int)\n                posPred= np.empty((nmaxpos,0), int)\n                tmax = np.empty((0, n2[1]), int)\n\n                \n                \n                for iii in range(nexp): \n                    fname=file_name_list[iii]\n                    ffname=NPZFilePath+fname\n                    if '.npz' in ffname:\n                        fnameContent=np.load(ffname)\n                    else:\n                        fnameContent=loadmat(ffname)\n                    DataPred = fnameContent['DataPred']\n                    DataExp=fnameContent['DataExp']\n                    Fit = fnameContent['Fit']\n                    PosPred=fnameContent['PosPred']\n                    n2 =DataExp.shape\n                    n3= PosPred.shape\n                    \n                    DataExp=np.append(DataExp,np.zeros((nmax-n2[0],n2[1])),axis=0)\n                    DataPred=np.append(DataPred,np.zeros((nmax-n2[0],n2[1])),axis=0)\n                    PosPred=np.append(PosPred,np.zeros((nmaxpos-n3[0],n3[1])),axis=0)\n                    \n                    # we are adding all the data from different files together\n                    dataExp = np.append(dataExp, DataExp, axis=1) \n                    dataPred = np.append(dataPred, DataPred, axis=1)\n                    posPred = np.append(posPred, PosPred, axis=1)\n\n                    tmax=np.append(tmax, n2[0]/FreqEchImg*np.ones(n2[1]))\n                \n                \n                DataExp = dataExp.copy()\n                DataPred=dataPred.copy()\n                PosPred = posPred.copy()\n            else:\n\n                fname = file_name_list[ifile]\n                #### full path file name\n                ffname = NPZFilePath+ fname\n                if '.npz' in ffname:\n                    fnameContent=np.load(ffname)\n                else:\n                    fnameContent=loadmat(ffname)\n                DataPred = fnameContent['DataPred']\n                DataExp=fnameContent['DataExp']\n                Fit = fnameContent['Fit']\n                PosPred=fnameContent['PosPred']\n                n2=DataExp.shape\n                tmax=n2[0]/FreqEchImg*np.ones(n2[1]) #### movie length, the same for all nuclei in a data set \n            \n            ### extract short name from result file name\n            iend = fname.index('.npz') if '.npz' in fname else fname.index('.mat')\n            name=fname[5:iend] \n            self.name=name\n            \n            ### where to write figure files \n            # dirwrite = DataFilePath0+'/'+name+'_result'\n            # if os.path.exists(dirwrite):\n            #     shutil.rmtree(dirwrite, ignore_errors = True)\n\n            # os.mkdir(dirwrite)\n\n            n = DataExp.shape\n            nexp = n[1]\n            \n            ## parameters\n            DureeSimu = n[0]*FrameLen #in s\n            frame_num = n[0]\n            DureeAnalysee = DureeSignal + DureeSimu # (s)\n            num_possible_poly = round(DureeAnalysee/(EspaceInterPolyMin/Polym_speed))\n            \n            \n            MIntensity = np.array([])\n            T0 = np.array([])\n            \n            ## find first hit which is 1/5th of the max intensity\n            for data_i in range(nexp):\n                \n                # ifig= math.floor((data_i)/36)+1\n                \n                max_intensity=max(DataPred[:,data_i])\n                MIntensity=np.append(MIntensity,max_intensity)\n                \n                ihit=np.where(DataPred[:,data_i]> max_intensity/5)[0]\n                \n                if len(ihit)== 0:\n                    
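# (added) no frame ever exceeds one fifth of the trace maximum, so the\n                    # activation time is pushed to the very end of the movie.\n                    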
ihit=n[0]\n t0o1 = (ihit)/FreqEchImg \n else:\n ihit=min(np.where(DataPred[:,data_i]> max_intensity/5)[0])\n t0o1 = (ihit+1)/FreqEchImg \n \n t0=t0o1 \n T0 = np.append(T0, t0) #stores the first hit for each nuclei\n \n \n # h = plt.figure(ifig+ ifile)#, figsize=[10,12] \n # plt.subplots_adjust(hspace=1,wspace=1)\n # plt.subplot(6,6,(data_i%36+1))\n # plt.fill_between(np.array([t0/60, tmax[data_i]/60]), np.array([150,150]), facecolors =np.array([0.9, 0.9, 0.9])) #this what is really analyzed\n # plt.plot(np.arange(0, frame_num)/FreqEchImg/60, DataExp[:,data_i].T, color = 'k', linewidth = 0.1)\n # plt.plot(np.arange(0, frame_num)/FreqEchImg/60, DataPred[:,data_i].T, color = 'r', linewidth = 0.1)\n # plt.xlim(0,40)\n # plt.ylim(0, 100)\n # plt.xticks(fontsize=5)\n # plt.yticks(fontsize=5)\n # sns.despine()\n # if data_i%36==35 or (data_i+1)==nexp:\n # figfile=dirwrite+'/figure'+str(ifig)+'.pdf'\n # h.savefig(figfile)\n # plt.close()\n \n ### Figure showing Data Signal Prediction\n # h=plt.figure(40)\n # sz= DataPred.shape\n # Y_normal = np.arange(1,sz[1]+1)\n # Y=Y_normal[::-1]\n # X = np.arange(0, sz[0])/FreqEchImg/60\n # plt.imshow(DataPred.T, cmap=cm_jet, extent=extentForPlot(X).result + extentForPlot(Y).result, aspect='auto', origin='upper')\n # plt.xlabel('Time [min]', fontsize=12)\n # plt.ylabel('Transcription site', fontsize=12)\n # cb= plt.colorbar()\n # cb.ax.tick_params(labelsize=fsz)\n # figfile=dirwrite+'/DataPred_'+name+'.pdf'\n # h.savefig(figfile, dpi=800) \n # plt.close()\n\n # ### Figure showing Data Signal Experimental\n # h = plt.figure(50)\n # plt.imshow(DataExp.T, cmap=cm_jet, extent=extentForPlot(X).result + extentForPlot(Y).result, aspect='auto', origin='upper')\n # plt.xlabel('Time [min]', fontsize=12)\n # plt.ylabel('Transcription site', fontsize=12)\n # plt.colorbar()\n # figfile=dirwrite+'/DataExp_'+name+'.pdf'\n # h.savefig(figfile, dpi=800) \n # plt.close()\n\n # ### Figure showing Data Position Prediction\n # h=plt.figure(60)\n # Y_normal=np.arange(1, len(PosPred[0])+1)\n # Y=Y_normal[::-1]\n # X=np.arange(0,len(PosPred))*EspaceInterPolyMin/Polym_speed/60 -(TaillePreMarq+TailleSeqMarq+TaillePostMarq)/Polym_speed/60 ### time\n # plt.imshow(PosPred.T, cmap='gray', extent=extentForPlot(X).result + extentForPlot(Y).result, aspect='auto', origin='upper')\n # plt.xlabel('Time [min]', fontsize=12)\n # plt.ylabel('Transcription site', fontsize=12)\n # figfile=dirwrite+'/PosPred'+name+'.pdf'\n # h.savefig(figfile, dpi=800) \n # plt.close()\n \n ### compute distribution of spacings\n nn=PosPred.shape\n\n dt=np.array([])\n dtc=np.array([])\n \n # figfile=dirwrite +'/PosPred'+name+'.txt' ### text file for pol positions \n # fid = open(figfile,'w+')\n\n \n for i in range(nn[1]): #for all cells\n times = (np.where(PosPred[:,i]==1)[0]+1) / FreqEchSimu -(TaillePreMarq+TailleSeqMarq+TaillePostMarq)/Polym_speed \n # fid.writelines([' \\n'+ str(times/60)])\n\n if len(times) !=0:\n dtimes = np.diff(times)\n\n ### find first index larger than T0\n firstind=np.where(times > T0[i] - (TaillePreMarq+TailleSeqMarq+TaillePostMarq)/Polym_speed )[0]\n \n if len(firstind)==0:\n istart=0\n else:\n istart= min(firstind)\n dt=np.append(dt, dtimes[istart:])\n if tmax[i]-times[-1]>0:\n dtc = np.append(dtc, tmax[i]-times[-1]) ### the very last time\n\n # fid.close()\n\n #Store: define matrices that store parameters and objective functions \n # for each iteration of the least_square_function\n # to have better results of least square we ran the function for 100 iterations with random initial values 
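per restart:\n            # NOTE (added): each restart scales l1 and l2 log-uniformly within a factor\n            # of 100 of k00 and draws A1 uniformly in (-1, 1), rejecting draws whose\n            # initial survival slope l1*A1 + l2*(1-A1) is not negative.\n            # ...drawn anew 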
for each iteration\n store = np.empty((0,5))\n\n if len(dt)!=0: \n for cens in range(2):\n if cens:\n xs,fs,flo,fup =ecdf_bounds(np.append(dt,dtc),np.append( np.zeros(len(dt)), np.ones(len(dtc)) ) )\n else:\n \n xs,fs,flo,fup =ecdf_bounds(dt )\n\n ## fit distribution of spacings using combination of two exponentials\n\n xs = xs[:-1]\n fs = fs[:-1]\n flo = flo[:-1]\n fup = fup[:-1]\n\n ##############\n\n sN = np.sqrt(len(xs))\n\n def exp_fitness(k): #objective function\n return np.abs(np.log(k[2]*np.exp(k[0]*xs)+(1-k[2])*np.exp(k[1]*xs) ) -np.log(1-fs)) /sN # k: parameters\n\n k00 = np.array([-0.01, -0.001, 1])\n amp = np.array([np.log(100), np.log(100)])\n NbIterationinFit = 100\n\n test=0\n for mc in range(NbIterationinFit):\n print('iteration nbr ',mc)\n while test==0: #test is just to re-do the iteration until we encounter no error\n ## change k00 which is the initial value\n factor = np.exp(amp* (2* np.random.uniform(size=2)-1))\n k0 = k00.copy()\n k0[:2] = k0[0:2]*factor\n k0[2]=2*np.random.uniform(size=1)-1\n\n ## sort k0(1:2)\n k0[0:2] = np.sort(k0[0:2])\n\n ## impose constraints\n if not (k0[0]*k0[2]+k0[1]*(1-k0[2])<0):\n while not (k0[0]*k0[2]+k0[1]*(1-k0[2])<0):\n k0[2] = 2*np.random.uniform(size=1)-1 # A1, A2 value\n\n # Use the fcn lsqnonlin which is the least square function\n\n try:\n k = least_squares(exp_fitness, k0,bounds=(-np.inf,[0,0,np.inf]) ,ftol = (1e-8),max_nfev= 1e6, xtol= (1e-10)).x\n obj = sum(exp_fitness(k)**2)\n test = 1\n except:\n pass\n test=0\n\n # write down results\n\n\n ## sort k\n A = np.array([k[2],1-k[2]]) # A values before sorting \n kk = np.sort(k[0:2])\n IX = np.argsort(k[0:2])\n k[:2]= kk\n A=A[IX]\n k[2]=A[0]\n k_obj=np.append(k,np.array([obj]))\n to_store=np.append(k_obj,cens)\n to_store=to_store.reshape(1,len(to_store))\n store = np.append(store,to_store,axis=0)\n\n \n # select optimal \n \n # ind is index of the least square with real numbers\n ind = np.where(np.max(np.abs(store[:,0:3].imag ) ,axis=1) <1e-10)[0]\n # objmin is minimun of the above results (least square value of ind)\n objmin = np.min(store[ind,3])\n\n #ind is the index of the minimum value of the least square with real value\n indmin=np.argmin(store[ind,3])\n imin = ind[indmin]\n\n #overflow help us set the Uncertainty interval\n overflow = 1\n\n # ind index where the least square are real and less than < (1+overflow)*objmin)\n ind = np.where( (store[:,3] < (1+overflow)*objmin) & (np.max(np.abs(store[:,0:3].imag ),axis=1) <1e-10)) [0]\n ksel = store[ind,0:3].real\n kmin = store[imin,0:3].real\n censmin=store[imin,4].real\n\n if censmin:\n xs,fs,flo,fup =ecdf_bounds(np.append(dt,dtc),np.append( np.zeros(len(dt)), np.ones(len(dtc)) ) )\n else:\n xs,fs,flo,fup =ecdf_bounds(np.append(dt,dtc))\n\n\n # ## Survival function\n # h = plt.figure(70)\n # plt.semilogy(xs, 1-fs, 'o', color='r', mfc = 'none', markersize=9, linestyle='') #empirical function\n # plt.semilogy(xs, 1-flo, '--r') # lower confidence\n # plt.semilogy(xs, 1-fup, '--r') # upper confidence\n pred = kmin[2]*np.exp(kmin[0]*xs)+(1-kmin[2])*np.exp(kmin[1]*xs)\n # plt.semilogy(xs, pred, 'k', linewidth = 3) # predicted 2 exp\n # plt.xlim(0,250)\n # plt.ylim(1e-6, 1)\n # plt.xlabel('Time [s]',fontsize=fsz)\n # plt.ylabel('Survival function',fontsize = fsz)\n\n # compute 3 rates k1p,m k2 from the 3 parameters\n l1 = kmin[0]\n l2 = kmin[1]\n A1 = kmin[2]\n A2 = 1-A1\n \n S1 = kmin[2]*l1+(A2)*l2\n k2 = -S1\n S2 = A1*(l1)**2+(A2)*(l2)**2\n S3 = A1*(l1)**3+(A2)*(l2)**3\n k1m = S1-S2/S1\n k1p = (S3*S1-S2**2)/S1/(S1**2-S2)\n # 
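(plot disabled)\n            # NOTE (added): the telegraph-model rates above come from the moments of the\n            # fitted two-exponential mixture, S_n = A1*l1**n + A2*l2**n:\n            #   k2 = -S1,  k1m = S1 - S2/S1,  k1p = (S3*S1 - S2**2)/(S1*(S1**2 - S2)).\n            # 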
plt.title('k_1^-='+str(\"%.1g\" % k1m)+'k_1^+='+str(\"%.1g\" % k1p)+'k_2='+str(\"%.1g\" % k2))\n\n # figfile = dirwrite+'/Fit_'+name+'.pdf'\n # h.savefig(figfile)\n # plt.close()\n\n\n ####### KS test\n\n thr= 20\n ind = np.where(xs>thr)[0]\n dist = np.max(np.abs( fs[ind] -1+pred[ind]))\n nsample=len(dt[dt>thr])\n N=10\n c=math.sqrt(nsample)*dist\n r=np.arange(1,N+1)\n aa=2*sum(np.exp(-2*c**2*r**2)*((-1)**(r-1)))\n \n # h=plt.figure(80)\n # plt.plot(xs,fs,'kx')\n # plt.plot(xs,1-pred,'ro')\n # plt.xlabel('Time [s]',fontsize=fsz)\n # plt.ylabel('CDF',fontsize=fsz)\n # figfile=dirwrite+'/Fit_CDF_'+name+'.pdf'\n # h.savefig(figfile)\n # plt.close()\n # ##########################\n \n ## optimal\n res = np.array([k1p,k1m,k2,k1m/(k1m+k1p),k1p/(k1m+k1p),l1,l2,A1,A2,objmin,aa,len(DataExp[0]),len(DataExp)])\n\n\n # compute interval\n l1=ksel[:,0] \n l1sel=l1.copy()\n l2=ksel[:,1] \n l2sel=l2.copy()\n A1=ksel[:,2] \n A1sel=A1.copy()\n A2=1-A1\n A2sel=A2.copy()\n \n S1 = A1*l1+(A2)*l2\n K2 = -S1\n S2 = A1*l1**2+(A2)*l2**2\n S3 = A1*l1**3+(A2)*l2**3\n K1m = S1-S2/S1\n K1p = (S3*S1-S2**2)/S1/(S1**2-S2)\n #################\n\n\n\n P1=K1m/(K1m+K1p)\n P2=K1p/(K1m+K1p)\n\n resl= np.array([np.min(K1p),np.min(K1m),np.min(K2),np.min(P1),np.min(P2),np.min(l1sel),np.min(l2sel),np.min(A1sel),np.min(A2sel)])\n resl[0:5] = np.max(np.vstack([resl[0:5], np.zeros(5)]), axis=0)\n\n\n resh= np.array([np.max(K1p),np.max(K1m),np.max(K2),np.max(P1),np.max(P2),np.max(l1sel),np.max(l2sel),np.max(A1sel),np.max(A2sel)])\n\n df1 = pd.DataFrame([res.tolist(), #best result\n resl.tolist(), # low \n resh.tolist()]) # high\n\n\n df1.to_excel(writer,sheet_name='Sheet1', startrow=4*(ifile+1)-3, startcol=1, header=False, index=False)\n\n df2 = pd.DataFrame([name.replace('result_','')]) #filename\n df2.to_excel(writer,sheet_name='Sheet1', startrow=4*(1+ifile)-3, startcol=0, header=False, index=False)\n\n\n else:\n df2 = pd.DataFrame([name.replace('result_','')]) #filename\n df2.to_excel(writer,sheet_name='Sheet1', startrow=4*(1+ifile)-3, startcol=1, header=False, index=False)\n res = [0, 0, 0, 0, 0, 0, len(DataExp[0]), len(DataExp)]\n df1 = pd.DataFrame(res)\n df1.to_excel(writer,sheet_name='Sheet1', startrow=4*(ifile+1)-3, startcol=2, header=False, index=False)\n writer.save()\n\n","repo_name":"oradules/BurstDECONV","sub_path":"BurstDECONV_Python/utilities/drosoFit2_KS_test.py","file_name":"drosoFit2_KS_test.py","file_ext":"py","file_size_in_byte":20563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"30637031605","text":"import tkinter as tk\r\nimport json\r\nfrom tkinter import Frame,Label,Entry,Button,Radiobutton,ttk,VERTICAL,YES,BOTH,END,Tk,W,StringVar,messagebox\r\nfrom Mobil import *\r\nclass FrmMobil:\r\n \r\n def __init__(self, parent, title):\r\n self.parent = parent \r\n self.parent.geometry(\"420x445\")\r\n self.parent.title(title)\r\n self.parent.protocol(\"WM_DELETE_WINDOW\", self.onKeluar)\r\n self.ditemukan = None\r\n self.aturKomponen()\r\n self.onReload()\r\n \r\n def aturKomponen(self):\r\n mainFrame = Frame(self.parent, bd=10)\r\n mainFrame.pack(fill=BOTH, expand=YES)\r\n Label(mainFrame, text='PLAT:').grid(row=0, column=0,\r\n sticky=W, padx=5, pady=5)\r\n Label(mainFrame, text='MERK:').grid(row=1, column=0,\r\n sticky=W, padx=5, pady=5)\r\n Label(mainFrame, text='JENIS:').grid(row=2, column=0,\r\n sticky=W, padx=5, pady=5)\r\n Label(mainFrame, text='WARNA:').grid(row=3, column=0,\r\n sticky=W, padx=5, pady=5)\r\n Label(mainFrame, 
text='HARGASEWA:').grid(row=4, column=0,\r\n              sticky=W, padx=5, pady=5)\r\n        # Textbox\r\n        self.txtPlat = Entry(mainFrame) \r\n        self.txtPlat.grid(row=0, column=1, padx=5, pady=5)\r\n        self.txtPlat.bind(\"<Return>\",self.onCari) # bind the Enter key event\r\n        # Textbox\r\n        self.txtMerk = Entry(mainFrame) \r\n        self.txtMerk.grid(row=1, column=1, padx=5, pady=5)\r\n        # Textbox\r\n        self.txtJenis = Entry(mainFrame) \r\n        self.txtJenis.grid(row=2, column=1, padx=5, pady=5)\r\n        # Textbox\r\n        self.txtWarna = Entry(mainFrame) \r\n        self.txtWarna.grid(row=3, column=1, padx=5, pady=5)\r\n        # Textbox\r\n        self.txtHargasewa = Entry(mainFrame) \r\n        self.txtHargasewa.grid(row=4, column=1, padx=5, pady=5)\r\n        # Button\r\n        self.btnSimpan = Button(mainFrame, text='Simpan', command=self.onSimpan, width=10)\r\n        self.btnSimpan.grid(row=0, column=3, padx=5, pady=5)\r\n        self.btnClear = Button(mainFrame, text='Clear', command=self.onClear, width=10)\r\n        self.btnClear.grid(row=1, column=3, padx=5, pady=5)\r\n        self.btnHapus = Button(mainFrame, text='Hapus', command=self.onDelete, width=10)\r\n        self.btnHapus.grid(row=2, column=3, padx=5, pady=5)\r\n        # define columns\r\n        columns = ('id','plat','merk','jenis','warna','hargasewa')\r\n        self.tree = ttk.Treeview(mainFrame, columns=columns, show='headings')\r\n        # define headings\r\n        self.tree.heading('id', text='ID')\r\n        self.tree.column('id', width=\"30\")\r\n        self.tree.heading('plat', text='PLAT')\r\n        self.tree.column('plat', width=\"70\")\r\n        self.tree.heading('merk', text='MERK')\r\n        self.tree.column('merk', width=\"70\")\r\n        self.tree.heading('jenis', text='JENIS')\r\n        self.tree.column('jenis', width=\"70\")\r\n        self.tree.heading('warna', text='WARNA')\r\n        self.tree.column('warna', width=\"70\")\r\n        self.tree.heading('hargasewa', text='HARGASEWA')\r\n        self.tree.column('hargasewa', width=\"90\")\r\n        # set tree position\r\n        self.tree.place(x=0, y=200)\r\n        \r\n    def onClear(self, event=None):\r\n        self.txtPlat.delete(0,END)\r\n        self.txtPlat.insert(END,\"\")\r\n        self.txtMerk.delete(0,END)\r\n        self.txtMerk.insert(END,\"\")\r\n        self.txtJenis.delete(0,END)\r\n        self.txtJenis.insert(END,\"\")\r\n        self.txtWarna.delete(0,END)\r\n        self.txtWarna.insert(END,\"\")\r\n        self.txtHargasewa.delete(0,END)\r\n        self.txtHargasewa.insert(END,\"\")\r\n        self.btnSimpan.config(text=\"Simpan\")\r\n        self.onReload()\r\n        self.ditemukan = False\r\n        \r\n    def onReload(self, event=None):\r\n        # fetch all Mobil records\r\n        obj = Mobil()\r\n        result = obj.get_all()\r\n        parsed_data = json.loads(result)\r\n        for item in self.tree.get_children():\r\n            self.tree.delete(item)\r\n        \r\n        for i, d in enumerate(parsed_data):\r\n            self.tree.insert(\"\", i, text=\"Item {}\".format(i), values=(d[\"id\"],d[\"plat\"],d[\"merk\"],d[\"jenis\"],d[\"warna\"],d[\"hargasewa\"]))\r\n    def onCari(self, event=None):\r\n        plat = self.txtPlat.get()\r\n        obj = Mobil()\r\n        a = obj.get_by_plat(plat)\r\n        if(len(a)>0):\r\n            self.TampilkanData()\r\n            self.ditemukan = True\r\n        else:\r\n            self.ditemukan = False\r\n            messagebox.showinfo(\"showinfo\", \"Data not found\")\r\n    def TampilkanData(self, event=None):\r\n        plat = self.txtPlat.get()\r\n        obj = Mobil()\r\n        res = obj.get_by_plat(plat)\r\n        self.txtPlat.delete(0,END)\r\n        self.txtPlat.insert(END,obj.plat)\r\n        self.txtMerk.delete(0,END)\r\n        self.txtMerk.insert(END,obj.merk)\r\n        self.txtJenis.delete(0,END)\r\n        self.txtJenis.insert(END,obj.jenis)\r\n        self.txtWarna.delete(0,END)\r\n        self.txtWarna.insert(END,obj.warna)\r\n        self.txtHargasewa.delete(0,END)\r\n        self.txtHargasewa.insert(END,obj.hargasewa)\r\n        
self.btnSimpan.config(text=\"Update\")\r\n        \r\n    def onSimpan(self, event=None):\r\n        # get the data from input\r\n        plat = self.txtPlat.get()\r\n        merk = self.txtMerk.get()\r\n        jenis = self.txtJenis.get()\r\n        warna = self.txtWarna.get()\r\n        hargasewa = self.txtHargasewa.get()\r\n        # create new Object\r\n        obj = Mobil()\r\n        obj.plat = plat\r\n        obj.merk = merk\r\n        obj.jenis = jenis\r\n        obj.warna = warna\r\n        obj.hargasewa = hargasewa\r\n        if(self.ditemukan==False):\r\n            # save the record\r\n            res = obj.simpan()\r\n        else:\r\n            # update the record\r\n            res = obj.update_by_plat(plat)\r\n        # read data in json format\r\n        data = json.loads(res)\r\n        status = data[\"status\"]\r\n        msg = data[\"message\"]\r\n        # display json data into messagebox\r\n        messagebox.showinfo(\"showinfo\", status+', '+msg)\r\n        #clear the form input\r\n        self.onClear()\r\n    def onDelete(self, event=None):\r\n        plat = self.txtPlat.get()\r\n        obj = Mobil()\r\n        obj.plat = plat\r\n        if(self.ditemukan==True):\r\n            res = obj.delete_by_plat(plat)\r\n        else:\r\n            messagebox.showinfo(\"showinfo\", \"Data must be found before it can be deleted\")\r\n            return\r\n        \r\n        # read data in json format\r\n        data = json.loads(res)\r\n        status = data[\"status\"]\r\n        msg = data[\"message\"]\r\n        \r\n        # display json data into messagebox\r\n        messagebox.showinfo(\"showinfo\", status+', '+msg)\r\n        \r\n        self.onClear()\r\n        \r\n    def onKeluar(self, event=None):\r\n        # command to close the application\r\n        self.parent.destroy()\r\nif __name__ == '__main__':\r\n    root2 = tk.Tk()\r\n    aplikasi = FrmMobil(root2, \"Aplikasi Data Mobil\")\r\n    root2.mainloop()\r\n","repo_name":"RizkyResiJulian/pemrograman_berorientasi_objek_lanjut","sub_path":"TugasBesar/FrmMobil.py","file_name":"FrmMobil.py","file_ext":"py","file_size_in_byte":6872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70463527700","text":"from django.shortcuts import redirect, render\nfrom base.models import Topic\nfrom .models import User\nfrom .forms import UserForm, MyUserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\n\n# Create your views here.\n\ndef userProfile(request, pk):\n    user = User.objects.get(id=pk)\n    rooms = user.room_set.all()\n    comments = user.message_set.all()\n    topics = Topic.objects.all()\n    context = {\n        'user': user,\n        'rooms': rooms,\n        'comments': comments,\n        'topics': topics\n    }\n    return render(request, 'user/profile.html', context)\n\n\ndef registerUser(request):\n    form = MyUserCreationForm\n    if request.method == 'POST':\n        form = MyUserCreationForm(request.POST)\n        if form.is_valid():\n            user = form.save(commit=False)\n            user.username = user.username.lower()\n            user.save()\n            return redirect('login')\n        else: \n            messages.error(request, 'Error has occurred')\n    context = {'form': form}\n    return render(request, 'user/login_register.html', context)\n\n\ndef loginUser(request):\n    page = 'login'\n\n    if request.user.is_authenticated:\n        return redirect('/')\n    \n    if request.method == 'POST':\n        email = request.POST.get('email')\n        password = request.POST.get('password')\n\n        # try:\n        #     User.objects.get(email=email)\n        # except:\n        #     messages.error(request, \"User does not exist\")\n        \n        user = authenticate(request, email=email, password=password)\n\n        if user is not None:\n            login(request, user)\n            return redirect('/')\n        else:\n            messages.error(request, 'Username or password is incorrect')\n    context = {'page': page}\n    return render(request, 'user/login_register.html', context)\n\ndef logoutUser(request):\n    if not 
request.user.is_authenticated:\n        return redirect('/')\n    logout(request)\n    return redirect('home')\n\n\ndef editUser(request):\n    user = request.user\n    form = UserForm(instance=user)\n    if request.method == 'POST':\n        form = UserForm(request.POST, request.FILES, instance=user)\n        if form.is_valid():\n            form.save()\n            return redirect('user-profile', pk=user.id)\n\n    \n    context = {'form': form}\n    return render(request, 'user/edit-user.html', context)","repo_name":"umidjon03/SocialMy","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"28191280913","text":"import os\nfrom datetime import timedelta\n\nfrom celery import Celery\n\n# Set the default Django settings module for the 'celery' program.\nfrom celery.schedules import crontab\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_email_celery.settings')\nos.environ.setdefault('FORKED_BY_MULTIPROCESSING', '1')\n\napp = Celery('django_email_celery')\n\n# Using a string here means the worker doesn't have to serialize\n# the configuration object to child processes.\n# - namespace='CELERY' means all celery-related configuration keys\n#   should have a `CELERY_` prefix.\napp.config_from_object('django.conf:settings', namespace='CELERY')\n\n# Load task modules from all registered Django apps.\napp.autodiscover_tasks()\n\n\n@app.task(bind=True)\ndef debug_task(self):\n    print(f'Request: {self.request!r}')\n\n\napp.conf.beat_schedule = {\n    \"sending email\": {\n        \"task\": \"send_email_task\", # <---- Name of task\n        \"schedule\": crontab(\n            minute='*'\n        )\n    }, \"Print hello\": {\n        \"task\": \"hello_task\", # <---- Name of task\n        \"schedule\": timedelta(seconds=30)\n    }, \"Write a file\": {\n        \"task\": \"write_task\", # <---- Name of task\n        \"schedule\": timedelta(seconds=30)\n    }\n}\n","repo_name":"GuillermoDuque/django_email_celery","sub_path":"django_email_celery/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9217535387","text":"import logging\nimport asyncio\nimport urllib.parse\n\nimport aiohttp\n\nfrom ..linereader import FdLineReader\nfrom .auth import encode_auth_querystring_param\n\nlogger = logging.getLogger(__name__)\n\n\nasync def challenge(url, token, fd_in, fd_out, loop=None):\n    with aiohttp.ClientSession(loop=loop) as session:\n        auth_params = encode_auth_querystring_param(token)\n        url_parts = urllib.parse.urlparse(url)\n        query = urllib.parse.parse_qs(url_parts.query)\n        query.update(auth_params)\n        url_parts = list(url_parts)\n        url_parts[4] = urllib.parse.urlencode(query)\n        url = urllib.parse.urlunparse(url_parts)\n        async with session.ws_connect(url, headers={'Origin': 'localhost'}) as ws:\n            logger.debug('opening websocket')\n            if logger.isEnabledFor(logging.DEBUG):\n                for header in ('Access-Control-Allow-Origin', 'Access-Control-Allow-Credentials',\n                               'Access-Control-Allow-Methods', 'Access-Control-Allow-Headers',\n                               'Access-Control-Expose-Headers'):\n                    logger.debug('CORS header {} origin: {!r}'.format(header, ws._response.headers.get(header)))\n            sender = send_from_fd_to_ws(fd_in, ws, loop=loop)\n            receiver = send_from_ws_to_fd(ws, fd_out)\n            done, pending = await asyncio.wait([sender, receiver], return_when=asyncio.FIRST_COMPLETED)\n            for task in pending:\n                task.cancel()\n            logger.debug('closing websocket')\n            # await ws.close()\n\n\nasync def send_from_fd_to_ws(fd, 
ws, loop=None):\n    async for line in FdLineReader(fd, loop=loop):\n        logger.debug('sending line from fd to ws %r', line)\n        ws.send_str(line)\n        await ws._writer.writer.drain()\n\n\nasync def send_from_ws_to_fd(ws, fd):\n    async for msg in ws:\n        if msg.tp == aiohttp.MsgType.text:\n            logger.debug('sending data from ws to fd: %s', msg.data)\n            fd.write(msg.data)\n            fd.flush()\n        elif msg.tp == aiohttp.MsgType.error:\n            logger.error('ws connection closed with exception %s', ws.exception())\n            return\n","repo_name":"elastic-coders/pushpull","sub_path":"pushpull/websocket/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"38193593826","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport os\nimport requests\nimport random\nimport pygame\nfrom threading import Thread\n\n\nclass RequestThread(Thread):\n    def __init__(self, queue, url, payload):\n        super(RequestThread, self).__init__()\n\n        self.queue = queue\n        self.url = url\n        self.payload = payload\n        self.daemon = True\n\n    def run(self):\n        res = requests.get(self.url, params=self.payload)\n        self.queue.put(res.json())\n\n\nclass ImageFetchThread(Thread):\n    def __init__(self, url, news, directory):\n        super(ImageFetchThread, self).__init__()\n\n        self.url = url\n        self.news = news\n        self.directory = directory\n        self.daemon = True\n\n    def run(self):\n        if not self.news['imageName']:\n            return\n\n        image_path = os.path.join(self.directory, self.news['imageName'])\n        if os.path.isfile(image_path):\n            prefix = str(random.randint(100000, 1000000)) + \"-\"\n            new_image_name = prefix + self.news['imageName']\n            image_path = os.path.join(self.directory, new_image_name)\n            self.news['imageName'] = new_image_name\n\n        with open(image_path, 'wb') as f:\n            f.write(requests.get(self.url).content)\n\n\nclass ImageRotateThread(Thread):\n    def __init__(self, image, center, degree, queue):\n        super(ImageRotateThread, self).__init__()\n\n        self.image = image\n        self.center = center\n        self.degree = degree\n        self.queue = queue\n        self.daemon = True\n\n    def run(self):\n        rotate_image = pygame.transform.rotate(self.image, self.degree)\n        rotate_rect = rotate_image.get_rect(center=self.center)\n        self.queue.put((rotate_image, rotate_rect))\n","repo_name":"k47ma/rpi-interface","sub_path":"lib/threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"2498617387","text":"from datetime import timedelta\r\nfrom random import randint\r\nimport time\r\n\r\n# Time the van arrives\r\nchegada = (timedelta(hours=20))\r\nprint (\"Van arrival at\",chegada)\r\nprint (\"From 20:00 to 20:10:\\n\")\r\n\r\n# Number of people in line at 20:00 (random, max = 15)\r\ntamanhoDaFila = (randint(5,15))\r\nprint (\"Arrived in line:\",tamanhoDaFila, \"people\")\r\n\r\n# Build the queue from the random number and add 10 minutes to the waiting time of the first residents\r\nfila = []\r\ntempoDeEspera = []\r\nwhile tamanhoDaFila != 0:\r\n    fila.append(1)\r\n    tempoDeEspera.append(10)\r\n    tamanhoDaFila -= 1\r\n\r\n# Deadline for starting to serve the food\r\nlimite = (timedelta(hours=20,minutes=10))\r\n\r\n# minute counter\r\ncontadorDeMinutos = 2\r\n# From 20:00 to 20:10 (every 2 minutes 1 person joins the line; the line is capped at 15 people)\r\nwhile (len(fila)) <= 15 and chegada != limite:\r\n    time.sleep(1.5)\r\n    chegada = chegada + (timedelta(minutes=1))\r\n    print (\"At\",chegada)\r\n    print (\"The line 
length is:\",len(fila),\"\\n\")\r\n    if contadorDeMinutos % 2 == 0:\r\n        fila.append(1)\r\n        tempoDeEspera.append(contadorDeMinutos + 10)\r\n    contadorDeMinutos += 1\r\n\r\n# Time in minutes is X = (remainder of the last 2 digits of your RM divided by 3) plus 1 (RM88059)\r\ntempoDeServirSopa = 59 % 3 + 1\r\n\r\n# Leftover minutes to add to the waiting time of the next residents who join the line\r\nsobraDeTempo = contadorDeMinutos\r\n\r\n# minute counter\r\ncontadorDeMinutos = 0\r\n\r\n# Start serving the soup\r\nchegada = timedelta(hours=20,minutes=10)\r\nprint(\"At\",chegada,\"Starting to serve the soup\\n\")\r\n\r\n# Serving the soup\r\nwhile len(fila) > 0:\r\n    time.sleep(1.5)\r\n    chegada = chegada + (timedelta(minutes=1))\r\n    contadorDeMinutos += 1\r\n    \r\n    # Every 2 minutes 1 resident joins the line\r\n    if contadorDeMinutos %2 == 0 and len(fila) <= 15:\r\n        fila.append(1)\r\n        tempoDeEspera.append(sobraDeTempo + contadorDeMinutos)\r\n        print(\"At\",chegada)\r\n        print(\"One more homeless resident arrived, total in line:\",len(fila),\"\\n\")\r\n\r\n    # Every 3 minutes 3 plates are served (RM formula) \r\n    if contadorDeMinutos % tempoDeServirSopa == 0:\r\n        if len(fila) > 3:\r\n            fila.pop()\r\n            fila.pop()\r\n            fila.pop()\r\n            print(\"At\",chegada)\r\n            print(\"3 plates served, still in line:\",len(fila),\"\\n\")\r\n\r\n        # If serving lasts more than 30 minutes, a volunteer helper also serves 3 soups every 3 minutes\r\n        if contadorDeMinutos >= 30:\r\n            print(\"At\",chegada)\r\n            print(\"The volunteer helper had to step in\")\r\n            if len(fila) > 3:\r\n                fila.pop()\r\n                fila.pop()\r\n                fila.pop() \r\n                print(\"3 plates served by the helper, still in line:\",len(fila),\"\\n\")\r\n\r\n        # When 3 or fewer residents remain, serve the last round of soup\r\n        if len(fila) <= 3:\r\n            fila = [] \r\n            print(\"At\",chegada)\r\n            print(\"Everyone has been served, no one is left in line\")\r\n\r\n    print(\"The waiting time for each resident, respectively, was:\",tempoDeEspera)","repo_name":"rafaelrara/Agilidade-para-atendimento-a-moradores-de-rua","sub_path":"cap11.py","file_name":"cap11.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"20682199623","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n    Environment implementation (abstract class) that represents the experiment using Video\n    This class implements the basic functions to control the APs,\n    but it does not implement the QoE\n\n\n\"\"\"\nimport time\nimport logging\nimport pickle\nfrom abc import abstractmethod\nimport socket\n\nimport numpy as np\n\nimport http.client\nimport urllib.parse\n\nfrom Environment.interface_env import Interface_Env\nfrom Environment.common import kill_aps\nfrom Environment.common import change_channel_hostapd\nfrom Environment.common import start_hostapd\n\n\ndef decode_txpower(t):\n    \"\"\" convert the data in info['txpower'] which is, for example, '15.00 dBm' into 15.0\n\n    @return: the value of the tx power\n    @rtype: float\n    \"\"\"\n    r = float(t.split()[0].strip())\n    return r\n\n\nclass Generic_AP(Interface_Env):\n\n    NUM_CHANNELS = 11\n    NUM_TXPOWER_LEVELS = 15\n    DEFAULT_C = 0.4 # used in the reward hossfeld\n\n    def __init__(self,\n                 aps, # List[AP_Config]\n                 model_filename, # filename that contains the trained model\n                 mac_mapping={}, # {'hostname':'mac'}\n                 log_level=logging.DEBUG,\n                 log_name='AP Controller',\n                 wait_for_states=10,\n                 execute_action=False,\n                 ):\n        \"\"\"\n        initialize the environment\n        @param aps: list of aps controlled in the experiment\n\n        @param model_filename: name of the file that contains 
the trained model\n        @type model_filename: str\n\n        @param mac_mapping: a dictionary that maps the hostname to its mac address\n\n        @param execute_action: if True send the selected actions to the devices\n        \"\"\"\n        super().__init__(LOG_NAME=log_name, log_level=log_level)\n\n        self.aps = aps\n        # load model from json\n        self.mos_model = self.get_model(model_filename=model_filename)\n\n        # num_states is inf because there are continuous dimensions\n        self.num_states = None\n        self.dim_states = 20 # (None, 20)\n        self.num_actions = self.NUM_CHANNELS * self.NUM_TXPOWER_LEVELS\n\n        self.station_data = dict()\n        # used to inform command_ap the mapping between the station name and its MACs\n        self.mac_mapping = mac_mapping\n        self.wait_for_states = wait_for_states\n\n        self.execute_action = execute_action\n        self.last_channnel = [1 for _ in range(len(aps))]\n\n    def command_ap(self, server, port, iface, cmd, extra_params=None):\n        \"\"\"\n        @return: returns True if a response was received,\n            also returns the data or an empty dict (if error)\n        @rtype bool, dict\n        \"\"\"\n        conn = http.client.HTTPConnection(server, port)\n        params = {'iface': iface}\n        if extra_params is not None:\n            params.update(extra_params)\n        q = urllib.parse.urlencode(params)\n        url = \"{}?{}\".format(cmd, q)\n        try:\n            conn.request(method='GET', url=url)\n        except (ConnectionRefusedError, http.client.RemoteDisconnected, socket.gaierror):\n            return False, {} # Got an error\n        resp = conn.getresponse()\n        self.log.info(\"cmd: {} @ {} status:{}\".format(cmd, server, resp.status))\n        try:\n            data = pickle.loads(resp.read())\n        except (EOFError, pickle.UnpicklingError):\n            data = {}\n        conn.close()\n        return resp.status == 200, data\n\n    def restart_aps(self, run_id):\n        \"\"\" restart crashed APs: hostapd sometimes keeps running but stops reporting a channel\n        \"\"\"\n        aps_to_change = []\n        chans = []\n        for ap, channel in zip(self.aps, self.last_channnel):\n            _, data = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')\n            ch = data.get('channel', -1)\n            if ch != -1:\n                continue\n            aps_to_change.append(ap)\n            chans.append(ch)\n        if len(aps_to_change) == 0:\n            # nothing to do\n            return\n\n        # alter the hostapd.conf file\n        change_channel_hostapd(aps_to_change, chans)\n        # restart the hostapd\n        kill_aps(aps_to_change)\n        # start hostapd\n        start_hostapd(aps_to_change, [run_id for _ in range(len(aps_to_change))])\n\n    def valid_actions(self, state=None):\n        \"\"\" return a list with all valid actions for a specific state,\n            if state == None, return all possible states\n            @param state: current state\n            @return: list(int)\n        \"\"\"\n        # TODO: check for valid actions when states is not None\n        valid = list(range(self.num_actions)) # now we always return all actions\n        return valid\n\n    def one_hot(self, channel):\n        \"\"\" code the channel using one-hot encoding\n        @param channel:\n        @type channel: int\n        @return: the channel hot encoded\n        @rtype: list(int)\n        \"\"\"\n        assert channel > 0 and channel <= self.NUM_CHANNELS, \"Wrong channel = {}\".format(channel)\n        cs = [0 for i in range(self.NUM_CHANNELS)]\n        cs[channel - 1] = 1\n        self.log.debug(\"one-hot {} = {}\".format(channel, cs))\n        return cs\n\n    def get_states(self):\n        \"\"\" get the states, one for each AP\n            the state contains:\n            - ( #stations, ch1, ch2, ch3, ch4, ch5, ch6, ch7, ch8, ch9, ch10, ch11,\n                tx_power, #num_neighbors, ch_noise_max, perc_phy_busy_time,\n                sta_signal_avg,\n                rec_bitrate_min, tx_byte_avg, rx_byte_avg )\n        @return: return the value that represents the state of all APs. 
Returns None if an error occurs.\n \"\"\"\n known_macs = set([ap.mac for ap in self.aps])\n try:\n states = []\n for ap in self.aps:\n self.log.info(\"Data from {} @ {}\".format(ap.name, ap.iface))\n _, info = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')\n self.log.info(\"Info: {}\".format(info))\n\n ch = int(info['channel'])\n self.log.info(\"Channel: {}\".format(ch))\n\n _, stations = self.command_ap(ap.name, ap.port, ap.iface, '/get_stations')\n self.log.info(\"Stations: {}\".format(stations))\n num_stations = len(stations) # number of stations now\n self.log.info(\"n: {}\".format(num_stations))\n\n # check #num_neighbors\n _, scan = self.command_ap(ap.name, ap.port, ap.iface, '/get_scan_mac')\n self.log.info(\"Scan: {}\".format(scan))\n macs = set([k for k in scan]) # the dictionary key is the mac of the detected AP\n num_neighbors = len(macs.intersection(known_macs))\n self.log.info(\"num_neighbors: {}\".format(num_neighbors))\n\n _, survey = self.command_ap(ap.name, ap.port, ap.iface, '/get_survey')\n self.log.info(\"survey: {}\".format(survey))\n chann_in_use = [v for v in survey if survey[v].get('in use', False)][0] # we need only the channel in use\n self.log.info(\"survey (in use): {}\".format(chann_in_use))\n survey_in_use = survey[chann_in_use]\n\n ch_noise_max = survey_in_use['noise']\n perc_phy_busy_time = (survey_in_use['channel busy time'] + survey_in_use['channel receive time'] + survey_in_use['channel transmit time']) \\\n / survey_in_use['channel active time']\n\n # obtain the state: one state per AP, so consolidate\n signal_avg = np.average([stations[s]['signal avg'] for s in stations])\n rx_bitrate = np.average([stations[s]['rx bitrate'] for s in stations])\n # detrend tx_bytes and rx_bytes\n tx_bytes = 0\n rx_bytes = 0\n for k in stations:\n if k not in self.station_data:\n self.station_data[k] = dict()\n self.station_data[k]['tx bytes'] = stations[k]['tx bytes']\n self.station_data[k]['rx bytes'] = stations[k]['rx bytes']\n\n tx_bytes = stations[k]['tx bytes'] - self.station_data[k]['tx bytes']\n rx_bytes = stations[k]['rx bytes'] - self.station_data[k]['rx bytes']\n\n # save to use in the next round\n self.station_data[k]['tx bytes'] = stations[k]['tx bytes']\n self.station_data[k]['rx bytes'] = stations[k]['rx bytes']\n\n # save the AP's state\n state = [num_stations] + \\\n self.one_hot(ch) + \\\n [decode_txpower(info['txpower']),\n num_neighbors, # num_neighbors\n ch_noise_max,\n perc_phy_busy_time,\n signal_avg,\n rx_bitrate,\n tx_bytes,\n rx_bytes,\n ]\n if np.any(np.isnan(state)):\n # some reading got nan == error\n states = None\n break\n states.append(state) # get the final state for the AP\n except (KeyError, ValueError, IndexError):\n # IndexError: can occur in chann_in_use\n # KeyError: can occur in ch, survey_in_use, ch_noise_max, perc_phy_busy_time\n states = None # trigger an Error\n\n self.log.info(\"States: {}\".format(states))\n return states\n\n def encode_action(self, txpower, channel):\n \"\"\"\n @param action: an integer that represents the action\n @return: decoded values of txpower (1 to 15 dBm) and channel (1 to 11)\n \"\"\"\n assert channel > 0 and txpower > 0\n\n action = (channel - 1) * self.NUM_TXPOWER_LEVELS + (txpower - 1)\n return action\n\n def decode_action(self, action):\n \"\"\"\n @param action: an integer that represents the action\n @return: decoded values of txpower (1 to 15 dBm) and channel (1 to 11)\n \"\"\"\n channel = action // self.NUM_TXPOWER_LEVELS + 1\n txpower = action % self.NUM_TXPOWER_LEVELS + 1\n return 
txpower, channel\n\n def setup_device(self, ap, txpower, channel):\n \"\"\" change the tx power and the ap's channel\n\n @param ap: the ap\n @param txpower: tx power (from 1 to 15 dBm)\n @param channel: the 2.4GHz channel number (1 to 11)\n \"\"\"\n assert txpower in range(1, 16)\n assert channel in range(1, 12)\n\n _, data = self.command_ap(ap.name, ap.port, ap.iface, '/get_info')\n ch = data.get('channel', -1)\n if ch not in [-1, channel]:\n # send command to change channel, if the channel is different\n self.log.info(\"last_channnel {} ==> new channel {}\".format(ch, channel))\n self.command_ap(ap.name, ap.port, ap.iface,\n '/set_channel', extra_params={'new_channel': channel})\n else:\n return False\n\n self.command_ap(ap.name, ap.port, ap.iface,\n '/set_power', extra_params={'new_power': txpower})\n\n self.log.info(\"setup_device ** ap {} txpower {} channel {}\".format(ap.name, txpower, channel))\n return True\n\n def make_step(self, actions, retries=5):\n \"\"\"send commands to aps\n @param actions: is a list of number (int) that represents the action to be taken for each AP\n @type actions: list(int)\n @param retries: number of times this function tries to get the next_state from the devices, if unsuccessful then return None in next_state\n @int retries: int\n\n @return: next_state: a (list of) number (int) that represents the next state\n @return: reward: a real number (reward feedback). Reward contains np.nan if an error occurs\n @rtype: list(int), float\n \"\"\"\n assert retries > 0, \"At least one try\"\n self.log.info(\"make_step ** actions {} - type {}\".format(actions, type(actions)))\n\n if self.execute_action:\n # make the move defined in action\n i = 0\n for ap, action in zip(self.aps, actions):\n # decode the number into the actual set of commands\n # send the commands to the ap\n txpower, channel = self.decode_action(action)\n self.setup_device(ap, txpower, channel)\n self.last_channnel[i] = channel\n i += 1\n else:\n # use this to just grab the data from a execution without the interference of the algorithm\n self.log.info(\"******************\")\n self.log.info(\"******************\")\n self.log.info(\"** NO STEP DONE **\")\n self.log.info(\"******************\")\n self.log.info(\"******************\")\n\n # check the new state\n i = 0\n while i < retries:\n new_states = self.get_states()\n if new_states is None:\n i += 1\n time.sleep(self.wait_for_states)\n else:\n i = retries # leave\n\n if new_states is None:\n # error\n return None, [np.nan] # send back error values\n\n # get the reward\n reward = self.reward()\n return new_states, reward\n\n #\n #\n # the INTERFACE\n #\n @abstractmethod\n def get_model(self, model_filename):\n \"\"\" called in the init() code to read the model from a file\n @param model_filename: name of the file that contains the trained model\n @type model_filename: str\n @return: the model\n \"\"\"\n return None\n","repo_name":"h3dema/deepwifi","sub_path":"Environment/generic_ap.py","file_name":"generic_ap.py","file_ext":"py","file_size_in_byte":13784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"26542899850","text":"from papyon.service.AddressBook.scenario.base import BaseScenario\nfrom papyon.service.AddressBook.scenario.base import Scenario\nfrom messenger_contact_add import MessengerContactAddScenario\nfrom update_memberships import UpdateMembershipsScenario\n\nfrom papyon.service.AddressBook.constants import *\nfrom papyon.profile import NetworkID, Membership\n\n__all__ = 
['AcceptInviteScenario']\n\nclass AcceptInviteScenario(BaseScenario):\n def __init__(self, ab, sharing, callback, errback,\n account='',\n memberships=Membership.NONE,\n network=NetworkID.MSN,\n state='Accepted'):\n \"\"\"Accepts an invitation.\n\n @param ab: the address book service\n @param sharing: the membership service\n @param callback: tuple(callable, *args)\n @param errback: tuple(callable, *args)\n \"\"\"\n BaseScenario.__init__(self, Scenario.CONTACT_MSGR_API, callback, errback)\n self.__ab = ab\n self.__sharing = sharing\n\n self.add_to_contact_list = True\n\n self.account = account\n self.memberships = memberships\n self.network = network\n self.state = state\n\n def execute(self):\n if self.add_to_contact_list and not (self.memberships & Membership.FORWARD):\n self.__add_messenger_contact()\n else:\n new_membership = self.memberships | Membership.ALLOW\n self.__update_memberships(None, new_membership)\n\n def __add_messenger_contact(self):\n am = MessengerContactAddScenario(self.__ab,\n (self.__add_contact_callback,),\n self._errback,\n self.account,\n self.network)\n am()\n\n def __update_memberships(self, contact, new_membership):\n um = UpdateMembershipsScenario(self.__sharing,\n (self.__update_memberships_callback, contact),\n self._errback,\n self._scenario,\n self.account,\n self.network,\n self.state,\n self.memberships,\n new_membership)\n um()\n\n def __add_contact_callback(self, contact, memberships):\n memberships &= ~Membership.PENDING\n memberships |= Membership.REVERSE\n self.callback(contact, memberships)\n\n def __update_memberships_callback(self, memberships, contact):\n memberships &= ~Membership.PENDING\n memberships |= Membership.REVERSE\n self.callback(contact, memberships)\n","repo_name":"Kjir/papyon","sub_path":"papyon/service/AddressBook/scenario/contacts/accept_invite.py","file_name":"accept_invite.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"12"} +{"seq_id":"36192107479","text":"m,n = map(int, input().split())\na= list(map(int, input().split()))\nk=[]\nnum=[]\nkol = len(k)\nrez= 0\nmax_ =[]\ndup=[]\nb=[] \nc=[] \nwhile len(a)!=0 :\n print('next')\n print('a=',a)\n print('max_=',max_)\n print('dup=',dup)\n print('num=',num)\n print('rez=',rez )\n if len(dup)==1:\n for i in max_:\n if i != dup[0]:\n b.append(i)\n max_=b\n b=[] \n max_.append(dup[0]+1)\n if len(max_)!=1:\n dup = [x for i, x in enumerate(max_) if i != max_.index(x)]\n if len(dup)==1:\n for j in max_:\n if j != dup[0]:\n c.append(i)\n max_=c\n c=[]\n max_.append(dup[0]+1) \n if len(max_)==1:\n num=max_[0] \n max_.append(max(a))\n k.append(max(a))\n a.pop(a.index(max(a)))\n print(a)\n num=max_[0]\n dup = [x for i, x in enumerate(max_) if i != max_.index(x)]\n rez= num\n print('max_=',max_)\n for i in max_:\n if i== 4:\n break\nprint(rez)\n\n \n \n ","repo_name":"Sl1me621/NTO","sub_path":"stage1/1.8 nto3.py","file_name":"1.8 nto3.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"36098944711","text":"from fileinput import filename\nimport json\nimport os\nfrom transformers import BertTokenizer\nimport torch\nimport torch.utils.data as data\nfrom configure import FLAGS\nE11 = FLAGS.e11\nE12 = FLAGS.e12\nE21 = FLAGS.e21\nE22 = FLAGS.e22\n\ntokenizer = None\n\n\nclass BertEMDataset(data.Dataset):\n\n def __init__(self, file_name, bert_tokenizer, na_rate=0, max_length=512):\n super(BertEMDataset, 
self).__init__()\n self.max_length = max_length\n self.bertTokenizer = bert_tokenizer\n global tokenizer\n tokenizer = self.bertTokenizer\n self.N = 0\n self.K = 0\n self.Q = 0\n\n if not os.path.exists(file_name):\n raise Exception(\"[ERROR] Data file doesn't exist\")\n if not os.path.exists(FLAGS.rel_name):\n raise Exception(\"[ERROR] Relation name file doesn't exist\")\n # print(\"#############################{}#########################\".format(file_name))\n self.json_data = json.load(open(file_name, \"r\"))\n # print(self.json_data)\n # self.data = {}\n # print(\"Finish loading file\")\n self.task_num = len(self.json_data)\n self.id_to_name = json.load(open(FLAGS.rel_name))\n self.process_rel_name(self.id_to_name)\n\n self.__init_process_data__(self.json_data)\n self.na_prompt = FLAGS.na_prompt\n # print(\"Finish init process data\")\n\n def process_rel_name(self, name_dict):\n for key, val in name_dict.items():\n val.replace(\"_\", \" \")\n # val=tokenizer.tokenize(val)\n name_dict[key] = val\n\n def __init_process_data__(self, raw_data):\n\n def insert_and_tokenize(tokenizer, tokens, pos1, pos2, marker1, marker2):\n tokens.insert(pos2[-1]+1, marker2[-1])\n tokens.insert(pos2[0], marker2[0])\n tokens.insert(pos1[-1]+1, marker1[-1])\n tokens.insert(pos1[0], marker1[0])\n # tokens = tokens.copy()\n\n # tokens = tokenizer.tokenize(\" \".join(tokens))\n\n return tokens\n\n def __process_ins(ins):\n pos1 = ins['h'][2][0]\n pos2 = ins['t'][2][0]\n words = ins['tokens']\n\n if pos1[0] > pos2[0]:\n tokens = insert_and_tokenize(self.bertTokenizer, words, pos2, pos1, [\n E21, E22], [E11, E12])\n else:\n tokens = insert_and_tokenize(self.bertTokenizer, words, pos1, pos2, [\n E11, E12], [E21, E22])\n\n # pos1 = [tokens.index(FLAGS.e11), tokens.index(FLAGS.e12)]\n # pos2 = [tokens.index(FLAGS.e21), tokens.index(FLAGS.e22)]\n\n # if len(tokens) >= self.max_length:\n # max_right = max(pos2[-1], pos1[-1])\n # min_left = min(pos1[0], pos2[0])\n # gap_length = max_right-min_left\n # if gap_length+1 > self.max_length:\n # tokens = [FLAGS.e11, FLAGS.e12,\n # FLAGS.e21, FLAGS.e22]\n # elif max_right+1 < self.max_length:\n # tokens = tokens[:self.max_length-1]\n # else:\n # tokens = tokens[min_left:max_right]\\\n\n ins[\"pos1\"] = pos1\n ins['pos2'] = pos2\n ins[\"raw_tokens\"] = ins['tokens']\n ins['tokens'] = tokens\n\n if len(tokens) > self.max_length:\n raise Exception(\"sequence too long\")\n\n for task in raw_data:\n for meta_rel in task['meta_train']:\n for ins in meta_rel:\n __process_ins(ins)\n\n __process_ins(task['meta_test'])\n\n # print(\"init process data finish\")\n\n def __additem__(self, d, instance):\n word = instance['tokens']\n pos1 = instance['pos1']\n pos2 = instance['pos2']\n prompt = instance['prompt']\n # mask = instance['mask']\n\n # pos1 = torch.tensor(pos1).long()\n # pos2 = torch.tensor(pos2).long()\n # word = torch.tensor(word).long()\n # mask = torch.tensor(mask).long()\n d['word'].append(word)\n d['pos1'].append(pos1)\n d['pos2'].append(pos2)\n d['prompt'].append(prompt)\n # d['mask'].append(mask)\n\n def __getitem__(self, index):\n support_set = {'word': [], 'pos1': [],\n 'pos2': [], 'mask': [], 'prompt': []}\n query_set = {'word': [], 'pos1': [],\n 'pos2': [], 'mask': [], 'prompt': []}\n\n task_data = self.json_data[index]\n target_classes = task_data[\"relation\"]\n\n prompt_list = []\n for rel in target_classes:\n prompt_list.append(FLAGS.choice)\n try:\n prompt_list.append(self.id_to_name[rel])\n except KeyError:\n prompt_list.append(rel)\n if FLAGS.na_rate > 0:\n 
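# When negative sampling is enabled, an extra answer option is appended below: a [choice] marker plus the NA prompt text, presumably so queries whose true relation falls outside the episode's target classes can select a none-of-the-above choice.\n 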
prompt_list.append(FLAGS.choice)\n prompt_list.append(self.na_prompt)\n # prompt_str = \" \".join(prompt_list)\n # prompt = tokenizer.tokenize(prompt_str)\n prompt = prompt_list\n\n for meta_rel in task_data['meta_train']:\n for ins in meta_rel:\n ins['prompt'] = prompt\n self.__additem__(support_set, ins)\n task_data['meta_test']['prompt'] = prompt\n self.__additem__(query_set, task_data['meta_test'])\n return support_set, query_set\n\n def __len__(self):\n return self.task_num\n\n\ndef idx_and_mask(batch_sets):\n global tokenizer\n # batch_sets = batch_sets.copy()\n # max_length = compute_max_length(batch_sets)\n sets = []\n batch_choice_idx = []\n for raw_set_item in batch_sets:\n set_item = {'word': [], 'pos1': [],\n 'pos2': [], 'mask': [], 'seg_ids': []}\n words_list = raw_set_item['word']\n prompt_list = raw_set_item['prompt']\n tokens_dict = tokenizer(\n prompt_list, words_list, add_special_tokens=True, is_split_into_words=True, return_tensors='pt', truncation=True, max_length=FLAGS.max_sentence_length, padding=True, return_token_type_ids=True)\n tokens_ids = tokens_dict['input_ids']\n mask = tokens_dict['attention_mask']\n\n set_item['mask'] = mask\n set_item['word'] = tokens_ids\n set_item['seg_ids'] = tokens_dict[\"token_type_ids\"]\n cur_choice_idx = []\n for idx in tokens_ids:\n tokens = tokenizer.convert_ids_to_tokens(idx)\n if len(cur_choice_idx) == 0:\n for j, token in enumerate(tokens):\n if token == FLAGS.choice:\n cur_choice_idx.append(j)\n\n pos1 = [tokens.index(FLAGS.e11), tokens.index(FLAGS.e12)]\n pos2 = [tokens.index(FLAGS.e21), tokens.index(FLAGS.e22)]\n if len(tokens) >= FLAGS.max_full_length:\n max_right = max(pos2[-1], pos1[-1])\n min_left = min(pos1[0], pos2[0])\n gap_length = max_right-min_left+cur_choice_idx[-1]\n if gap_length+1 > FLAGS.max_full_length:\n tokens = tokens[:cur_choice_idx[-1]]+[FLAGS.e11, FLAGS.e12,\n FLAGS.e21, FLAGS.e22]\n elif max_right+1 < FLAGS.max_full_length:\n tokens = tokens[:FLAGS.max_full_length-1]\n else:\n tokens = tokens[:cur_choice_idx[-1]] + \\\n tokens[min_left:max_right]\n pos1 = [tokens.index(FLAGS.e11), tokens.index(FLAGS.e12)]\n pos2 = [tokens.index(FLAGS.e21), tokens.index(FLAGS.e22)]\n\n set_item['pos1'].append(pos1)\n set_item['pos2'].append(pos2)\n set_item['pos1'] = torch.tensor(set_item['pos1'])\n set_item['pos2'] = torch.tensor(set_item['pos2'])\n sets.append(set_item)\n batch_choice_idx.append(cur_choice_idx)\n\n batch_choice_idx = torch.tensor(batch_choice_idx)\n return sets, batch_choice_idx\n\n\ndef collate_fn(data):\n batch_support = {'word': [], 'pos1': [],\n 'pos2': [], 'mask': [], 'seg_ids': []}\n batch_query = {'word': [], 'pos1': [],\n 'pos2': [], 'mask': [], 'seg_ids': []}\n raw_support_sets, raw_query_sets = zip(*data)\n\n # compute max length\n support_sets, choice_idx = idx_and_mask(raw_support_sets)\n query_sets, _ = idx_and_mask(raw_query_sets)\n\n for i in range(len(support_sets)):\n for k in support_sets[i]:\n batch_support[k] += support_sets[i][k]\n for k in query_sets[i]:\n batch_query[k] += query_sets[i][k]\n for k in batch_support:\n batch_support[k] = torch.stack(\n batch_support[k], 0)\n for k in batch_query:\n batch_query[k] = torch.stack(\n batch_query[k], 0)\n\n return batch_support, batch_query, choice_idx\n\n\ndef get_loader(file_path, tokenizer, max_length=FLAGS.max_sentence_length,\n num_workers=2, na_rate=0):\n dataset = BertEMDataset(\n file_path, tokenizer, max_length=max_length, na_rate=na_rate)\n data_loader = data.DataLoader(dataset=dataset,\n shuffle=False,\n pin_memory=True,\n 
num_workers=num_workers,\n collate_fn=collate_fn)\n return iter(data_loader)\n\n\nif __name__ == \"__main__\":\n tokenizer = BertTokenizer.from_pretrained(\n \"bert-large-uncased\", do_basic_tokenize=False)\n tokenizer.add_special_tokens(\n {\"additional_special_tokens\": [FLAGS.e11, FLAGS.e12, FLAGS.e21, FLAGS.e22]})\n # dataset = BertEMDataset(\"data/sample.json\", tokenizer)\n dataloader = get_loader(\"data/sample.json\", tokenizer)\n # for task in dataloader:\n # print(1)\n","repo_name":"fc-liu/MCMN","sub_path":"dataloader/eval_dataloader_multi_choice.py","file_name":"eval_dataloader_multi_choice.py","file_ext":"py","file_size_in_byte":9730,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"12"} +{"seq_id":"4578267148","text":"#!/usr/bin/env python3\nfrom flask import Flask, render_template, request\nfrom bs4 import BeautifulSoup as soup\nimport requests\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n try:\n link_results = djangocon()\n return render_template('index.html', results=link_results)\n except Exception as e:\n return render_template('index.html',results=f'
Exception: {e}
')\n\nsites = []\ndef djangocon():\n req = requests.get('https://2021.djangocon.us')\n bs = soup(req.text, 'lxml')\n desc = bs.header\n \n title = \"Djangocon\"\n date = desc.strong.text\n link = desc.a['href']\n target = desc.a.text\n\n sites.append({ 'title': title, 'target': target, 'link': link, 'date': date })\n\n return sites\n\n\ndef atlantacode():\n req = requests.get('https://www.atlantacodecamp.com/2021')\n bs = soup(req.text, 'lxml')\n container = bs.find_all('div','description')\n desc = container[0].find_all('a')\n\n title = 'Atlanta CodeCamp 2021'\n for item in desc:\n href = item['href']\n if href.startswith('http'):\n pass\n else:\n href = 'https://www.atlantacodecamp.com' + href\n target = item.text\n sites.append({ 'title': title, 'target': target, 'link': href, 'date': 'tbd' })\n\n return \n\ndef grab_links():\n get_res = []\n get_res.append(atlantacode())\n get_res.append(djangocon())\n\n return sites\n\nif __name__ == '__main__':\n app.run()","repo_name":"ronpichardo/conference-events","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25671146173","text":"# manipulate path to work dir\nimport sys, os\nmyPath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, myPath + '/../')\nimport yaml\n\nfrom mine232 import ExclusionRequest\nfrom mine232 import ERTag\n\ndef test_notfound():\n \"\"\"ID is a numerical\"\"\"\n my_er = ExclusionRequest(9999999)\n assert my_er.id == 9999999\n assert my_er.error\n\ndef test_tag_value():\n \"\"\"ID is a numerical\"\"\"\n _request_id = 25635\n my_er = ExclusionRequest(_request_id)\n assert my_er.id == _request_id\n\n # load expected result\n with open(r'tests\\data' + str(_request_id)+'.yaml', encoding='utf-8') as file: \n result = yaml.load(file, Loader=yaml.FullLoader)\n\n for it in my_er.tags:\n # extract tag data and verify it matches expected result\n my_tag = ERTag(it) \n assert str(my_tag.value) == str(result[my_tag.title])","repo_name":"Raphael9999/scraping-section-232","sub_path":"tests/test_mn_tag.py","file_name":"test_mn_tag.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"71391070421","text":"import os\nimport sys\nimport json\nimport time\nimport shutil\nimport getopt\nimport traceback\nfrom multiprocessing import Pool\nfrom multiprocessing.pool import ThreadPool\n\nimport yaml\nimport unicon\nfrom ttp import ttp\nfrom pyats.topology import loader\nimport pyats.utils.yaml.exceptions\nfrom genie.libs.parser.utils.common import ParserNotFound\nfrom genie.metaparser.util.exceptions import SchemaEmptyParserError\n\nfrom ..utils import mqttutils\nfrom ..utils.logger import log\n\n# Set default paths\ntestbed_file = \"/onboard/testbed.yml\"\nbroker_file = \"/onboard/thingsboard.yml\"\n\nDRY_RUN = False # Set to true for data display\nCDP_SAMPLED_ONCE = False # Set to true once we take a first sample of CDP neighbors\nON_PREM_OUTPUT_DIR = os.path.join(os.path.dirname(__file__), \"output\")\n\n\ndef connect_collect(device, commands):\n \"\"\"\n Connects to switch, collects CLI data and saves it on the local disk.\n Returns: data.\n \"\"\"\n\n output_data = {}\n interfaces = []\n d = testbed.devices[device]\n\n try:\n log.info(\"Device testbed: {}\".format(d))\n d.connect(learn_hostname=True, log_stdout=False, init_config_commands=[])\n output_data[\"date\"] = 
int(time.time_ns() / 1000000)\n output_data[\"device\"] = str(device)\n for idx, command in enumerate(commands):\n try:\n if idx == 2: # show power inline %s detail\n # traverse interfaces and run command for each\n for i in interfaces:\n composite_command = command % (i)\n try:\n out = d.parse(composite_command)\n output_data[composite_command] = out\n except SchemaEmptyParserError as e:\n log.error(\n \"{} Failed to parse output of composite command {}: {}\".format(\n str(d), composite_command, e\n )\n )\n output_data[composite_command] = {}\n\n # Save locally: command for power inline detail\n\n # Create device directory\n device_dir = os.path.join(ON_PREM_OUTPUT_DIR, device)\n os.makedirs(device_dir, exist_ok=True)\n\n interface_dir = os.path.join(\n device_dir, \"_\".join(composite_command.split(\" \"))\n )\n\n # Remove old content\n try:\n shutil.rmtree(interface_dir)\n except FileNotFoundError:\n pass\n try:\n os.makedirs(interface_dir)\n except FileExistsError:\n pass\n\n # Save file in directory\n with open(\n os.path.join(\n interface_dir,\n str(output_data[\"date\"]),\n ),\n \"w\",\n encoding=\"utf-8\",\n ) as output_file:\n output_file.write(\n json.dumps(output_data[composite_command])\n )\n\n else:\n if idx != 4 or not CDP_SAMPLED_ONCE:\n try:\n output_data[command] = d.parse(command)\n except SchemaEmptyParserError as e:\n log.error(\n \"{} Failed to parse output of command {}: {}\".format(\n str(d), command, e\n )\n )\n output_data[command] = {}\n if idx == 1 and output_data[command]: # show power inline\n # retain the interface names\n interfaces = output_data[command][\"interface\"].keys()\n\n except ParserNotFound:\n if command == \"show energywise\":\n data = d.execute(command)\n ttp_template = (\n \"\"\"Total: {{ total_usage }} (W), Count: {{ count }}\"\"\"\n )\n parser = ttp(data=data, template=ttp_template)\n parser.parse()\n out = float(parser.result()[0][0][\"total_usage\"])\n output_data[command] = out\n else:\n log.warning('No parser found for command : \"{}\"'.format(command))\n\n finally:\n if idx != 2:\n # Save locally only once: command for CDP neighbors\n if not CDP_SAMPLED_ONCE and idx == 4:\n # Create device directory\n device_dir = os.path.join(\n ON_PREM_OUTPUT_DIR, device, \"show_cdp_neighbors\"\n )\n os.makedirs(device_dir, exist_ok=True)\n\n # Save file in directory\n with open(\n os.path.join(device_dir, str(output_data[\"date\"])),\n \"w\",\n encoding=\"utf-8\",\n ) as output_file:\n output_file.write(json.dumps(output_data[command]))\n\n d.disconnect()\n except unicon.core.errors.ConnectionError:\n log.warning(\"Cannot connect to device {}\".format(device))\n return output_data\n\n\ndef collect(device):\n \"\"\"\n Collects CLI data from switch based on a list of commands.\n The data is saved locally on the disk.\n Returns: data.\n \"\"\"\n\n json_body = {}\n commands = [\n \"show env all\",\n \"show power inline\", # Must be at index 1\n \"show power inline %s detail\", # Must be at index 2\n \"show version\",\n \"show cdp neighbors\", # Must be at index 4\n # \"show energywise\",\n # \"show env temperature status\"\n ]\n payload = connect_collect(device, commands)\n\n try:\n # show env all\n if \"show env all\" in payload:\n switchstack = payload[\"show env all\"][\"switch\"]\n for switch in switchstack:\n log.info(\"Device {} - switch {}.\".format(payload[\"device\"], switch))\n device = \"{}_{}\".format(payload[\"device\"], switch)\n values = {}\n if \"fan\" in switchstack[switch]:\n for fan in switchstack[switch][\"fan\"]:\n 
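# One telemetry key is emitted per fan (keys like 'fan_1_state'), forwarding the state string parsed from 'show env all' unchanged.\n 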
values[\"fan_{}_state\".format(fan)] = switchstack[switch][\"fan\"][\n fan\n ][\"state\"]\n else:\n log.warning(\"No FAN information for {}\".format(device))\n\n if \"hotspot_temperature\" in switchstack[switch]:\n values[\"hotspot_temperature\"] = float(\n switchstack[switch][\"hotspot_temperature\"][\"value\"]\n )\n else:\n log.warning(\n \"No hotspot_temperature information for {}\".format(device)\n )\n\n if \"inlet_temperature\" in switchstack[switch]:\n\n values[\"inlet_temperature\"] = float(\n switchstack[switch][\"inlet_temperature\"][\"value\"]\n )\n else:\n log.warning(\n \"No inlet_temperature information for {}\".format(device)\n )\n\n if \"outlet_temperature\" in switchstack[switch]:\n values[\"outlet_temperature\"] = float(\n switchstack[switch][\"outlet_temperature\"][\"value\"]\n )\n else:\n log.warning(\n \"No outlet_temperature information for {}\".format(device)\n )\n\n # show power inline\n if payload[\"show power inline\"]:\n pw_inline_watts = payload[\"show power inline\"][\"watts\"]\n try:\n values[\"watts_available\"] = int(\n pw_inline_watts[str(switch)][\"available\"]\n )\n values[\"watts_remaining\"] = int(\n pw_inline_watts[str(switch)][\"remaining\"]\n )\n values[\"used\"] = int(pw_inline_watts[str(switch)][\"used\"])\n except KeyError:\n values[\"watts_available\"] = int(\n pw_inline_watts[switch][\"available\"]\n )\n values[\"watts_remaining\"] = int(\n pw_inline_watts[switch][\"remaining\"]\n )\n values[\"used\"] = int(pw_inline_watts[switch][\"used\"])\n\n pw_inline_interfaces = payload[\"show power inline\"][\"interface\"]\n total_power = 0\n for intf in pw_inline_interfaces:\n # TODO(): more robust testing\n if \"{}/0/\".format(switch) in intf:\n values[\"{}_oper_state\".format(intf)] = pw_inline_interfaces[\n intf\n ][\"oper_state\"]\n values[\"{}_power\".format(intf)] = int(\n pw_inline_interfaces[intf][\"power\"]\n )\n values[\"{}_device\".format(intf)] = pw_inline_interfaces[\n intf\n ].get(\"device\", None)\n total_power += int(pw_inline_interfaces[intf][\"power\"])\n values[\"total_interfaces_power\"] = total_power\n else:\n values[\"total_interfaces_power\"] = 0\n\n # show energywise\n # values[\"energywise\"] = float(payload['show energywise'])\n tb_payload = {\"ts\": payload[\"date\"], \"values\": values}\n json_body[device] = [tb_payload]\n except Exception as e:\n log.error(traceback.format_exc())\n log.error(\"Error on device : {}\".format(device))\n\n return json_body\n\n\ndef main(argv):\n \"\"\"Parses arguments and loads metadata.\"\"\"\n\n global client, broker, broker_file, testbed, testbed_file, DRY_RUN, CDP_SAMPLED_ONCE\n\n try:\n opts, args = getopt.getopt(\n argv, \"mtdbp:\", [\"brokerfile=\", \"testbedyml=\", \"dry-run\"]\n )\n except getopt.GetoptError:\n log.error(\n \"streamer_switches.py --brokerfile= --testbedyml=\"\n )\n sys.exit(2)\n for opt, arg in opts:\n if opt in (\"-m\", \"--brokerfile\"):\n broker_file = arg\n if opt in (\"-t\", \"--testbedyml\"):\n testbed_file = arg\n if opt in (\"-d\", \"--dry-run\"):\n DRY_RUN = True\n\n log.info(\"§§§ On-prem-only streaming. 
§§§\")\n os.makedirs(ON_PREM_OUTPUT_DIR, exist_ok=True)\n\n # Load Thingsboard's MQTT broker information\n broker = yaml.load(open(broker_file, encoding=\"utf-8\"), Loader=yaml.Loader)[\n \"broker\"\n ]\n\n # Load the switches testbed file\n try:\n testbed = loader.load(testbed_file)\n except pyats.utils.yaml.exceptions.LoadError as error:\n log.error(\"Failed to load testbed file: %s\", str(error))\n sys.exit(1)\n\n\n# Collect data from switches, save data on the disk\n# and to the Thingsboard's MQTT broker (to Thingsboard: all except APs data).\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n this_file = os.path.basename(__file__)\n\n while True:\n # with ThreadPool(processes=4) as p:\n with Pool(processes=4) as p:\n collections = p.map(collect, testbed.devices)\n\n if DRY_RUN:\n for c in collections:\n print(json.dumps(c))\n continue\n\n # Post data to Thingsboard\n client = mqttutils.create_client(broker, this_file)\n log.info(\"Finished gathering data.\")\n\n # Publish every 5 minutes but give MQTT client\n # 60s time to post messages before disconnecting\n msg_info = mqttutils.publish_collections_telemetry(\n client, collections, sleep_once_s=60\n )\n\n client.disconnect()\n time.sleep(270)\n CDP_SAMPLED_ONCE = True\n","repo_name":"CiscoSE/wifi-power-monitoring","sub_path":"streamer/pyats-power/streamer_switches.py","file_name":"streamer_switches.py","file_ext":"py","file_size_in_byte":12667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"44071284258","text":"from django.shortcuts import render, get_object_or_404\n\nfrom .models import audio\n\nfrom . import algo\n\nimport numpy as np\nfrom scipy.io import wavfile\n\n\ndef index(request):\n audios = audio.objects.all()\n context = {\"audios\": audios}\n return render(request, \"abe/index.html\", context)\n\n\ndef analysis(request):\n audio_name = request.POST[\"audio\"]\n noise_type = request.POST[\"noise\"]\n noise_level = request.POST[\"noise_level\"]\n\n aud_add = audio_name + \"_\" + noise_type + \"_\" + noise_level + \".wav\"\n aud = get_object_or_404(audio, pk=aud_add)\n np_arr, stoi_score = algo.bwe(aud.audio_file)\n\n normalized_arr = np_arr / np.max(np.abs(np_arr))\n scaled_arr = 0.9 * normalized_arr\n\n pred_path = \"audio_output/pred_aud.wav\"\n wavfile.write(pred_path, 16000, scaled_arr)\n\n # maxx = -1\n # minn = 1\n # for i in np_arr:\n # maxx = max(maxx, i)\n # minn = min(minn, i)\n # print(maxx, minn)\n\n return render(\n request,\n \"abe/analysis.html\",\n {\"stoi_score\": stoi_score, \"aud\": aud, \"pred_path\": pred_path},\n )\n","repo_name":"lovish22/Btp_demo","sub_path":"abe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38623402193","text":"# def a(func):\n# print(\"I am a\")\n# def b():\n# print(\"I am b\")\n# func()\n# print(\"I am done\")\n# # b()\n# @a\n# def show():\n# pass\n# show()\n\n\nimport pygame , sys\npygame.init()\nscreen=pygame.display.set_mode((400,500))\nclock=pygame.time.Clock()\nwhile True:\n for events in pygame.event.get():\n if events.type==pygame.QUIT:\n pygame.quit()\n sys.exit()\n pygame.display.update()\n clock.tick(60)\n","repo_name":"AtitSharma/Learning_Python","sub_path":"anan.py","file_name":"anan.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25452650924","text":"# -*- 
coding: utf-8 -*-\r\n# Defines functions for model prediction and model evaluation\r\n\r\n#%%\r\n# Module imports\r\nimport numpy as np\r\nimport pandas as pd # needed for the pd.DataFrame calls in the evaluation helpers below\r\nfrom operator import itemgetter\r\nfrom sklearn.metrics import classification_report\r\n\r\n# Function definitions\r\ndef model_predict(clf, vec):\r\n '''\r\n Takes a vector and returns the predicted label\r\n :param clf: classifier\r\n :param vec: vector\r\n :return: str, predicted label\r\n '''\r\n return clf.predict([vec])[0]\r\n\r\n\r\ndef multilabelmodel_predict(clf, vec, mlb):\r\n '''\r\n Takes a vector and returns the predicted multilabel\r\n :param clf: classifier\r\n :param vec: vector\r\n :param mlb: MultiLabelBinarizer\r\n :return: tuple of predicted labels\r\n '''\r\n y = clf.predict([vec])\r\n tuple_multilabel_predict = mlb.inverse_transform(np.array(y))[0]\r\n return tuple_multilabel_predict\r\n\r\n\r\ndef model_predict_proba(clf, vec):\r\n '''\r\n Takes a vector and returns the list of label probabilities\r\n :param clf: classifier\r\n :param vec: vector\r\n :return: list of probability\r\n '''\r\n return clf.predict_proba([vec])[0]\r\n\r\n\r\ndef multilabelmodel_predict_proba(clf, vec, mlb, existproba_only=True):\r\n '''\r\n Takes a vector and returns the list of multilabel probabilities\r\n :param clf: classifier\r\n :param vec: vector\r\n :param mlb: MultiLabelBinarizer\r\n :param existproba_only: output format flag (if True, output only the probability that each label exists)\r\n :return: list of probability that each label exists\r\n '''\r\n array_raw = clf.predict_proba([vec])\r\n\r\n if existproba_only:\r\n #output_array = [ele[0][1] for ele in array_raw]\r\n output_array = [1.0 - ele[0][0] for ele in array_raw]\r\n\r\n else:\r\n output_array = [ele[0] for ele in array_raw]\r\n\r\n return output_array\r\n\r\n\r\ndef model_predict_probapair(clf, vec, allpair=False, top=5):\r\n '''\r\n Takes a vector and returns (label, probability) pairs\r\n :param clf: classifier\r\n :param vec: vector\r\n :param allpair: if True, output all pairs; if False, output the top pairs limited by top\r\n :param top: int, number of pairs to output\r\n :return: list, list of (label, probability) pair\r\n '''\r\n\r\n if allpair:\r\n classes = clf.classes_\r\n proba = clf.predict_proba([vec])[0]\r\n return list(zip(classes, proba))\r\n else:\r\n classes = clf.classes_\r\n proba = clf.predict_proba([vec])[0]\r\n allpairlist = list(zip(classes, proba))\r\n return sorted(allpairlist, key=itemgetter(1), reverse=True)[0:min(top, len(proba))]\r\n\r\n\r\ndef multilabelmodel_predict_probapair(clf, vec, mlb, allpair=False, proba_threshold=0.1, top=10):\r\n '''\r\n Takes a vector and returns (multilabel, probability) pairs\r\n :param clf: classifier\r\n :param vec: vector\r\n :param mlb: MultiLabelBinarizer\r\n :param allpair: if True, output all pairs; if False, output only pairs above the threshold, limited by top\r\n :param proba_threshold: probability threshold restricting which pairs are output\r\n :param top: int, maximum number of pairs to output\r\n :return: list, list of (label, probability) pair\r\n '''\r\n classes = mlb.classes_.tolist()\r\n array_raw = clf.predict_proba([vec])\r\n #proba = [ele[0][1] for ele in array_raw]\r\n proba = [1.0 - ele[0][0] for ele in array_raw]\r\n \r\n allpairlist = list(zip(classes, proba))\r\n\r\n if allpair:\r\n return allpairlist\r\n else:\r\n pairlist = []\r\n for tuple in allpairlist:\r\n if tuple[1] > proba_threshold:\r\n pairlist.append(tuple)\r\n else:\r\n pass\r\n return sorted(pairlist, key=itemgetter(1), reverse=True)[0:min(top, len(pairlist))]\r\n\r\n\r\ndef multilabelmodel_predict_toplist(clf, vec, mlb, proba_threshold=0.1, top=10):\r\n '''\r\n Takes a vector and returns the list of top-probability multilabels\r\n :param clf: classifier\r\n :param vec: vector\r\n :param mlb: MultiLabelBinarizer\r\n :param proba_threshold: probability threshold deciding whether a label is output\r\n :param top: int, maximum number of pairs to output\r\n :return: list, list of label\r\n '''\r\n classes = mlb.classes_.tolist()\r\n array_raw = clf.predict_proba([vec])\r\n
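 # NOTE (assumption): predict_proba is expected to return one array per label,\r\n # each with columns [P(label absent), P(label present)], as in one-vs-rest\r\n # style multilabel classifiers; 1.0 - P(absent) is used below instead of\r\n # indexing column 1 so that a single-column output (a per-label classifier\r\n # fit on only one class) does not raise an IndexError.\r\n 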
#proba = [ele[0][1] for ele in array_raw]\r\n proba = [1.0-ele[0][0] for ele in array_raw]\r\n \r\n allpairlist = list(zip(classes, proba))\r\n\r\n pairlist = []\r\n for tuple in allpairlist:\r\n if tuple[1] > proba_threshold:\r\n pairlist.append(tuple)\r\n else:\r\n pass\r\n toppairlist = sorted(pairlist, key=itemgetter(1), reverse=True)[0:min(top, len(pairlist))]\r\n toplist = []\r\n for probapair in toppairlist:\r\n toplist.append((probapair[0]))\r\n\r\n return toplist\r\n\r\n\r\ndef model_predict_maxprobapair(clf, vec):\r\n '''\r\n Takes a vector and returns the (predicted label, probability) pair\r\n :param clf: classifier\r\n :param vec: vector\r\n :return: tuple of (label, probability)\r\n '''\r\n classes = clf.classes_\r\n proba = clf.predict_proba([vec])[0]\r\n allpair = dict(zip(classes, proba))\r\n predict = clf.predict([vec])[0]\r\n maxproba = allpair[predict]\r\n return (predict, maxproba)\r\n\r\n\r\ndef model_predict_maxproba(clf, vec):\r\n '''\r\n Takes a vector and returns the probability of the predicted label\r\n :param clf: classifier\r\n :param vec: vector\r\n :return: float, probability of predicted label\r\n '''\r\n classes = clf.classes_\r\n proba = clf.predict_proba([vec])[0]\r\n allpair = dict(zip(classes, proba))\r\n predict = clf.predict([vec])[0]\r\n maxproba = allpair[predict]\r\n return maxproba\r\n\r\n\r\ndef model_predict_toplist(clf, vec, proba_threshold=0.1, top=10):\r\n '''\r\n Takes a vector and returns the list of top-probability labels\r\n :param clf: classifier\r\n :param vec: vector\r\n :param proba_threshold: probability threshold deciding whether a label is output\r\n :param top: int, maximum number of pairs to output\r\n :return: list, list of label\r\n '''\r\n classes = clf.classes_\r\n array_raw = clf.predict_proba([vec])\r\n proba = array_raw[0]\r\n allpairlist = list(zip(classes, proba))\r\n\r\n pairlist = []\r\n for tuple in allpairlist:\r\n if tuple[1] > proba_threshold:\r\n pairlist.append(tuple)\r\n else:\r\n pass\r\n toppairlist = sorted(pairlist, key=itemgetter(1), reverse=True)[0:min(top, len(pairlist))]\r\n toplist = []\r\n for probapair in toppairlist:\r\n toplist.append((probapair[0]))\r\n\r\n return toplist\r\n\r\n\r\ndef model_evaluation_summary(clf, x_test, y_test):\r\n '''\r\n Displays the classifier's evaluation results and the per-sample correctness report\r\n :param clf: classifier\r\n :param x_test:\r\n :param y_test:\r\n '''\r\n\r\n y_predict = clf.predict(x_test)\r\n\r\n print('Score: ', clf.score(x_test, y_test))\r\n print(classification_report(y_test, y_predict))\r\n\r\n\r\ndef model_evaluation_groupname_detail(clf, x_test, y_test, i_test, df_sample):\r\n # Show the detailed test results\r\n y_predict = clf.predict(x_test)\r\n # y_proba = clf.predict_proba(x_test)\r\n y_proba = [model_predict_proba(clf, vec) for vec in x_test]\r\n y_maxproba = [model_predict_maxproba(clf, vec) for vec in x_test]\r\n y_maxprobapair = [model_predict_maxprobapair(clf, vec) for vec in x_test]\r\n\r\n df_test_result = pd.DataFrame(list(zip(x_test, y_test, y_predict, y_proba, y_maxproba, y_maxprobapair)),\r\n columns=['vector', 'Answer', 'Predict', 'Proba', 'MaxProba', 'MaxProbaPair'], index=i_test)\r\n\r\n df_test_result = df_test_result.join(df_sample['dataset_title']).join(df_sample['dataset_description'])\r\n df_test_result = df_test_result[['dataset_title', 'dataset_description', 'vector', 'Answer', 'Predict', 'Proba', 'MaxProba', 'MaxProbaPair']]\r\n\r\n return df_test_result\r\n\r\n\r\ndef model_evaluation_tag_detail(clf, mlb, x_test, y_test, i_test, df_sample, proba=False):\r\n # Show the detailed test results\r\n y_predict = clf.predict(x_test)\r\n y_predict_tag = mlb.inverse_transform(y_predict)\r\n # y_proba = clf.predict_proba(x_test)\r\n y_test_tag = mlb.inverse_transform(y_test)\r\n\r\n
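 # Computing per-sample probabilities is optional: predict_proba is called\r\n # once per test vector below, which can be slow on large test sets.\r\n 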
if proba:\r\n y_proba = [clf.predict_proba([ele]) for ele in x_test]\r\n df_test_result = pd.DataFrame({'vector':x_test.tolist(),\r\n 'Test':y_test.tolist(),\r\n 'Predict':y_predict.tolist(),\r\n 'Proba':y_proba,\r\n 'Test_tag':y_test_tag,\r\n 'Predict_tag':y_predict_tag}, index=i_test)\r\n \r\n df_test_result = df_test_result.join(df_sample['dataset_title']).join(df_sample['dataset_description'])\r\n df_test_result = df_test_result[['dataset_title', 'dataset_description', 'vector', 'Test_tag', 'Predict_tag', 'Test', 'Predict', 'Proba']]\r\n else:\r\n df_test_result = pd.DataFrame({'vector': x_test.tolist(),\r\n 'Test': y_test.tolist(),\r\n 'Predict': y_predict.tolist(),\r\n 'Test_tag': y_test_tag,\r\n 'Predict_tag': y_predict_tag}, index=i_test)\r\n\r\n df_test_result = df_test_result.join(df_sample['dataset_title']).join(df_sample['dataset_description'])\r\n df_test_result = df_test_result[\r\n ['dataset_title', 'dataset_description', 'vector', 'Test_tag', 'Predict_tag', 'Test', 'Predict']]\r\n\r\n return df_test_result\r\n","repo_name":"CADDE-sip/catalog_tool","sub_path":"volumes/catalog_tool_ml/model_evaluation.py","file_name":"model_evaluation.py","file_ext":"py","file_size_in_byte":9756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"9336274588","text":"import subprocess\nimport json\nimport os\n\ndef test(params):\n with open('params.json', 'wt') as file:\n json.dump(params, file)\n\n args = [\n 'python3',\n '-m',\n 'multiproc',\n 'train_cifar10.py ',\n '--cycle-len ',\n '40',\n ' -j',\n ' 16',\n ' -b',\n ' 128',\n ' --loss-scale',\n ' 512',\n ' --use-tta',\n ' 1',\n ' --fp16',\n ' --arch',\n ' resnet18',\n ' --wd',\n str(params['weight_decay']),\n ' --lr',\n str(params['learning_rate']),\n ' --use-clr',\n ' 50,12.5,0.95,0.85',\n ' data/'\n ]\n\n subprocess.run([' '.join(args)], cwd=os.getcwd(), shell=True)\n\n with open('tta_accuracy.txt') as file:\n lines = file.readlines()\n accuracies = [float(line) for line in lines]\n total = 0\n for accuracy in accuracies:\n total += accuracy\n averageAccuracy = total / len(accuracies)\n\n subprocess.run(['rm', 'tta_accuracy.txt'])\n\n return {\"loss\": 1.0 - averageAccuracy}\n","repo_name":"genixpro/hypermax","sub_path":"research/searches/cifar_resnet/cifar_test.py","file_name":"cifar_test.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"12"} +{"seq_id":"38167552102","text":"from botbuilder.schema import Attachment\n\n\nclass PatientDemographics:\n \"\"\"\n This is our application state. 
Just a regular serializable Python class.\n \"\"\"\n\n def __init__(self, gender_at_birth: str = None, transport: str = None, age: int = 0, age_unit: str = \"years\"):\n self.gender_at_birth = gender_at_birth\n self.transport = transport\n self.age = age\n self.age_unit = age_unit\n","repo_name":"ericmc21/infer_azure_bot","sub_path":"infer_bot/weather/data_models/patient_demographics.py","file_name":"patient_demographics.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34679662960","text":"from django.contrib import admin\r\n\r\nfrom .models import Category , Product\r\n\r\nadmin.site.site_header = 'My Django Panel'\r\n\r\n\r\n# admin panel actions\r\n\r\ndef make_published(modeladmin , request , queryset):\r\n\r\n rows_updated = queryset.update(status='p')\r\n if rows_updated == 1 :\r\n message_bit = \" one product was published\"\r\n\r\n else:\r\n message_bit = f'{rows_updated} products were published '\r\n\r\n modeladmin.message_user(request , message_bit)\r\n\r\n\r\nmake_published.short_description = 'make published'\r\n\r\n\r\ndef make_draft(modeladmin , request , queryset):\r\n\r\n rows_updated = queryset.update(status='d')\r\n if rows_updated == 1 :\r\n message_bit = \" one product was drafted\"\r\n\r\n else:\r\n message_bit = f'{rows_updated} products were drafted '\r\n\r\n modeladmin.message_user(request , message_bit)\r\n\r\n\r\n# ============================================================================================\r\n\r\n\r\nclass ProductAdmin(admin.ModelAdmin):\r\n list_display = ['title' ,'get_thumbnail', 'status', 'price','admin', 'created' , 'cat_to_str' ]\r\n list_filter = ['status']\r\n search_fields = ['title' , 'slug']\r\n prepopulated_fields ={ 'slug' :('title' , )}\r\n actions = [make_published , make_draft]\r\n\r\n\r\nadmin.site.register(Product , ProductAdmin)\r\n\r\n\r\nclass CategoryAdmin(admin.ModelAdmin):\r\n list_display = ['title' ,'get_thumbnail', 'status','parent' ]\r\n list_filter = ['status']\r\n search_fields = ['title' , 'slug']\r\n prepopulated_fields ={ 'slug' :('title' , )}\r\n\r\n\r\nadmin.site.register(Category , CategoryAdmin)\r\n","repo_name":"Arshiwya/FirstDjango","sub_path":"products/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"3372461114","text":"import socket # import the socket module\nfrom multiprocessing.dummy import Pool as ThreadPool # import the ThreadPool module, used to handle requests on multiple threads\nimport io # import the io module, used for sending and receiving HTTP messages\nimport traceback # import the traceback module, used for printing error information\nimport logging # import the logging module, used for logging\n\n\nclass Server(object):\n # Class variable SERVER_STRING stores the server's name and version\n SERVER_STRING = b\"Server: SimpleHttpd/1.0.0\\r\\n\"\n\n def __init__(self, host, port, worker_count=4):\n self._host = host # store the host name\n self._port = port # store the port number\n self._listen_fd = None # will hold the listening socket\n self._worker_count = worker_count # store the number of worker threads\n self._worker_pool = ThreadPool(worker_count) # create a worker thread pool of the given size\n self._logger = logging.getLogger(\"simple.httpd\") # create the logger\n self._logger.setLevel(logging.DEBUG) # set the log level to DEBUG\n self._logger.addHandler(logging.StreamHandler()) # add a handler that writes logs to the console\n\n def run(self):\n self._listen_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a TCP/IP socket\n self._listen_fd.bind((self._host, self._port)) # bind the IP address and port\n
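 # NOTE: SO_REUSEADDR is not set on the socket, so an immediate restart can fail with an address-in-use error while the previous socket lingers in TIME_WAIT.\n 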
self._listen_fd.listen(self._worker_count) # listen for client connection requests\n try:\n while True:\n conn, addr = self._listen_fd.accept() # accept a client connection; returns a new socket and the peer address\n self._worker_pool.apply_async(self.accept_request, (conn, addr,)) # hand the connection socket and address to a worker thread for asynchronous processing\n except Exception as e:\n traceback.print_exc() # print the exception stack trace\n finally:\n self._listen_fd.close() # close the listening socket\n\n def accept_request(self, conn: socket.socket, addr):\n try:\n method, path, http_version, req_headers, req_body = self.recv_request(conn)\n status_holder = {\"status\": \"\"} # mutable holder so start_response can report the status back for logging\n environ = {\n 'REQUEST_METHOD': method, # request method, e.g. 'POST'\n 'SCRIPT_NAME': '', # script name\n 'PATH_INFO': path, # path info, e.g. '/hello/world'\n 'QUERY_STRING': '', # query string, e.g. 'a=1&b=2'\n 'CONTENT_TYPE': '', # request body type, e.g. 'application/json'\n 'CONTENT_LENGTH': len(req_body), # request body length (in bytes)\n 'SERVER_NAME': self._host, # server host name\n 'SERVER_PORT': str(self._port), # server port number\n 'HTTP_HOST': req_headers.get('Host', ''),\n 'HTTP_USER_AGENT': req_headers.get('User-Agent', ''), # the 'User-Agent' field of the HTTP request headers\n 'HTTP_ACCEPT': req_headers.get('Accept', ''), # the 'Accept' field of the HTTP request headers\n 'HTTP_ACCEPT_LANGUAGE': req_headers.get('Accept-Language', ''), # the 'Accept-Language' field of the HTTP request headers\n 'HTTP_ACCEPT_ENCODING': req_headers.get('Accept-Encoding', ''), # the 'Accept-Encoding' field of the HTTP request headers\n 'HTTP_CONNECTION': req_headers.get('Connection', '') # the 'Connection' field of the HTTP request headers\n }\n environ['wsgi.input'] = io.BytesIO(req_body)\n body = self.wsgi_application(environ, self.build_start_response(conn, status_holder))\n for bt in body:\n self.send_response(conn, None, bt, None)\n self._logger.info(\"{}:{} {} {} {} {}\".format(addr[0], addr[1], http_version, method, path, status_holder[\"status\"])) # write an access-log entry\n except Exception as e:\n traceback.print_exc() # print the exception stack trace\n finally:\n conn.close() # close the connection socket\n\n def wsgi_application(self, environ, start_response):\n data = b\"Hello, World!\\n\"\n start_response(\"200 OK\", [\n (\"Content-Type\", \"text/plain\"),\n (\"Content-Length\", str(len(data)))\n ])\n return [data]\n\n def build_start_response(self, conn, status_holder):\n def start_response(status, headers):\n status_holder[\"status\"] = status # record the response status for the caller's access log\n self.send_response(conn, status=status, headers=headers)\n return start_response\n\n def send_response(self, conn: socket.socket, status=None, body=None, headers=None):\n if status is not None:\n conn.sendall(\"HTTP/1.0 {}\\r\\n\".format(status).encode())\n conn.sendall(self.SERVER_STRING)\n if headers is not None:\n for header in headers:\n conn.sendall(\"{}: {}\\r\\n\".format(*header).encode())\n if body is not None:\n if not isinstance(body, bytes):\n body = str(body).encode()\n conn.sendall(b\"\\r\\n\")\n conn.sendall(body)\n\n def recv_request(self, conn: socket.socket):\n # read the request line\n line = b''\n while not line.endswith(b'\\r\\n'):\n data = conn.recv(1)\n if not data:\n raise ConnectionError('Connection closed unexpectedly')\n line += data\n method, path, version = line.strip().decode().split(' ', 2)\n\n # read the request headers\n headers = {}\n while True:\n line = b''\n while not line.endswith(b'\\r\\n'):\n data = conn.recv(1)\n if not data:\n raise ConnectionError('Connection closed unexpectedly')\n line += data\n if line == b'\\r\\n':\n break\n key, value = line.strip().decode().split(': ', 1)\n headers[key] = value\n\n # read the request body\n content_length = int(headers.get('Content-Length', '0'))\n if content_length > 0:\n body = conn.recv(content_length)\n else:\n body = b\"\"\n\n # return the request line, headers and body\n return method, path, version, headers, body\n\n\nif __name__ == \"__main__\":\n server = Server(\"0.0.0.0\", 3000) # create the server instance\n server.run() # start the server
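\n# Usage sketch (assumes the server is running locally on port 3000):\n# $ curl -i http://127.0.0.1:3000/\n# HTTP/1.0 200 OK\n# Server: SimpleHttpd/1.0.0\n# Content-Type: text/plain\n# Content-Length: 14\n#\n# Hello, World!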
\n","repo_name":"Arvintian/learn-python-web-in-hard-way","sub_path":"src/simple_wsgi.py","file_name":"simple_wsgi.py","file_ext":"py","file_size_in_byte":6125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"628612779","text":"class Solution:\n def findKDistantIndices(self, nums: List[int], key: int, k: int) -> List[int]:\n idxs = []\n for i in range(len(nums)):\n if nums[i] == key: idxs += [i]\n \n res = []\n for i in range(len(nums)):\n if not idxs: break\n if i > idxs[0]+k: idxs.pop(0)\n \n if idxs and (idxs[0]-k <= i <= idxs[0]+k):\n res += [i]\n \n return res","repo_name":"LYcheck/competitive-programming","sub_path":"LeetCode/Find All K-Distant Indices in an Array (E).py","file_name":"Find All K-Distant Indices in an Array (E).py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"19377159662","text":"import xarray as xr\r\n\r\nrain_netCDF = r'C:\\LUCI_data\\Aparima\\VCSN\\vcsn_rain_aparima_19900101_20181015.nc'\r\npet_netCDF = r'C:\\LUCI_data\\Aparima\\VCSN\\vcsn_pet_aparima_19900101_20181015.nc'\r\n\r\nds_rain = xr.open_dataset(rain_netCDF)\r\nrain = ds_rain['rain']\r\n\r\nds_pet = xr.open_dataset(pet_netCDF)\r\npet = ds_pet['pet']\r\n\r\nrain_time = []\r\nfor t in range(0, 10470):\r\n\train_time.append((t, float(rain[t][10][10])))\r\n\r\nrain_time.sort(key=lambda tup: tup[1])\r\nprint(rain_time)\r\n'''\r\n\r\npet_time = []\r\nfor t in range(0, 10470):\r\n\tpet_time.append((t, float(pet[t][0][0])))\r\n\r\npet_time.sort(key=lambda tup: tup[1])\r\nprint(pet_time)\r\n'''\r\n\r\nds_rain.close()\r\nds_pet.close()\r\n\r\n\r\n","repo_name":"niwa/interoperable_land_water_models","sub_path":"Components/LUCI_rainfall-runoff/find_max_value.py","file_name":"find_max_value.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"24230204803","text":"import os\nimport time\nimport config\nimport tensorflow as tf\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom IPython import display\nfrom src.wgan_gp.model import build_generator, build_critic\nfrom src.utils.utils import write_loss_summaries, get_folder_or_create\n\n# Build models.\ngenerator = build_generator(latent_dim=config.NOISE_DIM, out_activation='sigmoid')\ndiscriminator = build_critic(input_shape=config.INPUT_SIZE + (3,))\n\n# Set optimizers.\ngen_optimizer = tf.keras.optimizers.Adam(config.LEARNING_RATE, 0.5, 0.9)\ndisc_optimizer = tf.keras.optimizers.Adam(config.LEARNING_RATE, 0.5, 0.9)\n\n# Generate noise for image generation. 
To be used to check\n# produced images over time.\nseed = tf.random.normal([config.NUM_EXAMPLES_TO_GENERATE, config.NOISE_DIM])\n\nimage = tf.Variable(\n initial_value=tf.ones((config.BATCH_SIZE, 128, 128, 3)),\n shape=tf.TensorShape((config.BATCH_SIZE, 128, 128, 3)),\n trainable=False)\n\nnum_epochs = tf.Variable(0, trainable=False)\n\n# Define summary paths.\nsummary_dir = get_folder_or_create(path=config.SUMMARY_PATH, name=config.MODEL_NAME)\nsummary_train_path = os.path.join(summary_dir, \"train\")\n\n# Define checkpoint object.\ncheckpoint_dir = get_folder_or_create(path=config.SAVE_PATH, name=config.MODEL_NAME)\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\ncheckpoint = tf.train.Checkpoint(\n generator_optimizer=gen_optimizer,\n discriminator_optimizer=disc_optimizer,\n generator=generator,\n discriminator=discriminator,\n num_epoch_used=num_epochs)\n\n\ndef gradient_penalty_loss(gradients):\n \"\"\"Returns the gradient penalty loss, which is a soft\n version of the Lipschitz constraint. It improves stability\n by penalizing gradients with large norm values.\"\"\"\n gradients_sqr = tf.square(gradients)\n gradients_sqr_sum = tf.reduce_sum(gradients_sqr, axis=[1, 2, 3]) # sum over 3 last dimensions of image\n gradients_l2_norm = tf.sqrt(gradients_sqr_sum)\n gradient_penalty = tf.square(1. - gradients_l2_norm)\n return tf.reduce_mean(gradient_penalty)\n\n\ndef discriminator_loss(real_output, generated_output, gradients, lambda_=10):\n \"\"\"Returns the negative Wasserstein distance between the\n generator distribution and the data distribution, with\n gradient penalty loss.\"\"\"\n real_loss = -tf.reduce_mean(real_output)\n fake_loss = tf.reduce_mean(generated_output)\n gp_loss = gradient_penalty_loss(gradients)\n return real_loss + fake_loss + gp_loss * lambda_, gp_loss\n\n\ndef generator_loss(generated_output):\n return -tf.reduce_mean(generated_output)\n\n\ndef random_weighted_average(real_images, fake_images):\n \"\"\"Returns average between real and fakes images\n (weighted by alpha).\"\"\"\n alpha = tf.random.uniform((real_images.shape[0], 1, 1, 1))\n return (alpha * real_images) + ((1 - alpha) * fake_images)\n\n\ndef generate_and_save_images(model, epoch, test_input, img_path):\n \"\"\" Saves generated image.\n NOTE:`training` is set to False. 
This is so all\n layers run in inference mode (batch_norm).\"\"\"\n matplotlib.use(\"Agg\")\n predictions = model(test_input, training=False) # (num_examples, 128, 128, 3)\n\n _ = plt.figure(figsize=(4, 4))\n for i in range(predictions.shape[0]):\n plt.subplot(4, 4, i + 1)\n plt.imshow(tf.cast(predictions[i] * 255, tf.int32))\n plt.axis('off')\n\n img_name = 'image_at_epoch_{:04d}.png'.format(epoch)\n plt.savefig(os.path.join(img_path, img_name), dpi=500, bbox_inches=\"tight\")\n plt.close()\n\n\n@tf.function\ndef train_discriminator(images, noise, gp_lambda=10, optimize=True):\n \"\"\"Optimizes discriminator model and return associated loss.\"\"\"\n with tf.GradientTape() as disc_tape, tf.GradientTape() as _:\n # Get discriminator image from fake and real images.\n fakes = generator(noise)\n disc_fake = discriminator(fakes)\n disc_real = discriminator(images)\n\n # Get interpolated image between fake and real\n # discriminations (NOTE: used for gradient penalty).\n interpolated_img = random_weighted_average(images, fakes)\n image.assign(interpolated_img)\n\n with tf.GradientTape() as nested_tape:\n nested_tape.watch(image)\n # Run the forward pass of the layer.\n # The operations that the layer applies\n # to its inputs are going to be recorded\n # on the GradientTape.\n disc_validity_interpolated = discriminator(image)\n nested_gradients = nested_tape.gradient(disc_validity_interpolated, image)\n\n # Get discriminator loss with gradient penalty.\n disc_loss, gp_loss = discriminator_loss(disc_real, disc_fake, tf.stack(nested_gradients), gp_lambda)\n\n if optimize:\n # Optimize.\n grad_disc = disc_tape.gradient(disc_loss, discriminator.trainable_variables)\n disc_optimizer.apply_gradients(zip(grad_disc, discriminator.trainable_variables))\n\n return disc_loss, gp_loss\n\n\n@tf.function\ndef train_generator(noise, optimize=True):\n \"\"\"Optimizes generator model and returns associated loss.\"\"\"\n with tf.GradientTape() as gen_tape:\n fakes = generator(noise)\n generated_output = discriminator(fakes)\n gen_loss = generator_loss(generated_output)\n\n if optimize:\n grad_gen = gen_tape.gradient(gen_loss, generator.trainable_variables)\n gen_optimizer.apply_gradients(zip(grad_gen, generator.trainable_variables))\n\n return gen_loss\n\n\ndef train(data_generator, batch_size, epochs, checkpoint_model_dir, valid_step,\n noise_dim, gp_lambda, img_interval_saving=50, epoch_interval_saving=5,\n n_critic=5, augment=False, restore_model=None):\n \"\"\"Trains generator and discriminator models alternatively given\n n_critic.\n\n Args:\n data_generator: the data generator.\n batch_size (int): the batch size.\n epochs (int): number of training epochs.\n checkpoint_model_dir (str): directory path to save checkpoints.\n valid_step: step for validation and/or displaying the loss\n results (e.g. if the step is 5, loss and metric values\n are averaged over 5 step prior to being displayed).\n noise_dim (int): noise dimension for image generation.\n gp_lambda (float): coefficient controlling the magnitude of the\n gradient penalty added to the discriminator loss.\n img_interval_saving (int): epoch interval at which a generated\n image is saved (for visual inspection).\n epoch_interval_saving (int): epoch interval at which the model\n weights are saved.\n n_critic (int): step at which training shifts between discriminator\n and generator. 
5 in original paper, 8 to speed-up training.\n augment (bool): if True, apply augmentation to data.\n restore_model (str): the directory path of model weights to restore.\n \"\"\"\n if restore_model is not None:\n # Path to model.\n directory_path = os.path.join(checkpoint_model_dir, restore_model)\n checkpoint.restore(tf.train.latest_checkpoint(directory_path)).expect_partial()\n print(f\"Model {restore_model} restored.\")\n\n # Add number of epochs from previously train\n # model to current number of epochs defined\n # for training.\n try:\n ep0 = checkpoint.num_epoch_used\n epochs = [ep0.numpy(), ep0.numpy() + epochs]\n num_epochs.assign(ep0)\n print(f\"Epoch index starts from {ep0} given restored model.\")\n except AttributeError:\n print(\"Number of epochs in checkpoint does not exist.\")\n epochs = [epochs]\n else:\n epochs = [epochs]\n\n # Delete previous summary event files from given folder.\n # Useful if training experiments require using same\n # summary output directories.\n try:\n for directory in [summary_train_path]:\n existing_summary_files = os.walk(directory).__next__()[-1]\n if existing_summary_files:\n for file in existing_summary_files:\n os.remove(os.path.join(directory, file))\n except (PermissionError, StopIteration):\n pass\n\n # Delete previous summary images.\n try:\n existing_images = os.walk(summary_dir).__next__()[-1]\n existing_images = [img for img in existing_images if img.split(\".\")[-1] == \"png\"]\n if existing_images:\n for file in existing_images:\n os.remove(os.path.join(summary_dir, file))\n except (PermissionError, StopIteration):\n pass\n\n # Create summary writers\n sum_train_writer = tf.summary.create_file_writer(summary_train_path)\n\n print('\\n∎ Training')\n gen_loss = 0.\n num_train_data = (len(data_generator.train_indices) // batch_size) + 1\n for epoch in range(*epochs):\n # Mean losses to display while training given\n # the summary iteration interval.\n avg_losses = tf.zeros(shape=(4,))\n\n for step in range(num_train_data):\n start = time.time()\n\n # Generate batch of training data.\n # objects: (B, W, H, F)\n objects = data_generator.next_batch(\n batch_size=batch_size,\n augment=augment)\n\n # Process training step.\n disc_loss, gp_loss = train_discriminator(\n images=tf.cast(objects, tf.float32),\n noise=tf.random.normal([batch_size, noise_dim]),\n gp_lambda=tf.constant(gp_lambda),\n optimize=tf.constant(True))\n\n if step % n_critic == 0:\n gen_loss = train_generator(\n noise=tf.random.normal([batch_size, noise_dim]),\n optimize=tf.constant(True))\n\n # Make loss and metric vectors.\n loss_vector = tf.concat([[gen_loss + disc_loss], [gen_loss], [disc_loss], [gp_loss]], axis=0)\n loss_names = ['total_loss', 'gen_loss', 'disc_loss', 'gp_loss']\n\n avg_losses += loss_vector / valid_step\n\n # Write summaries with global step.\n global_step = step + (epoch * num_train_data)\n write_loss_summaries(\n values=loss_vector,\n names=loss_names,\n writer=sum_train_writer,\n step=tf.cast(global_step, tf.int64))\n\n # Measure training loop execution time\n end = time.time()\n speed = round(end - start, 2)\n\n # Display results at given interval.\n if (step % valid_step) == 0:\n if step == 0:\n avg_losses = loss_vector\n\n # Display current losses.\n text = f\"{step + 1}/{num_train_data} (epoch: {epoch + 1}): \"\n text += 'total_loss: {:.3f} - gen_loss: {:.3f} - disc_loss: {:.3f}'.format(*avg_losses)\n text += f\" ({speed} sec)\"\n tf.print(text)\n\n # Reset loss vector and metric states.\n avg_losses = tf.zeros(shape=(4,))\n\n if ((epoch % 
epoch_interval_saving) == 0) and (epoch != 0):\n # Save the model every x epochs.\n checkpoint.save(file_prefix=checkpoint_prefix)\n\n if (epoch % img_interval_saving) == 0:\n # Produce images for the GIF as you go.\n display.clear_output(wait=True)\n generate_and_save_images(\n generator, epoch + 1,\n test_input=seed,\n img_path=summary_dir)\n\n # Increment number of epochs passed.\n num_epochs.assign_add(1)\n","repo_name":"cjuliani/Wasserstein-Generative-Adversarial-Network-TensorFlow-2","sub_path":"src/wgan_gp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"3046734564","text":"import asyncio\nimport aiohttp\nimport logging\nimport pprint\nimport config\n\nTOKEN = config.token\npp = pprint.PrettyPrinter(indent=4)\n\nlogger = logging.getLogger(__name__)\n\n\nclass TelegramError(Exception):\n pass\n\n\nasync def api_request(method_name, method='GET', **kwargs):\n \"\"\"\n Telegram API access\n \"\"\"\n url = f'https://api.telegram.org/bot{TOKEN}/{method_name}'\n request = await aiohttp.request(method, url, params=kwargs)\n result = await request.json()\n if result[\"ok\"]:\n return result[\"result\"]\n else:\n raise TelegramError()\n\n\nasync def get_updates(offset=0, limit=100, timeout=0):\n \"\"\"\n Telegram method getUpdates\n \"\"\"\n updates = api_request('getUpdates', offset=offset, limit=limit, timeout=timeout)\n return await updates\n\n\nasync def send_message(**kwargs):\n \"\"\"\n Telegram method sendMessage\n \"\"\"\n request = api_request(\"sendMessage\", **kwargs)\n await request\n\n\nasync def print_message(message):\n \"\"\"\n Repeat message\n \"\"\"\n chat_id = message[\"chat\"][\"id\"]\n message_id = message[\"message_id\"]\n text = message[\"text\"]\n result = await send_message(chat_id=chat_id,\n text=text,\n reply_to_message_id=message_id)\n return result\n\n\nasync def process_update(update):\n \"\"\"\n Send update\n \"\"\"\n pp.pprint(update)\n pp.pprint((await print_message(update[\"message\"])))\n return update[\"update_id\"]\n\n\nasync def process_updates():\n \"\"\"\n Getting updates and processes them\n \"\"\"\n offset = 0\n while True:\n updates = await get_updates(offset, timeout=5)\n tasks = [\n asyncio.ensure_future(process_update(update)) for update in updates\n ]\n for future in asyncio.as_completed(tasks):\n update_id = await future\n offset = max(offset, update_id + 1)\n\n\ndef main():\n \"\"\"\n Main bot entry point\n \"\"\"\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(process_updates())\n except KeyboardInterrupt:\n loop.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DennTerentyev/asyncBot","sub_path":"core/asyncbot.py","file_name":"asyncbot.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"70053129621","text":"# -*- coding: utf-8 -*-\n# Author: tigflanker\n# Date: 31 Oct 2018\n# Update: 04 Nov 2018:Add oversampling function \n\nimport re\nimport pandas as pd \nimport numpy as np\n\ndef Data_Sampling(datain,\n sample_rule = {},\n stratify_by = [],\n group_by = ''\n ): \n \n '''\n # 1. 单纯抽样:全部数据按照一定比例或指定数量进行抽样:sample_rule = 100; sample_rule = 0.3\n # 2. 分层抽样:按照某个变量进行排序,按照等频分箱,每箱内抽取均匀样本量:stratify_by = ['age', 20] + sample_rule = 100 or 0.3\n # 3. 
非均衡抽样:① 设定某个分组变量,对其包含值个数取最小,按此最小值对所有分组 1:1 抽样:sample_rule = 'align' + group_by = 'survived' \n # ② 指定规则分组抽样,按照分组变量的每个值设定规则(如:100、0.3、'max'):sample_rule = {0:100, 1:'max'} + group_by = 'survived'\n # ③ 非均衡 + 分层抽样:sample_rule = {0:100, 1:0.3} + group_by = 'survived' + stratify_by = ['age', 20]\n # 系数中若倍数大于1,或例数大于样本量本身,即为过采样 \n \n # 使用示例:\n datain = pd.read_csv('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic.txt')\n \n #print(datain.describe())\n #print(datain['survived'].value_counts())\n \n # 1. 简单抽样:sample_rule\n sample_out = Data_Sampling(datain,\n sample_rule = 2.3,\n stratify_by = [],\n group_by = '')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n \n # 1. 简单抽样:sample_rule\n sample_out = Data_Sampling(datain,\n sample_rule = 2000,\n stratify_by = [],\n group_by = '')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n \n # 2. 分层抽样:sample_rule + stratify_by\n sample_out = Data_Sampling(datain,\n sample_rule = 2.3,\n stratify_by = ['age', 20],\n group_by = '')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n \n # 3. 非均衡抽样:sample_rule + group_by\n sample_out = Data_Sampling(datain,\n sample_rule = 'align',\n stratify_by = [],\n group_by = 'survived')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n \n # 3. 非均衡抽样:sample_rule + group_by\n sample_out = Data_Sampling(datain,\n sample_rule = {0:'max',1:4000},\n stratify_by = [],\n group_by = 'survived')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n \n # 3. 非均衡抽样:sample_rule + group_by\n sample_out = Data_Sampling(datain,\n sample_rule = {0:2.0,1:4000},\n stratify_by = ['age', 20],\n group_by = 'survived')\n \n print(sample_out.describe())\n print(sample_out['survived'].value_counts())\n '''\n\n # Copy to final output dataset. 
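
The rule decomposition the usage notes above depend on can be shown standalone: a float rule such as 2.3 means two full copies of the data plus a roughly 30% random sample, and an integer rule larger than the row count splits the same way in absolute terms. A minimal sketch with hypothetical helper names (not part of the original module):

    import numpy as np
    import pandas as pd

    def split_rule(rule, n_rows):
        # float rule -> (whole copies, leftover fraction); int rule -> (copies, leftover count)
        if isinstance(rule, float):
            return int(rule), rule % 1
        return rule // n_rows, rule % n_rows

    def oversample(df, rule):
        copies, rest = split_rule(rule, len(df))
        extra = df[np.random.rand(len(df)) < rest] if isinstance(rest, float) else df.sample(n=rest)
        return pd.concat([df] * copies + [extra], ignore_index=True)

    # split_rule(2.3, 1313) -> (2, ~0.3); split_rule(4000, 1313) -> (3, 61)
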
\n sample_out = datain.copy()\n os_times = 0 # Initialize rounds of oversampling.\n \n # Sub-macro for stratified sampling.\n def stratify_sample(sample_set_in = 'sample_out', \n stratify_n = 'stratify_by[1]', \n _stratify_by = 'stratify_by[0]', \n stratify_rule = 'sample_rule'):\n sample_set_in = sample_set_in.sort_values(_stratify_by).reset_index(drop = True)\n \n def _stra(l):\n _sample_rate = stratify_rule if type(stratify_rule) is float \\\n else min(stratify_rule / stratify_n / l, 1)\n return list(np.random.choice([True,False],size=l,p=[_sample_rate,1-_sample_rate]))\n \n _bkt_n = [0] * stratify_n\n for x in range(len(sample_set_in)):\n _bkt_n[x%stratify_n] += 1\n \n keep_index = list(map(_stra, _bkt_n))\n keep_index = eval('[' + re.sub('[\\[\\]]','',str(keep_index)) + ']') # list flattening\n while (type(stratify_rule) is not float) and (sum(keep_index) != stratify_rule):\n keep_index = list(map(_stra, _bkt_n))\n keep_index = eval('[' + re.sub('[\\[\\]]','',str(keep_index)) + ']') # list flattening\n \n return keep_index\n \n # 均衡抽样 \n if type(sample_rule) not in (dict, str):\n \n # 提取过采样需求 \n os_times = int(sample_rule) if type(sample_rule) is float else sample_rule // len(sample_out)\n sample_rule = sample_rule % 1 if type(sample_rule) is float else sample_rule % len(sample_out)\n \n # 单纯抽样\n if (type(sample_rule) is float) and (len(stratify_by) == 0):\n print('>>> 当前正在执行简单抽样,抽样比例为:',os_times + sample_rule,'(过采样)' if os_times > 0 else '')\n keep_index = np.random.choice([True,False],\n size=len(sample_out),p=[sample_rule,1-sample_rule])\n if (type(sample_rule) is not float) and (len(stratify_by) == 0): \n print('>>> 当前正在执行简单抽样,定样数量为:',os_times * len(sample_out) + sample_rule,'(过采样)' if os_times > 0 else '')\n keep_index = []\n while sum(keep_index) != sample_rule:\n keep_index = np.random.choice([True,False],\n size=len(sample_out),p=[sample_rule/len(sample_out),\n 1-sample_rule/len(sample_out)])\n \n # 分层抽样 \n if len(stratify_by) > 0: \n print('>>> 当前正在执行分层抽样,分层变量为:',stratify_by[0],',层数:',stratify_by[1],\n ',抽样比例为:' if type(sample_rule) is float else ',定样数量为:',\n os_times + sample_rule if type(sample_rule) is float else os_times * len(sample_out) + sample_rule,\n '(过采样)' if os_times > 0 else '')\n keep_index = stratify_sample(sample_set_in = sample_out, stratify_n = stratify_by[1], _stratify_by = stratify_by[0], stratify_rule = sample_rule)\n \n # 均衡抽样部分的抽样实施动作 \n _sample_out = sample_out[keep_index]\n for _os in range(os_times):\n _sample_out = pd.concat([_sample_out, sample_out], axis=0, ignore_index=True, sort=False)\n \n sample_out = _sample_out\n \n # 非均衡抽样\n else :\n if sample_rule == 'align':\n _group_vc = sample_out[group_by].value_counts()\n _group_vc[:] = _group_vc.min()\n sample_rule = dict(_group_vc)\n print('>>> 已选择对齐方式,将按照',group_by,'组值中最小例数标齐各组例数,此例数为:',_group_vc.min())\n \n _sample_out = pd.DataFrame()\n for _value in sample_rule:\n # 划分数据集\n _sub_sample_set = sample_out.loc[sample_out[group_by] == _value]\n \n # 提取过采样需求\n if sample_rule[_value] != 'max':\n os_times = int(sample_rule[_value]) if type(sample_rule[_value]) is float else sample_rule[_value] // len(_sub_sample_set)\n sample_rule[_value] = sample_rule[_value] % 1 if type(sample_rule[_value]) is float else sample_rule[_value] % len(_sub_sample_set)\n \n # case\n if sample_rule[_value] == 'max':\n print('>>> 当前正在执行分组抽样,当前组值为:',group_by,'=',_value,',按全量保留')\n keep_index = [True] * len(_sub_sample_set)\n elif len(stratify_by) == 0:\n if type(sample_rule[_value]) is float:\n print('>>> 
当前正在执行分组抽样,当前组值为:',group_by,'=',_value,',抽样比例为:',os_times + sample_rule[_value],\n '(过采样)' if os_times > 0 else '')\n keep_index = np.random.choice([True,False],\n size=len(_sub_sample_set),p=[sample_rule[_value],1-sample_rule[_value]])\n if type(sample_rule[_value]) is not float:\n print('>>> 当前正在执行分组抽样,当前组值为:',group_by,'=',_value,',定样数量为:',\n os_times * len(_sub_sample_set) + sample_rule[_value],'(过采样)' if (os_times > 0) & (sample_rule[_value] > 0) else '')\n keep_index = []\n while sum(keep_index) != sample_rule[_value]:\n keep_index = np.random.choice([True,False],\n size=len(_sub_sample_set),p=[sample_rule[_value]/len(_sub_sample_set),\n 1-sample_rule[_value]/len(_sub_sample_set)])\n elif len(stratify_by) > 0:\n print('>>> 当前正在执行分组 + 分层抽样,当前组值为:',group_by,'=',_value,\n ',分层变量为:',stratify_by[0],',层数:',stratify_by[1],\n ',抽样比例为:' if type(sample_rule[_value]) is float else ',定样数量为:',\n sample_rule[_value] + os_times if type(sample_rule[_value]) is float else os_times * len(_sub_sample_set) + sample_rule[_value],\n '(过采样)' if os_times > 0 else '')\n keep_index = stratify_sample(sample_set_in = _sub_sample_set, stratify_n = stratify_by[1], \n _stratify_by = stratify_by[0], stratify_rule = sample_rule[_value])\n \n __sub_sample_set = _sub_sample_set[keep_index]\n for _os in range(os_times):\n __sub_sample_set = pd.concat([__sub_sample_set, _sub_sample_set], axis=0, ignore_index=True, sort=False)\n _sub_sample_set = __sub_sample_set\n \n _sample_out = pd.concat([_sample_out, _sub_sample_set], axis=0, ignore_index=True, sort=False)\n \n sample_out = _sample_out\n \n return sample_out\n ","repo_name":"tigflanker/Python","sub_path":"Risk control modeling framework/build/lib/RCMF/Data_Sampling.py","file_name":"Data_Sampling.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"42733611735","text":"from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException\nfrom tencentcloud.essbasic.v20210526 import models\n\nfrom common.CreateFlowUtils import initClient, fillAgent\n\n\ndef channelDescribeEmployees(agent, filters, limit, offset):\n\n try:\n # 实例化一个client\n client = initClient()\n\n # 实例化一个请求对象,每个接口都会对应一个request对象\n req = models.ChannelDescribeEmployeesRequest()\n\n req.Agent = agent\n\n req.Filters = filters\n\n req.Limit = limit\n\n req.Offset = offset\n\n # 返回的resp是一个ChannelDescribeEmployeesResponse的实例,与请求对象对应\n return client.ChannelDescribeEmployees(req)\n except TencentCloudSDKException as err:\n print(err)\n\n\n'''\n 测试\n'''\nif __name__ == '__main__':\n # 第三方平台应用相关信息\n Agent = fillAgent()\n\n Filter = models.Filter()\n Filter.Key = \"IsVerified\"\n Filter.Values = []\n\n Filters = [Filter]\n Limit = 10\n Offset = 0\n\n resp = channelDescribeEmployees(Agent, Filters, Limit, Offset)\n # 输出json格式的字符串回包\n print(resp.to_json_string())\n","repo_name":"tencentess/essbasic-python-kit","sub_path":"api/ChannelDescribeEmployees.py","file_name":"ChannelDescribeEmployees.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"72998994262","text":"\"\"\"\r\nCreated on Sun Mar 26 2023\r\n\r\n@author: Adan Alvarez\r\n\"\"\"\r\n\r\nimport random\r\n\r\n# Generar una lista de 10 números aleatorios entre 1 y 100\r\nnumeros = [random.randint(1, 100) for _ in range(10)]\r\nprint(\"\\n\\nLista original:\", numeros)\r\n\r\n# Algoritmo de ordenamiento por selección\r\ndef 
selection_sort(lista):\r\n    n = len(lista)\r\n    for i in range(n):\r\n        # Find the minimum element in the unsorted part of the list\r\n        min_idx = i\r\n        for j in range(i+1, n):\r\n            if lista[j] < lista[min_idx]:\r\n                min_idx = j\r\n        # Swap the minimum element with the first element of the unsorted part\r\n        lista[i], lista[min_idx] = lista[min_idx], lista[i]\r\n\r\nselection_sort(numeros)\r\nprint(\"Lista ordenada:\", numeros)","repo_name":"Adan4lva/Metodos-Ordenamiento","sub_path":"002-SeleccionSort.py","file_name":"002-SeleccionSort.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"8842191612","text":"from flask import Flask\nimport os\nimport time\n\n#PORT = os.environ.get('PORT')\n#name = os.environ.get('NAME')\n#if name == None or len(name) == 0:\n#    name = \"world\"\n\n#if PORT == None or len(PORT) == 0:\n#    PORT = 5000\nPORT = 80\nMESSAGE = \"\\nFrontend Server - Port:\" + str(PORT)\nprint(\"Message: '\" + MESSAGE + \"'\")\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef root():\n    print(\"Handling web request. Returning message.\")\n    result = MESSAGE + \"\\nMy Flask Application\"\n    result = result.encode(\"utf-8\")\n    return result\n\n\nif __name__ == \"__main__\":\n    print('Preparing to sleep for 5 minutes')\n    #time.sleep(305)\n    app.run(debug=True, host=\"0.0.0.0\", port=PORT)","repo_name":"GrantThompson1/ecs-ec2","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74889064022","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[79]:\n\n\nfrom selenium import webdriver\nimport pandas as pd\nimport time\nimport sqlalchemy\nimport s3fs\nimport os\nimport glob\n\n\n# In[80]:\n\n#aws parameters\naws_access_key = 'yours3accesskey'\naws_secret_key = 'yous3secretkey'\naws_bucket = 'my_bucket'\naws_path = 'my_path'\naws_file_name = 'file_name.csv'\n\n#server parameters\nusername = 'myuser'\n\n#selenium parameters\ndriver_exe = 'path_to_driver_exe.exe'\n\n# Check if the file you want to download already exists in the destination path. If exists, delete it\nfileList = glob.glob(fr'C:\\Users\\{username}\\Downloads\\*partial_file_name*')\n\nfor filePath in fileList:\n    try:\n        os.remove(filePath)\n        print(f'File deleted: {filePath}')\n    except:\n        print(f'Error while trying to delete file: {filePath}')\n\n\n# In[81]:\n\n\n#automation to download file from web. 
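
A quick sanity check one could append to the selection-sort script above, comparing against Python's built-in sorted() (illustrative only):

    import random

    datos = [random.randint(1, 100) for _ in range(10)]
    esperado = sorted(datos)
    selection_sort(datos)   # sorts in place
    assert datos == esperado
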
In the example, we download an Excel file which contains historical data from currency exchange between dollars and argentinian pesos\nchrome_browser = webdriver.Chrome(f'{driver_exe}')\n\nchrome_browser.maximize_window()\nchrome_browser.get('http://www.rava.com/empresas/perfil.php?e=DOLAR%20CCL')\n\ntime.sleep(10)\n\ndownload_excel = chrome_browser.find_element_by_partial_link_text('Bajar en formato Excel')\ndownload_excel.click()\n\ntime.sleep(10)\nchrome_browser.close()\n\n\n# In[82]:\n\n\n#Read downloaded file to create a pandas data frame\nusd_ccl_csv = glob.glob('PATH\\*partial_file_name*')\nfile_name = usd_ccl_csv[0]\nusd_ccl_df = pd.read_csv(file_name, sep = ',')\n\n\n# In[83]:\n\n\n#establish connection to s3 bucket and write downloaded file to it\ns3 = s3fs.S3FileSystem(key=aws_access_key, secret=aws_secret_key,anon=False)\nfilename = f'{aws_bucket}/{aws_path}/{aws_file_name}'\nwith s3.open(filename, 'w', newline='') as f:\n usd_ccl_df.to_csv(f, index=False, header=False, sep= '|', quotechar='%')\n\n\n# In[84]:\n\n\n#Establish connection to redshift database\ncon = sqlalchemy.create_engine('postgresql://USER:PASSWORD@DNS:PORT/DATABASE')\n\n#Get file from s3 and write it's data to a redshift table\ncon.execute(\"\"\"\n DELETE TABLE_NAME;\n COPY TABLE_NAME\n from 's3://%s'\n credentials 'aws_access_key_id=%s;aws_secret_access_key=%s'\n delimiter '|' \n removequotes\n emptyasnull\n blanksasnull\n ;\"\"\" % (filename, aws_access_key, aws_secret_key))\n\n","repo_name":"jmacera95/python_etl_tasks","sub_path":"atuomation_etl_s3_redshift.py","file_name":"atuomation_etl_s3_redshift.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"21069990115","text":"import collections\n\nfrom typing import List, Tuple\n\ndef unique_paths_dynamic(src: Tuple[int, int], dst: Tuple[int, int]) -> int:\n\n def make_table(src: Tuple[int, int], dst: Tuple[int, int]) -> List[List[int]]:\n src_x, src_y = src\n dst_x, dst_y = dst\n return [[0 for col in range(src_x, dst_x + 1)] for row in range(src_y, dst_y + 1)]\n\n def in_bounds(table: List[List[int]], x: int, y: int) -> bool:\n return 0 <= y and y < len(table) and 0 <= x and x < len(table[y])\n def left(x: int, y: int) -> Tuple[int, int]:\n return x - 1, y\n def up(x: int, y: int) -> Tuple[int, int]:\n return x, y - 1\n\n table = make_table(src, dst)\n table[-1][-1] = 1\n fringe = collections.deque([(len(table[-1]) - 1, len(table) - 1)])\n\n while fringe:\n px, py = fringe.popleft()\n ux, uy = up(px, py)\n if in_bounds(table, ux, uy):\n if table[uy][ux] == 0:\n fringe.append((ux, uy))\n table[uy][ux] += table[py][px]\n lx, ly = left(px, py)\n if in_bounds(table, lx, ly):\n if table[ly][lx] == 0:\n fringe.append((lx, ly))\n table[ly][lx] += table[py][px]\n return table[0][0]\n\ndef unique_paths_recursive(src: Tuple[int, int], dst: Tuple[int, int]) -> int:\n def worker(x: int, y: int) -> int:\n if (x, y) == src:\n return 1\n elif x < 0 or y < 0:\n return 0\n else:\n return worker(x - 1, y) + worker(x, y - 1)\n return worker(*dst)\n\nprint(unique_paths_dynamic((1, 1), (5, 3)))\nprint(unique_paths_recursive((1, 1), (5, 3)))","repo_name":"jacobcohen76/Interview-Questions","sub_path":"dynamic/unique_paths.py","file_name":"unique_paths.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"2724953902","text":"'''\nUseful met formulas\n'''\nimport numpy as n\nimport 
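
The grid-walk count computed by the two unique_paths variants above also has a closed form: with only left/up (or right/down) moves allowed, the answer is the binomial coefficient C(dx + dy, dx). A cross-check sketch (math.comb needs Python 3.8+):

    from math import comb
    from typing import Tuple

    def unique_paths_closed_form(src: Tuple[int, int], dst: Tuple[int, int]) -> int:
        dx, dy = dst[0] - src[0], dst[1] - src[1]
        return comb(dx + dy, dx)

    # Agrees with the dynamic and recursive versions, e.g. ((1, 1), (5, 3)) -> 15.
    print(unique_paths_closed_form((1, 1), (5, 3)))
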
constants as c\n\ndef goff_gratch(temperature):\n '''\n computes saturation water pressure from temperature (in Kelvin)\n ref Smithsonian Tables 1984, after Goff and Gratch 1946\n usage:\n goff_gratch(temperature)\n '''\n exponent = \\\n - 7.90298 * (373.16/temperature - 1) \\\n + 5.02808 * n.log10(373.16 / temperature) \\\n - 1.3816e-7 * (10**(11.344 * (1-temperature/373.16)) -1) \\\n + 8.1328e-3 * (10**(-3.49149 * (373.16/temperature-1)) -1) \\\n + n.log10(1013.246) \n return 10**exponent\n\ndef water_vapour_pressure_to_mix_ratio(pressure, water_vapour_pressure):\n '''\n computes the mixing ratio given the total pressure and the water vapour\n partial pressure\n usage:\n water_vapour_pressure_to_mix_ratio(pressure, water_vapour_pressure)\n '''\n coeff = c.water_molecular_weight / c.air_molecular_weight\n dry_air_pressure = pressure - water_vapour_pressure\n return coeff * water_vapour_pressure / dry_air_pressure\n","repo_name":"carlos9917/pywrf","sub_path":"misc/met_utils.py","file_name":"met_utils.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42990563221","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef ReadFile(path, fileName):\n\tprint(\"Data File Path : {}\".format(path))\n\tprint(\"File Name : {}\".format(fileName))\n\n\t# read\n\tf = open(path+fileName)\n\tlines = f.readlines()\n\n\txs = []\n\tys = []\n\tzs = []\n\n\tfor line in lines:\n\t\tline = line.strip().split()\n\t\txs.append(float(line[1])*-1.)\n\t\tys.append(float(line[2])*-1.)\n\t\tzs.append(float(line[3]))\n\n\treturn xs, ys, zs\n\n\nif __name__ == '__main__':\n\tprint(\"hello\")\n\n\tpath = './'\n\n\t#fileName = 'PointCloud_FrameID_0.txt'\n\t#xs, ys, zs = ReadFile(path, fileName)\n\n\t#\n\t# plot\n\t#\n\tfor i in range(100):\n\t\tfileName = 'PointCloud_FrameID_'+str(i)+'.txt'\n\n\t\txs, ys, zs = ReadFile(path, fileName)\n\n\t\t# plot\n\t\tfig = plt.figure(figsize=(8,8))\n\t\tplt.plot(xs, ys, label = 'frame 0', lw=2, ls='', marker='s', markersize=2)\n\n\t\tplt.xlim(-6,6)\n\t\tplt.ylim(-6,6)\n\t\t#plt.ylim((6,-6))\n\t\tplt.xlabel('X',fontdict={'family' : 'Times New Roman', 'size': 12})\n\t\tplt.ylabel('Y',fontdict={'family' : 'Times New Roman', 'size': 12})\n\t\tplt.title('Projection of Suzanne')\n\t\tplt.legend(frameon=True)\n\n\t\tplt.tight_layout()\n\t\tplt.savefig('figure-projectionSuzanne-frame'+str(i)+'.png',dpi=300)\n\t\tplt.close('all')\n\n\t\t#plt.show()\n\n","repo_name":"PascalXie/Oct-SLAM-ICP","sub_path":"step4-loopClosure/analysis/alice_projection.py","file_name":"alice_projection.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"18507364216","text":"# tamppa_code_tim.py\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# globals\nlines = []\nagain_func_names = []\n\ndef toDataframe(txtfile_path):\n \"\"\"\n Parses stats from the .txt file and converts it to a pandas dataframe.\n \"\"\"\n f = open(txtfile_path, \"r\")\n txt = f.read()\n\n for line in txt.split('\\n'):\n #print(line)\n if line.startswith('Wrote'): continue\n if line.startswith('Timer'): continue\n\n if line.startswith('Total'): continue\n if line.startswith('File'): continue\n if line.startswith('Function'): continue\n\n if line.startswith('Line'): continue\n if line.startswith('='): continue\n if line == '': continue\n data = [i.strip() for i in line.split()]\n\n #Fix def lines\n if len(data) <= 1: 
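
A small usage sketch for the met_utils record above, assuming the module is importable as met_utils and that its constants module defines the usual molecular weights (ratio near 0.622). The Goff-Gratch value at 20 °C should land near the tabulated 23.4 hPa:

    from met_utils import goff_gratch, water_vapour_pressure_to_mix_ratio

    es = goff_gratch(293.15)                             # saturation pressure at 20 °C, hPa
    ws = water_vapour_pressure_to_mix_ratio(1000.0, es)  # saturation mixing ratio at 1000 hPa
    print(round(es, 2), round(ws, 4))                    # roughly 23.39 and 0.0149
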
continue\n\n if data[1] == '@profile': continue\n\n if data[1] == 'def':\n data = [data[0],'','','','',' '.join(data[1:4])]\n #print(data)\n temp_str = ''.join(data[5])\n #print(temp_str)\n temp_str = temp_str.split(' ')[1]\n again_func_names.append(temp_str.split('(')[0])\n\n data = [data[0], data[1], data[2], data[3],data[4], ''.join(data[5:])]\n #print(f\"{len(data)}\\t{data}\")\n lines.append(data)\n\n df = pd.DataFrame(lines, columns=['Line #', 'Hits','Total Time', 'Time Per Hit', '% Time', 'Line Contents'])\n return df, again_func_names\n\ndef writeFuncNamesTXT(again_func_names):\n \"\"\"\n Generates a text file of all the functions (names)\n \"\"\"\n with open(\"again_func_names.txt\", \"w\") as f:\n for item in again_func_names:\n f.write(\"%s\\n\" % item)\n\ndef toCSV(df):\n \"\"\"\n Converts the extracted stats from the dataframe to .csv\n \"\"\"\n split_idx = df[df['Line Contents'].str.startswith('def')].index\n\n index_collector_list = list()\n for i, idx in enumerate(split_idx):\n index_collector_list.append(idx)\n\n dataframes = []\n for idx, val in enumerate(index_collector_list):\n try:\n dataframes.append(df.iloc[val:index_collector_list[idx+1]])\n except IndexError:\n dataframes.append(df.iloc[val:])\n\n fn = iter(again_func_names)\n\n for i in dataframes:\n #print(\"===\")\n i = i.iloc[1:]\n #print(i)\n i.to_csv(next(fn)+'_tim_.csv')\n\ndef decode():\n \"\"\"\n Demonstrates how to use the results. Optional Plotting\n \"\"\"\n fun = open(\"again_func_names.txt\",'r')\n df_names = [f.split('\\n')[0]+'_tim_.csv' for f in fun]\n dfs = [pd.read_csv(d) for d in df_names]\n\n # Data to plot\n labels = []\n sizes = []\n colors = []\n\n color_palette_2 = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue']\n\n for idx, d in enumerate(dfs):\n clr = color_palette_2[idx%len(color_palette_2)]\n for c in list(d.loc[:,'% Time']):\n sizes.append(c)\n for l in d['Line #']:\n colors.append(clr)\n labels.append(l)\n #sizes.append()\n\n # Plot\n plt.pie(x=sizes, labels=labels, colors=colors,\n startangle=90,pctdistance=1.125,\n wedgeprops={\"edgecolor\":\"k\",'linewidth': 1},\n textprops={'fontsize': 8})\n #plt.legend(labels, bbox_to_anchor=(0,0) loc=\"best\")\n\n plt.axis('equal')\n plt.rcParams['font.size'] = 10\n fig = plt.gcf()\n fig.set_size_inches(10,10)\n fig.show()\n\ndef tim_parse(txtfile_path):\n \"\"\"\n Driver Function\n \"\"\"\n df, again_func_names = toDataframe(txtfile_path)\n toCSV(df)\n writeFuncNamesTXT(again_func_names)\n decode()\n","repo_name":"pra-dan/TAMPPA","sub_path":"tamppa/tamppa_code_tim.py","file_name":"tamppa_code_tim.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"12"} +{"seq_id":"42012597586","text":"from collections import deque\nfrom tree_node import *\n\n\ndef nodes_the_same(p, q):\n # if both are None\n if not p and not q:\n return True\n # one of p and q is None\n if not q or not p:\n return False\n if p.val != q.val:\n return False\n return True\n\n\nclass TreeAlgorithms:\n\n def do_level_traversal(self):\n tn = TreeNode.build_sample_tree2()\n result = self.show_tree_by_level(tn)\n print(result)\n return result\n\n def do_same_test(self):\n tn, tn2 = TreeNode.build_sample_tree1()\n result = self.is_same_tree(tn, tn2)\n print(result)\n return result\n\n @staticmethod\n def is_same_tree( p, q):\n \"\"\"\n :type p: TreeNode\n :type q: TreeNode\n :rtype: bool\n \"\"\"\n\n deq = deque([(p, q)])\n while deq:\n p, q = deq.popleft()\n if not nodes_the_same(p, q):\n 
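
The is_same_tree method in the tree_algorithms record here walks both trees in lockstep with a deque; the same check reads naturally as a recursion, shown below as an equivalent sketch (works on any node with val/left/right attributes):

    def is_same_tree_recursive(p, q):
        if not p and not q:
            return True
        if not p or not q or p.val != q.val:
            return False
        return (is_same_tree_recursive(p.left, q.left)
                and is_same_tree_recursive(p.right, q.right))
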
return False\n\n if p:\n print(p.val)\n deq.append((p.left, q.left))\n deq.append((p.right, q.right))\n\n return True\n\n @staticmethod\n def show_tree_by_level(root):\n\n result_array = []\n deq = deque([root])\n\n current_level = 0\n cnt = 0\n curr_list = []\n fact = 1\n while deq:\n\n curr = deq.popleft()\n if curr:\n curr_list.append(curr.val)\n\n cnt += 1\n\n next_level = float(cnt + 1) / float(fact)\n if next_level >= 2.0:\n fact = fact * 2.0\n result_array.append(curr_list)\n curr_list = []\n\n if curr:\n deq.append(curr.left)\n deq.append(curr.right)\n\n if curr_list:\n result_array.append(curr_list)\n\n return result_array\n\n\n\n\n","repo_name":"ScottLaing/simple-image-viewer","sub_path":"tree_algorithms.py","file_name":"tree_algorithms.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25627928721","text":"\"\"\"\nThe 'Power Iteration Method' is a recursive algorithm that finds the\ndominant eigenvalue and eigenvector pair of a matrix.\n\"\"\"\nimport numpy as np\n\n\ndef power_iteration(mat):\n n = mat.shape[0]\n pk = np.ones((n, 1))\n for _ in range(0, 10):\n pn = np.dot(mat, pk)\n pk = pn / np.linalg.norm(pn)\n return pn\n","repo_name":"sstewart0/data_science_projects","sub_path":"python/graph_models/powIter.py","file_name":"powIter.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"38023836164","text":"import cv2\nimport numpy as np\nimport math\n\nimage_dir = \"/Users/pro/Desktop/0_box.png\"\n\nins_mask = cv2.imread(image_dir, cv2.IMREAD_GRAYSCALE)\ncolorimg = cv2.imread(\"/Users/pro/Desktop/0box.jpg\")\n\n# gray_value = 16\n\n# pixels = []\n\n# mask_gray = np.zeros(ins_mask.shape)\n# mask_gray.fill(gray_value)\n# tf = (mask_gray == ins_mask)\n# print(tf)\n# pixels = ins_mask * tf\n# print(pixels)\ncoord = []\nfor i in range(512):\n temp = []\n for j in range(512):\n temp.append([i,j])\n temp = np.array(temp)\n coord.append(temp)\ncoord = np.array(coord)\n\n# for i in range(len(ins_mask)):\n# for j in range(len(ins_mask)):\n# if ins_mask[i][j] == gray_value:\n# pixels.append([i,j])\n# colorimg[i][j] = [0,0,255]\n\nrbboxes = []\ngray_value = 1\nisexists = True\n\nwhile isexists:\n pixels = []\n isexists = False\n # for i in range(len(ins_mask)):\n # temp = []\n # for j in range(len(ins_mask)):\n # temp.append([i,j])\n # if ins_mask[i][j] == gray_value:\n # pixels.append([i,j])\n # isexists = True\n thisgray = np.zeros((512,512))\n thisgray.fill(gray_value)\n pixels = coord[(thisgray == ins_mask)]\n if len(pixels) == 0:\n isexists = False\n else:\n isexists = True\n\n if isexists:\n rbbox = cv2.minAreaRect(np.array(pixels))\n x,y = rbbox[0]\n w,h = rbbox[1]\n angle = rbbox[2] * math.pi / 180\n angle = -angle + math.pi / 2\n # class_id = sem_mask[pixels[0][0]][pixels[0][1]]\n gray_value += 1\n rbboxes.append([x,y,w,h,angle])\n\n# colorins_mask[int(x)][int(y)] = [0,255,0]\n\n# rbbox = ((x,y),(w,h),angle)\n\n# box = cv2.boxPoints(rbbox)\n# box = np.int0(box)\n\n# ur = [x+h/2*math.cos(angle)+w/2*math.sin(angle), y-h/2*math.sin(angle)+w/2*math.cos(angle)]\n# dr = [x+h/2*math.cos(angle)-w/2*math.sin(angle), y-h/2*math.sin(angle)-w/2*math.cos(angle)]\n# ul = [x-h/2*math.cos(angle)+w/2*math.sin(angle), y+h/2*math.sin(angle)+w/2*math.cos(angle)]\n# dl = [x-h/2*math.cos(angle)-w/2*math.sin(angle), y+h/2*math.sin(angle)-w/2*math.cos(angle)]\nfor rbbox in rbboxes:\n\n x,y,w,h,angle 
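
The power_iteration docstring above promises the dominant eigenvalue/eigenvector pair, but the function returns only the unnormalised vector after a fixed 10 iterations. A variant that also reports the eigenvalue via the Rayleigh quotient (sketch):

    import numpy as np

    def power_iteration_with_value(mat, iters=50):
        vec = np.ones((mat.shape[0], 1))
        for _ in range(iters):
            vec = mat @ vec
            vec = vec / np.linalg.norm(vec)
        value = float(vec.T @ mat @ vec)  # Rayleigh quotient of the unit vector
        return value, vec

    # e.g. power_iteration_with_value(np.array([[2., 0.], [0., 1.]])) -> (~2.0, ~[1, 0])
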
= rbbox\n\n ur = [y-h/2*math.sin(angle)+w/2*math.cos(angle), x+h/2*math.cos(angle)+w/2*math.sin(angle)]\n dr = [y-h/2*math.sin(angle)-w/2*math.cos(angle), x+h/2*math.cos(angle)-w/2*math.sin(angle)]\n ul = [y+h/2*math.sin(angle)+w/2*math.cos(angle), x-h/2*math.cos(angle)+w/2*math.sin(angle)]\n dl = [y+h/2*math.sin(angle)-w/2*math.cos(angle), x-h/2*math.cos(angle)-w/2*math.sin(angle)]\n\n box = [dl,ul,ur,dr]\n box = np.int0(box)\n # print(box)\n cv2.drawContours(colorimg, [box], 0, (0,0,255), 2)\n\n# x, y, w, h = bbox\n# cv2.rectangle(colorins_mask, (y, x), (y+h, x+w), (0, 255, 0), 2)\n\ncv2.imwrite('contours.png', colorimg)\n","repo_name":"Debugger001/Synthesize-images-for-generating-datasets","sub_path":"SynImg/rbbox_final_test.py","file_name":"rbbox_final_test.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"6545173543","text":"import click\nimport random\nimport tempfile\nimport os\nimport time\nfrom typing import List, Union\nfrom pyspell.practice.models import Word\n\n_PLAYER_ENV_VAR = \"PYSPELL_PLAYER\"\n\ndef practice_on(words: List[Word]):\n player = \"afplay\" if _PLAYER_ENV_VAR not in os.environ else os.environ.get(_PLAYER_ENV_VAR)\n \n def wrapper(indexes: List[int]) -> Union[float, bool, List[int]]:\n click.clear()\n \n incorrect_indexes = []\n practice_force_complete = False\n cnt_correct_words = 0.0\n \n random_indexes = random.sample(indexes, len(indexes))\n with click.progressbar(random_indexes, color=\"bright_white\") as bar:\n for n, i in enumerate(bar):\n if practice_force_complete: # handle .q command\n # exhaust unprocessed words\n incorrect_indexes += random_indexes[n:]\n break\n \n word = words[i]\n name, desc, audio = word.word\n f = tempfile.NamedTemporaryFile(mode=\"w+b\", suffix=\".mp3\", delete=False) \n f.write(audio)\n fname = f.name\n f.close()\n \n while True:\n # click.clear()\n click.echo(click.style(f\"\\n\\nDESCRIPTION: \\n\\t{desc}\", fg=\"bright_white\"))\n if os.system(f\"{player} {fname}\") != 0:\n click.echo(click.style(f\"PLAYER ERROR!\", fg=\"red\", bold=True))\n exit(1)\n\n answer = str.lower(str.strip(click.prompt(f\"\\nENTER YOUR ANSER\")))\n # hanlde command\n if answer == \".r\":\n continue\n elif answer == \".q\":\n practice_force_complete = True\n break\n \n word.answer = answer\n \n if name == answer:\n cnt_correct_words+=1\n click.echo(click.style(f\"\\n {answer.upper()} = {name.upper()}\", fg=\"green\"))\n time.sleep(1)\n else:\n click.echo(click.style(f\"\\n {answer.upper()} ≠ {name.upper()}\", fg=\"red\")) \n incorrect_indexes.append(i)\n time.sleep(5)\n break\n \n os.remove(fname)\n click.clear()\n return cnt_correct_words, practice_force_complete, sorted(incorrect_indexes)\n return wrapper\n","repo_name":"fafadoboy/pyspell","sub_path":"pyspell/practice/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"34298075700","text":"import sys\r\nimport os\r\n\r\n###Descarga del repositorio ###\r\nos.system(\"git clone https://github.com/CDPS-ETSIT/practica_creativa2.git\")\r\nos.chdir('practica_creativa2/bookinfo/src/productpage')\r\n\r\n### Asignación de la variable global ###\r\nos.environ['GROUP_NUMBER'] = 'Equipo 16'\r\n\r\n### Instalación del pip3 ###\r\nos.system(\"pip3 install -r requirements.txt\") \r\n\r\n### Edicion del archivo HTML y asignación de la variable global GROUP_NUMBER ###\r\n ## 
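
The corner trigonometry in the rbbox record above reconstructs, after its axis swap, what OpenCV can also produce directly: cv2.boxPoints turns a minAreaRect result into the four corners. For reference (points illustrative):

    import cv2
    import numpy as np

    pts = np.array([[10, 10], [10, 40], [30, 40], [30, 10]], dtype=np.int32)
    rrect = cv2.minAreaRect(pts)             # ((cx, cy), (w, h), angle in degrees)
    corners = np.int0(cv2.boxPoints(rrect))  # 4x2 corner array, ready for drawContours
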
Search for the group variable and assign the group number ##\r\n\r\nfin = open(\"templates/productpage.html\", \"r\")\r\nfin2 = open(\"group.html\",\"w\")\r\nfor line in fin:\r\n    if '''{% block title %}Simple Bookstore App{% endblock %}''' in line:\r\n        fin2.write('''{% block title %}'''+ os.environ['GROUP_NUMBER'] + '''{% endblock %}''')\r\n    else:\r\n        fin2.write(line)\r\n\r\nfin2.close()\r\nfin.close()\r\n    \r\n### Write the result back to the product page ###\r\nfin3 = open(\"group.html\",\"r\")\r\nfin4 = open(\"templates/productpage.html\",\"w\")\r\n\r\nfor line in fin3:\r\n    fin4.write(line)\r\nfin3.close()\r\nfin4.close()\r\n\r\nos.system(\"rm group.html\")\r\n\r\n### Set up the environment on the heavyweight virtual machines ###\r\n    ## To run in the browser: externalIP:9080/productpage ##\r\n\r\nos.system(\"python3 productpage_monolith.py 9080\")\r\n\r\n","repo_name":"javigarciacespedes/CDPS_P2","sub_path":"PC2/Parte 1/script_p1.py","file_name":"script_p1.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"42801824922","text":"import os\nimport glob\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nENVIRONMENTS_PATH = os.path.join(BASE_PATH, \"environment\")\n\nif not os.path.isfile(os.path.join(ENVIRONMENTS_PATH, \"__init__.py\")):\n    \"\"\"\n    We now know the _settings file is not yet linked, so create the symlink\n    \"\"\"\n    settings_files = glob.glob1(ENVIRONMENTS_PATH, \"[a-z]*.py\")\n\n    print(\"Available environment settings files:\")\n    for idx, file in enumerate(settings_files):\n        print(\"{0}.\".format(idx + 1), file)\n\n    while True:\n        try:\n            selected_settings = settings_files[\n                int(\n                    input(\n                        \"Select one of the above settings files [1-{0}]: \".format(\n                            len(settings_files)\n                        )\n                    )\n                )\n                - 1\n            ]\n            print(\"You picked: \", selected_settings)\n            os.symlink(\n                os.path.join(\".\", selected_settings),\n                os.path.join(ENVIRONMENTS_PATH, \"__init__.py\"),\n            )\n        except IndexError:\n            print(\n                \"Error, please select a settings file by entering a corresponding number!\"\n            )\n            continue\n        else:\n            break\n\n# Now that we know the _settings file exists we can import it\nfrom .environment import *\n","repo_name":"stefanvanderzanden/productivity-backend","sub_path":"_project/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"4661584513","text":"from lib.progLib.programCompile import progComp\n\n'''\nThis script facilitates compiling binaries from the source ('src') folder\nand moving those binaries to the binary folder ('bin'). This script uses\nthe scaffolding provided by 'compile.py'\n'''\n\nsrcBin = (\"run\",)\nsrcRun = \"compile.sh\"\nbinRun = \"run.sh\"\n\ncompInst = progComp(srcBin, dir_name='isov')\nsuccessfulCompilation = compInst.compileFunc(safety_bool=False)\n\nprint(\" \")\nif(successfulCompilation):\n    print(\"No fatal errors detected, see above for runtime messages\")\nelse:\n    print(\"Fatal error detected! 
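
Once a file is chosen, the settings bootstrap above reduces to a single os.symlink(source, link_name) call, with a relative source so the link stays valid if the project root moves. A sketch with a hypothetical settings file name:

    import os

    chosen = "development.py"  # hypothetical; normally taken from the prompt above
    os.symlink(os.path.join(".", chosen),
               os.path.join("settings", "environment", "__init__.py"))
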
See above for runtime error(s)\")\nprint(\" \")","repo_name":"Dalcoin/ISOV","sub_path":"compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} +{"seq_id":"31749489082","text":"if 'td' in globals():\n\tprint('common/util.py initializing')\n\nif False:\n\ttry:\n\t\timport td\n\texcept ImportError:\n\t\ttry:\n\t\t\tfrom _stubs import td\n\t\texcept ImportError:\n\t\t\ttry:\n\t\t\t\tfrom common.lib._stubs import td\n\t\t\texcept ImportError:\n\t\t\t\ttd = object()\nif False:\n\ttry:\n\t\tfrom _stubs import op\n\texcept ImportError:\n\t\top = object()\n\n\nimport json\nimport datetime\n# import logging\n#\n# logging.basicConfig(format='%(asctime)s %(message)s')\n# logger = logging.getLogger('tdapp')\n# logger.setLevel(logging.INFO)\n\ndef _interp(x, inrange, outrange):\n\treturn ((outrange[1]-outrange[0])*(x-inrange[0])/(inrange[1]-inrange[0])) + outrange[0]\n\ntry:\n from numpy import interp\nexcept ImportError:\n interp = _interp\n\n# class Logger:\n# \tdef __init__(self, comp):\n# \t\tself.comp = comp\n# \t\tself.buffer = comp.op('./buffer')\n#\n# \t@property\n# \tdef _FilePath(self):\n# \t\tpath = self.comp.par.Folder.eval() or self.comp.par.Folder.default\n# \t\tif not path.endswith('/'):\n# \t\t\tpath += '/'\n# \t\tpath += self.comp.par.Fileprefix.eval() or self.comp.par.Fileprefix.default or ''\n# \t\treturn self._TimestampClean + '.log'\n#\n# \t@property\n# \tdef _Timestamp(self):\n# \t\treturn datetime.datetime.now().strftime('%Y.%m.%d %H:%M:%S')\n#\n# \t@property\n# \tdef _TimestampClean(self):\n# \t\treturn datetime.datetime.now().strftime('%Y%m%d-%H%M%S')\n#\n# \tdef ClearBuffer(self):\n# \t\tself.buffer.text = ''\n#\n# \tdef FlushToFile(self):\n# \t\tpath = self._FilePath\n# \t\tif not self.buffer.text:\n# \t\t\t# nothing to flush\n# \t\t\treturn\n# \t\tself.buffer.save(path)\n# \t\tself.ClearBuffer()\n#\n# \tdef Log(self, message, verbose=False, skipfile=False, skipconsole=False):\n# \t\tif self.comp.par.Silent:\n# \t\t\treturn\n# \t\tif verbose and not self.comp.par.Verbose:\n# \t\t\treturn\n# \t\ttext = '[%s] %s' % (self._Timestamp, message)\n# \t\tif not skipconsole:\n# \t\t\tprint(text)\n# \t\tif skipfile or (verbose and not self.comp.par.Verbosetofile):\n# \t\t\treturn\n# \t\tprint(text, file=self.buffer)\n\ndef Log(msg, file=None):\n\t#logger.info('%s', msg)\n\tprint('[%s]' % datetime.datetime.now().strftime('%m.%d %H:%M:%S'), msg, file=file)\n\nclass IndentedLogger:\n\tdef __init__(self, outfile=None):\n\t\tself._indentLevel = 0\n\t\tself._indentStr = ''\n\t\tself._outFile = outfile\n\n\tdef _AddIndent(self, amount):\n\t\tself._indentLevel += amount\n\t\tself._indentStr = '\\t' * self._indentLevel\n\n\tdef Indent(self):\n\t\tself._AddIndent(1)\n\n\tdef Unindent(self):\n\t\tself._AddIndent(-1)\n\n\tdef LogEvent(self, path, opid, event):\n\t\tif not path and not opid:\n\t\t\tLog('%s %s' % (self._indentStr, event), file=self._outFile)\n\t\telse:\n\t\t\tLog('%s [%s] %s (%s)' % (self._indentStr, opid or '', event, path or ''), file=self._outFile)\n\n\tdef LogBegin(self, path, opid, event):\n\t\tself.LogEvent(path, opid, event)\n\t\tself.Indent()\n\n\tdef LogEnd(self, path, opid, event):\n\t\tself.Unindent()\n\t\tif event:\n\t\t\tself.LogEvent(path, opid, event)\n\ndef dumpobj(obj, underscores=False, methods=False):\n\tprint('Dump %r type: %r' % (obj, type(obj)))\n\tif isinstance(obj, (list, tuple)):\n\t\tfor i in range(len(obj)):\n\t\t\tprint(' [' + str(i) + 
']: ' + repr(obj[i]))\n\telse:\n\t\tfor key in dir(obj):\n\t\t\tif key.startswith('_') and not underscores:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tval = getattr(obj, key)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(' ' + key + ': [ERROR]', e)\n\t\t\t\tcontinue\n\t\t\tif callable(val) and not methods:\n\t\t\t\tcontinue\n\t\t\tprint(' ' + key + ': ' + repr(val))\n\n\ndef setattrs(obj, **attrs):\n\tif isinstance(obj, (tuple, list)):\n\t\tfor o in obj:\n\t\t\tsetattrs(o, **attrs)\n\telse:\n\t\tfor key in attrs:\n\t\t\tsetattr(obj, key, attrs[key])\n\ndef _ProcessClones(master, action, predicate=None):\n\tif not master or not hasattr(master, 'clones'):\n\t\treturn\n\tfor c in master.clones:\n\t\tif predicate is not None and not predicate(c):\n\t\t\tcontinue\n\t\taction(c)\n\ndef DumpClones(master, predicate=None):\n\t_ProcessClones(master, lambda c: print(' ' + c.path), predicate=predicate)\n\nclass TableMenuSource:\n\tdef __init__(self, dat, nameCol='name', labelCol='label'):\n\t\tself.dat = dat\n\t\tself.nameCol = nameCol\n\t\tself.labelCol = labelCol\n\n\tdef _GetCol(self, name):\n\t\tif not self.dat:\n\t\t\treturn []\n\t\tcells = self.dat.col(name)\n\t\treturn [x.val for x in cells[1:]] if cells else []\n\n\t@property\n\tdef menuNames(self):\n\t\treturn self._GetCol(self.nameCol)\n\n\t@property\n\tdef menuLabels(self):\n\t\treturn self._GetCol(self.labelCol)\n\ndef GetVisibleCOMPsHeight(comps):\n\treturn sum([o.par.h for o in comps if getattr(o, 'isPanel', False) and o.par.display])\n\ndef GetVisibleChildCOMPsHeight(parentOp):\n\treturn GetVisibleCOMPsHeight([c.owner for c in parentOp.outputCOMPConnectors[0].connections])\n\ndef GetOrAddCell(dat, row, col):\n\tif dat[row, col] is None:\n\t\tif not dat.row(row):\n\t\t\tdat.appendRow([row])\n\t\tif not dat.col(col):\n\t\t\tdat.appendCol([col])\n\treturn dat[row, col]\n\ndef ParseStringList(val):\n\tif not val:\n\t\treturn []\n\tif val.startswith('['):\n\t\treturn json.loads(val)\n\telse:\n\t\tfor sep in [',', ' ']:\n\t\t\tif sep in val:\n\t\t\t\treturn [v.strip() for v in val.split(sep) if v.strip()]\n\t\treturn [val]\n\ndef ToJson(val):\n\treturn json.dumps(val)\n\ndef FromJson(val, defaultVal=None):\n\tif val is None or val == '':\n\t\treturn defaultVal\n\ttry:\n\t\treturn json.loads(val)\n\texcept ValueError or TypeError:\n\t\treturn defaultVal\n\ndef IsTupleOrList(val):\n\treturn isinstance(val, (tuple, list))\n\ndef CopyParOpts(frompars, topars, includeMenuSource=False, includeLabel=True):\n\t# Log('CopyParOpts(frompars=%r, topars=%r, ...)' % (frompars, topars))\n\tif not IsTupleOrList(frompars):\n\t\tfrompars = [frompars] * (len(topars) if IsTupleOrList(topars) else 1)\n\tif not IsTupleOrList(topars):\n\t\ttopars = [topars] * len(frompars)\n\t# Log('CopyParOpts() .. after arg preproc: frompars=%r, topars=%r' % (frompars, topars))\n\tfor frompar, topar in zip(frompars, topars):\n\t\tattrs = []\n\t\tif includeLabel:\n\t\t\tattrs += ['label']\n\t\tif topar.isNumber:\n\t\t\tattrs += ['min', 'max', 'normMin', 'normMax', 'clampMin', 'clampMax']\n\t\tif topar.isMenu:\n\t\t\tattrs += ['menuNames', 'menuLabels']\n\t\t\tif includeMenuSource:\n\t\t\t\tattrs += ['menuSource']\n\t\tattrs += ['default']\n\t\tfor attrname in attrs:\n\t\t\ttry:\n\t\t\t\tsetattr(topar, attrname, getattr(frompar, attrname))\n\t\t\texcept td.error as e:\n\t\t\t\tLog('CopyParOpts() .. 
unable to set %s on %r from %r - %r' % (attrname, topar, frompar, e))\n\treturn topars\n\ndef CopyPar(page, sourceOp, label, style, labelPrefix='', menuSourcePath='', namePrefix='', sourceName=None, name=None, size=None, fromPattern=None, defaultVal=None):\n\t# Log('CopyPar(page=%r, sourceOp=%r, label=%r, style=%r, labelPrefix=%r, menuSourcePath=%r, namePrefix=%r, sourceName=%r, name=%r, size=%r, fromPattern=%r, defaultVal=%r)' % (\n\t# \tpage, sourceOp, label, style, labelPrefix, menuSourcePath, namePrefix, sourceName, name, size, fromPattern, defaultVal))\n\tif not sourceName:\n\t\tsourceName = label.replace(' ', '').replace('-', '').lower()\n\tif not name:\n\t\tname = sourceName\n\tappendkwargs = {'label': labelPrefix + label}\n\tif size is not None:\n\t\tappendkwargs['size'] = size\n\tattrs = {}\n\tif style == 'Menu' and menuSourcePath:\n\t\tattrs['menuSource'] = \"op(%r).par.%s\" % (menuSourcePath, sourceName if sourceName else name)\n\tif defaultVal is not None:\n\t\tattrs['default'] = defaultVal\n\tif not fromPattern:\n\t\tfromPattern = [sourceName]\n\telif not IsTupleOrList(fromPattern):\n\t\tfromPattern = [fromPattern]\n\tif namePrefix:\n\t\tname = namePrefix + name.lower()\n\telse:\n\t\tname = name.capitalize()\n\tsetattrs(\n\t\tCopyParOpts(\n\t\t\tsourceOp.pars(*fromPattern),\n\t\t\tgetattr(page, 'append' + style)(name, **appendkwargs),\n\t\t\tincludeLabel=False),\n\t\t**attrs)\n\ndef MergeDicts(*dicts):\n\tout = dict()\n\tfor d in dicts:\n\t\tout.update(d)\n\treturn out\n\ndef ParseFloat(text, defaultVal=None):\n\tif text is None or text == '':\n\t\treturn defaultVal\n\ttry:\n\t\treturn float(text)\n\texcept ValueError:\n\t\treturn defaultVal\n\ndef _AddParsToTable(dat, *pars, quotestrings=False):\n\tfor par in pars:\n\t\tif quotestrings and (par.isString or par.isMenu):\n\t\t\tval = repr(par.eval())\n\t\telse:\n\t\t\tval = par.eval()\n\t\tdat.appendRow([par.name, val])\n\ndef CopyParPagesToTable(dat, *pages, quotestrings=False):\n\tdat.clear()\n\tfor page in pages:\n\t\t_AddParsToTable(dat, *page.pars, quotestrings=quotestrings)\n\n_EXPORTS = {\n\t'dumpobj': dumpobj,\n\t'setattrs': setattrs,\n\t'DumpClones': DumpClones,\n}\n\ndef EXPORT(console_locals):\n\t\"\"\"Export utility functions to the console.\n\tUsage: op.core.mod.core_utils.EXPORT(locals())\n\t:param console_locals: dict of local variables to export into\n\t\"\"\"\n\tconsole_locals.update(_EXPORTS)\n","repo_name":"t3kt/tektcommon","sub_path":"lib/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} +{"seq_id":"2849901882","text":"from app import app, socketio, db\nfrom flask import render_template, flash, redirect, url_for, jsonify\nfrom flask_socketio import send\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom app.models import User, Message, get_previous_messages, Chat, chats_users\nfrom app.forms import RegistrationForm, LoginForm, AddChatForm\nfrom app.stats import average_words_in_message, user_activity, time_stat\nfrom datetime import datetime\nfrom app.errors import *\nfrom flask_restplus import Resource, Api\n\n\n\napi = Api(app)\nname_space = api.namespace('api', description='Main APIs')\n\n\n\n@app.route('/')\ndef index():\n return render_template('indexx.html')\n\n\n@app.route('/stats')\ndef stats():\n AWiM = average_words_in_message()\n UA = user_activity()\n TS = time_stat()\n return 
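
Two behaviours of the defensive parsers in the util.py record above are easy to miss. First, 'except ValueError or TypeError:' evaluates the or-expression to ValueError, so a TypeError would still propagate; 'except (ValueError, TypeError):' would catch both. Second, both helpers fall back to their defaults on bad input, e.g. (sketch, run inside the same module):

    print(FromJson('{"a": 1}'))                    # {'a': 1}
    print(FromJson('not json', defaultVal={}))     # {} (falls back to the default)
    print(ParseFloat('3.5'), ParseFloat('', 0.0))  # 3.5 0.0
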
render_template('stats.html',av_words=AWiM,user_activity=UA,time_stats=TS)\n\n@name_space.route(\"/chat\")\n@app.route('/chat')\ndef chat():\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n\n for_sending = []\n for i in get_previous_messages():\n for_sending.append([(User.query.filter_by(id = i.user_id).first().username),i.body])\n #print(Message.query.order_by(Message.id.desc()).all())for_sending.append([(User.query.filter_by(id = i.user_id).first().username),i.body])\n\n #handleMessage(for_sending, broadcast=True)\n for_sending = for_sending[::-1]\n return render_template('chat.html',previous_messages=for_sending)\n\n\n\n\n@socketio.on('message')\ndef handleMessage(msg):\n if current_user.is_authenticated:\n cur_time = str(datetime.now().strftime('%H:%M:%S'))\n msg_user = [current_user.username,msg[0],cur_time]\n\n chat_id = (msg[1].split('/'))[-1]\n #print(chat_id)\n if msg[0] == 'voice message':\n print('\\n\\n\\nVOICE\\n\\n\\n')\n msg[0] = voice()\n if msg[0] != '':\n msg_user = [current_user.username,msg[0],cur_time]\n message = Message(body=msg[0], user_id=current_user.id, chat_id=chat_id)\n if message.body != ' has connected!':\n db.session.add(message)\n print(message.body)\n db.session.commit()\n #print(msg_user)\n send(msg_user, broadcast=True)\n else:\n print('Message: ' + msg[0])\n send(msg[0], broadcast=True)\n\n@app.route('/chat_id/')\ndef chat_id(id):\n this_chat = Chat.query.filter_by(id=id).first()\n if current_user in this_chat.users:\n for_sending = []\n for i in get_previous_messages(chat_id=id):\n for_sending.append([(User.query.filter_by(id = i.user_id).first().username),i.body])\n for_sending = for_sending[::-1]\n return render_template('chat.html',previous_messages=for_sending)\n else:\n return '
<h1>Go away!</h1>
'\n\n\n@app.route('/create_new_chat', methods=['GET','POST'])\ndef create_new_chat():\n form = AddChatForm()\n if form.validate_on_submit():\n users = form.users.data\n print('\\n\\n\\nUsers are:',users,'\\n\\n\\n')\n for user in users.split(' '):\n print(user)\n if (User.query.filter_by(username=user).first()) is None:\n flash('Invalid users selected')\n return redirect(url_for('chat'))\n users = users.split(' ')\n u = []\n for user in users:\n u.append(User.query.filter_by(username=user).first())\n chat = Chat(name=form.name.data, users=u)\n print(chat)\n db.session.add(chat)\n db.session.commit()\n flash('Congratulations, you created a new chat!')\n return redirect(url_for('chat_id',id=chat.id))\n return render_template('create_new_chat.html',title='Create new chat', form=form)\n\n\"\"\"@socketio.on('connect')\ndef handleConnect():\n user = current_user\n print('User that connected: ', user)\n send(user, broadcast=True)\n\"\"\"\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('chat'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user is None or not user.check_password(form.password.data):\n flash('Invalid username or password')\n return redirect(url_for('chat'))\n login_user(user, remember=form.remember_me.data)\n return redirect(url_for('chat'))\n return render_template('login.html', title='Sign In', form=form)\n\n\n@app.route('/voice', methods=['POST'])\ndef voice_rec():\n r = sr.Recognizer()\n with sr.Microphone() as source:\n print(\"Listening...\")\n audio = r.listen(source)\n try:\n text = r.recognize_google(audio)\n print(\"You said : {}\".format(text))\n except:\n print(\"Sorry could not recognize what you said\")\n return text\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('index'))\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data, email=form.email.data)\n user.set_password(form.password.data)\n db.session.add(user)\n db.session.commit()\n flash('Congratulations, you are now a registered user!')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n","repo_name":"doctorblinch/ipz_chat","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"27500262780","text":"# execute the block while condition remains true\n\n# #syntax:\n# while boolean_condition:\n# execute1\n# else:\n# execute2\n\nx = 0\nwhile x < 5:\n print(f'the current value of x is {x}')\n x += 1\nelse:\n print('x is not less than 5')\n","repo_name":"Ukabix/python-basic","sub_path":"core ideas/statements/loops/loops while.py","file_name":"loops while.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"8017039494","text":"from sys import stdin\nk, n = map(int, stdin.readline().split())\nline = [int(stdin.readline()) for _ in range(k)]\n\nleft, right = 1, max(line)\n\n\n\nwhile left <= right:\n mid = (left + right) // 2\n count = 0\n for i in line:\n count += (i // mid)\n \n if n <= count:\n left = mid + 1\n else:\n right = mid - 
1\nprint(right)","repo_name":"joyjhm/study-Algorithm","sub_path":"baekjoon/python/silver/silver3/1654.py","file_name":"1654.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"43681489720","text":"from django.conf import settings\nfrom django.urls import path\n\nfrom api.v1.views import ObtainTokenView, UsersView, RefreshView, SignUpView, StripeWebhook\n\nurlpatterns = [\n path('webhooks/stripe/', StripeWebhook.as_view(), name='stripe_webhook'),\n]\n\n\nif settings.ENABLE_API:\n urlpatterns += [\n path('auth/tokens/obtain/', ObtainTokenView.as_view(), name='auth-sign-in'),\n path('auth/sign-up/', SignUpView.as_view(), name='auth-sign-up'),\n path('auth/tokens/refresh/', RefreshView.as_view(), name='auth-tokens-refresh'),\n path('users/', UsersView.as_view(), name='users'),\n ]\n","repo_name":"AivGitHub/brosfiles","sub_path":"api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"22418287881","text":"from docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.shared import Pt\nfrom mongoengine import EmbeddedDocumentListField, StringField\nfrom ..models import Request, Subject\nfrom .case_utils import add_analysis_paragraph, table_english, get_academic_program\n\n\nclass HOID(Request):\n\n full_name = 'Homologación de Idioma'\n\n CT_EXAMEN = 'EX'\n CT_CURSO_NAL = 'CN'\n CT_CURSO_INAL = 'CI'\n CT_CHOICES = (\n (CT_EXAMEN, 'Examen'),\n (CT_CURSO_NAL, 'Curso Nacional'),\n (CT_CURSO_INAL, 'Curso Internacional')\n )\n\n min_grade = StringField(required=True, default='B1',\n display='Nivel Requerido')\n certification_type = StringField(\n required=True, choices=CT_CHOICES, display='Tipo de certificación', default=CT_EXAMEN)\n institution = StringField(\n required=True, display='Institución/Examen', default='')\n grade_got = StringField(required=True, default='B1',\n display='Nivel Obtenido')\n\n subs = []\n subs.append(Subject(name=\"Inglés I- Semestral\", code='1000044', credits=3, \n tipology=Subject.TIP_PRE_NIVELACION))\n subs.append(Subject(name=\"Inglés II- Semestral\", code='1000045', credits=3, \n tipology=Subject.TIP_PRE_NIVELACION))\n subs.append(Subject(name=\"Inglés III- Semestral\", code='1000046', credits=3, \n tipology=Subject.TIP_PRE_NIVELACION))\n subs.append(Subject(name=\"Inglés IV- Semestral\", code='1000047', credits=3, \n tipology=Subject.TIP_PRE_NIVELACION))\n\n subjects = EmbeddedDocumentListField(\n Subject,\n display='Asignaturas Homologadas',\n default=list(subs)\n )\n\n regulation_list = ['102|2013|CSU', '001|2016|VAC']\n\n str_cm = []\n\n str_pcm = ['Alcanzó el nivel {} en el {}.']\n\n str_ans = [\n 'homologar en el periodo académico {}, el requisito de idioma inglés, ',\n 'por obtener una calificación de {} en el exámen {}, siendo {} el mínimo exigido.',\n 'teniendo en cuenta que presenta un certificado de estudios expedido por una ' +\n 'institución de educación superior {}, indicando que ha cursado un total ' +\n 'acumulado de horas equivalente al requerido para alcanzar el nivel {} (375 horas).'\n ]\n\n def cm(self, docx):\n paragraph = docx.add_paragraph()\n paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n paragraph.paragraph_format.space_after = Pt(0)\n paragraph.add_run(self.str_council_header + ' ')\n self.cm_answer(paragraph)\n if self.is_affirmative_response_approval_status():\n self.add_subjects(docx)\n\n def cm_answer(self, 
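
The loop finishing above is the standard parametric-search pattern from BOJ 1654: the piece count is non-increasing in the cut length, so binary search finds the largest feasible length. A generic sketch:

    def max_feasible(lo, hi, feasible):
        # Largest x in [lo, hi] with feasible(x) true, assuming feasible is monotone.
        while lo <= hi:
            mid = (lo + hi) // 2
            if feasible(mid):
                lo = mid + 1
            else:
                hi = mid - 1
        return hi

    # For the cable problem: max_feasible(1, max(line), lambda x: sum(c // x for c in line) >= n)
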
paragraph):\n # pylint: disable=no-member\n paragraph.add_run(\n self.get_approval_status_display().upper() + ' ').font.bold = True\n self.add_answer(paragraph)\n\n def pcm(self, docx):\n final_analysis = []\n analysis = self.str_pcm[0].format(self.grade_got, self.institution)\n final_analysis.append(analysis)\n if len(self.extra_analysis) > 0:\n for i in self.extra_analysis:\n final_analysis.append(i)\n add_analysis_paragraph(docx, final_analysis)\n paragraph = docx.add_paragraph()\n paragraph.alignment = WD_ALIGN_PARAGRAPH.JUSTIFY\n paragraph.paragraph_format.space_after = Pt(0)\n paragraph.add_run(self.str_answer + ':\\n').font.bold = True\n paragraph.add_run(self.str_comittee_header + ' ')\n self.pcm_answer(paragraph)\n if self.is_affirmative_response_advisor_response() or self.is_waiting_response_advisor_response():\n self.add_subjects(docx)\n\n def pcm_answer(self, paragraph):\n paragraph.add_run(\n # pylint: disable=no-member\n self.get_advisor_response_display().upper() + ' ').font.bold = True\n self.add_answer(paragraph)\n\n def add_answer(self, paragraph):\n paragraph.add_run(self.str_ans[0].format(self.academic_period))\n if self.certification_type == self.CT_EXAMEN:\n paragraph.add_run(self.str_ans[1].format(\n self.grade_got, self.institution, self.min_grade\n ))\n else:\n mod = 'nacional' if self.certification_type == self.CT_CURSO_NAL else 'INTERNACIONAL'\n paragraph.add_run(self.str_ans[2].format(mod, self.grade_got))\n\n def add_subjects(self, docx):\n data = Subject.subjects_to_array(self.subjects)\n details = []\n details.append(self.certification_type)\n details.append(self.grade_got)\n print(self.student_name)\n details.append(self.student_name)\n details.append(self.student_dni)\n details.append(get_academic_program(self.academic_program))\n details.append(self.academic_program)\n table_english(docx, data, details)\n\n def resource_analysis(self, docx):\n last_paragraph = docx.paragraphs[-1]\n self.pcm_answer(last_paragraph)\n \n def resource_pre_answer(self, docx):\n last_paragraph = docx.paragraphs[-1]\n self.pcm_answer(last_paragraph)\n\n def resource_answer(self, docx):\n last_paragraph = docx.paragraphs[-1]\n self.cm_answer(last_paragraph)\n\n","repo_name":"uapa-team/actas_backend","sub_path":"council_minutes/cases/HOID.py","file_name":"HOID.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"13493823002","text":"from odoo import api, fields, models\nfrom odoo.fields import Datetime\n\n\nclass TaskReminder(models.Model):\n _inherit = 'project.task'\n\n task_reminder = fields.Boolean(string=\"Task Reminder\", default=False)\n\n def send_cron_mail(self):\n for task in self.env['project.task'].search(\n [('date_deadline', '!=', None), ('task_reminder', '=', True), ('user_ids', '!=', None)]):\n reminder_date = task.date_deadline\n today = Datetime.now().date()\n if today == reminder_date and task:\n template_id = self.env.ref('task_deadline_reminder.mail_template_task_reminder').id\n if template_id:\n email_template_obj = self.env['mail.template'].browse(template_id)\n values = email_template_obj.generate_email(task.id,\n ['subject', 'body_html', 'email_from', 'email_to',\n 'partner_to', 'email_cc', 'reply_to', 'scheduled_date'])\n msg_id = self.env['mail.mail'].create(values)\n if msg_id:\n msg_id.send()\n return True\n\n def get_user_email(self):\n self.ensure_one()\n return \",\".join([e for e in self.user_ids.mapped(\"email\") if e])\n\n def 
get_user_name(self):\n self.ensure_one()\n return \",\".join([e for e in self.user_ids.mapped(\"name\") if e])\n","repo_name":"phamkhacson/Odoo_Intern_TaskReminder","sub_path":"customaddons/task_deadline_reminder/models/task_reminder_inherit.py","file_name":"task_reminder_inherit.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"25686024648","text":"n, m = map(int, input().split())\npath = [0] * m # m 자릿수\nused = [0] * (n + 1) # index n까지의 used 생성\n\ndef abc(level):\n if level == m: # m자릿수 도달하면 stop\n for i in range(m):\n print(path[i], end=' ')\n print()\n return\n for i in range(1, n + 1): # used의 범위를 1~n으로 만듦\n if used[i] == 1: # 사용한 번호는 건너띄기\n continue\n\n path[level] = i\n used[i] = 1\n abc(level + 1)\n used[i] = 0\n\nabc(0)\n","repo_name":"KINHYEONJI/mad-algorithm","sub_path":"AUGUST/seonghoho/BOJ15649.py","file_name":"BOJ15649.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} +{"seq_id":"13139950815","text":"### Programa 3: Información medica de paciente:\n\n#El JSON generado deberá ser:\n\n#{\"nombre\": \"Pedro\", \"apellido\": \"Pérez\", \"edad\": 45, \"peso\": 80.5, \"altura\": 1.75, \"historial_medico\": {\"alergias\": [\"penicilina\", \"mariscos\"], \"problemas_cardiacos\": false, \"medicamentos\": [{\"nombre\": \"Ibuprofeno\", \"dosis\": \"200mg\"}, {\"nombre\": \"Paracetamol\", \"dosis\": \"500mg\"}]}, \"ultima_revision\": \"2022-10-01\", \"proximo_turno\": \"2023-05-15\"}\n\nimport json\ndef principal():\n nombre = input(\"Ingrese el nombre del paciente: \")\n apellido = input(\"Ingrese el apellido del paciente: \")\n edad = int(input(\"Ingrese la edad del paciente: \"))\n peso = float(input(\"Ingrese el peso del paciente en kg: \"))\n altura = float(input(\"Ingrese la altura del paciente en metros: \"))\n alergias = input(\"Ingrese las alergias del paciente separadas por coma (si no tiene alergias, escriba 'ninguna'): \")\n problemas_cardiacos = input(\"¿Tiene el paciente problemas cardiacos? 
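
The used[]/path[] backtracking in the BOJ15649 record above enumerates all length-m permutations of 1..n in lexicographic order; itertools yields the same sequences, which makes a handy cross-check (sketch):

    from itertools import permutations

    n, m = 4, 2  # illustrative
    for seq in permutations(range(1, n + 1), m):
        print(*seq)
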
(sí o no): \").lower() == \"sí\"\n    medicamentos = []\n\n    while True:\n        nombre_medicamento = input(\"Ingrese el nombre del medicamento (o escriba 'ninguno' para terminar): \")\n        if nombre_medicamento == \"ninguno\":\n            break\n        dosis_medicamento = input(\"Ingrese la dosis del medicamento: \")\n        medicamento = {\n            \"nombre\": nombre_medicamento,\n            \"dosis\": dosis_medicamento\n        }\n        medicamentos.append(medicamento)\n\n    ultima_revision = input(\"Ingrese la fecha de la última revisión médica en formato ISO 8601 (ejemplo: 2022-10-01): \")\n    proximo_turno = input(\"Ingrese la fecha del próximo turno médico en formato ISO 8601 (ejemplo: 2023-05-15): \")\n\n    historial_medico = {\n        \"alergias\": alergias.split(\",\"),\n        \"problemas_cardiacos\": problemas_cardiacos,\n        \"medicamentos\": medicamentos\n    }\n\n    paciente = {\n        \"nombre\": nombre,\n        \"apellido\": apellido,\n        \"edad\": edad,\n        \"peso\": peso,\n        \"altura\": altura,\n        \"historial_medico\": historial_medico,\n        \"ultima_revision\": ultima_revision,\n        \"proximo_turno\": proximo_turno\n    }\n\n    json_paciente = json.dumps(paciente)\n    print(json_paciente)\nif __name__ == \"__main__\":\n    principal()","repo_name":"miguelbemon/MiddlePython","sub_path":"EjerciciosJSON/EjercicioJSON3.py","file_name":"EjercicioJSON3.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"33649110510","text":"#filepath should include filename!\r\n\r\nimport os\r\nimport numpy as np\r\n\r\nclass OCamCalib_model:\r\n    def __init__(self):\r\n        self.polyCoefs = []\r\n        self.invPolyCoefs = []\r\n        self.centerCords = []\r\n        self.affineCoefs = []\r\n        self.imSize = []\r\n\r\n    def readOCamFile(self, filepath):\r\n        if os.path.isfile(filepath):\r\n            file = open(filepath, \"r\")\r\n\r\n            for idx, line in enumerate(file):\r\n                # Read polynomial coefficients\r\n                if idx == 2:\r\n                    polyCoefs_temp = line.split()\r\n                    # For some reason they are saved as the wrong sign (+ve should be -ve and vice versa)\r\n                    # use a separate counter here so the outer line index 'idx' is not clobbered\r\n                    for j, value in enumerate(polyCoefs_temp):\r\n                        if not j == 0:\r\n                            self.polyCoefs.append(float(value))\r\n\r\n                # Read inverse polynomial coefficients\r\n                if idx == 6:\r\n                    invPolyCoefs_temp = line.split()\r\n                    # Might also be inverted? 
Not sure if I'll need these\r\n                    for j, value in enumerate(invPolyCoefs_temp):\r\n                        if not j == 0:\r\n                            self.invPolyCoefs.append(float(value))\r\n\r\n                # Read center coordinates\r\n                if idx == 10:\r\n                    self.centerCords = line.split()\r\n                    self.centerCords = [float(self.centerCords[0]), float(self.centerCords[1])]\r\n\r\n                # Read affine coefficients\r\n                if idx == 14:\r\n                    self.affineCoefs = line.split()\r\n                    self.affineCoefs = [float(self.affineCoefs[0]), float(self.affineCoefs[1]), float(self.affineCoefs[2])]\r\n\r\n                # Read image size\r\n                if idx == 18:\r\n                    self.imSize = line.split()\r\n                    self.imSize = [int(self.imSize[0]), int(self.imSize[1])]\r\n\r\n            file.close()\r\n\r\n        else:\r\n            print(\"The OCamCalib text file at \" + str(filepath) + \" could not be opened!\")\r\n            raise Exception()","repo_name":"mikegroom765/Omnidirectional-Stereo-Masters-Project","sub_path":"OmnidirProject/OCamCalib_model.py","file_name":"OCamCalib_model.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"12"} {"seq_id":"23631063797","text":"from sys import stdin\n\nx = 0\nfor line in stdin:\n    x = line[:-1]\n\ndef asdf(a):\n    b = \"\"\n    pchar = a[0]\n    n = 0\n    for char in a:\n        if char == pchar:\n            n += 1\n        else:\n            b += str(n)\n            b += str(pchar)\n            n = 1\n            pchar = char\n    b += str(n)\n    b += str(pchar)\n    return b\n\nfor i in range(50):\n    x = asdf(x)\n\nprint(len(x))\n","repo_name":"lce4113/Comp-Prog","sub_path":"Advent of Code/2015/Day 10/main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"738781725","text":"# Download wallpapers from multiple pages of the anime section\n\n# Analyze the pagination URLs\n#https://pic.netbian.com/4kdongman/index.html\n#https://pic.netbian.com/4kdongman/index_2.html\n#https://pic.netbian.com/4kdongman/index_3.html\n#https://pic.netbian.com/4kdongman/index_4.html\n# The page URLs (except the first page) follow the pattern: https://pic.netbian.com/4kdongman/index_{i}.html\nimport os\nimport time\nimport requests\nfrom lxml import etree\n# Homepage URL of the netbian wallpaper site\nurl_first=\"https://pic.netbian.com\"\nheaders_={\n    \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36\"\n}\n# 1. Fetch the current category titles, saving them into dict_title so an entered title can be looked up\nresponse=requests.get(url_first,headers=headers_)\nstr_data=response.content.decode(encoding=\"gbk\")\nhtml_obj=etree.HTML(str_data)\ntitle_4k_all=html_obj.xpath(\"//div[@class='classify clearfix']/a[@href]/@title\")\ntitle_4k_all_url_b=html_obj.xpath(\"//div[@class='classify clearfix']/a/@href\")\ndict_title={}\nprint(\"以下是可以批量爬取壁纸的标题栏\")\nfor i in range(3):\n    print(\" | \")\nfor i in range(len(title_4k_all)):\n    dict_title[title_4k_all[i]]=title_4k_all_url_b[i]\n    print(title_4k_all[i])\nfor i in range(3):\n    print(\" | \")\n# Create a folder for every category in one batch\ntry:\n    path=r\"D:\"\n    path=path+\"\\\\\"+\"彼岸图网\"\n    for i in dict_title:\n        os.makedirs(path+\"\\\\\"+i)\nexcept:\n    pass\n\nwhile True:\n    # Prompt for one of the category titles\n    title_4k=input(\"请在上述标题中选一个复制粘贴作为输入-------(建议选择:4K美女图片):\")\n    # Number of pages to scrape\n    page=int(input(f\"请输入想要爬取{title_4k}部分的壁纸的页数:\"))\n    if title_4k in dict_title:\n        url1=url_first+dict_title[title_4k]\n        # url_jpg_b=html_obj.xpath(\"//div/ul/li/a/img/@src\")\n        # Scrape the requested number of pages\n        for i in range(1,page+1):\n            if i==1:\n                url=url1+\"index.html\"\n            else:\n                url=url1+f\"index_{i}.html\"\n            response = requests.get(url, headers=headers_)\n            str_data = response.content.decode(\"gbk\")\n            html_obj = etree.HTML(str_data)\n            url_jpg_b=html_obj.xpath(\"//div/ul/li/a/img/@src\")\n            # 
print(url_jpg_b)\n            name=html_obj.xpath(\"//div/ul/li/a/b/text()\")\n            print(f\"第{i}页下载中\")\n            for j in range(len(url_jpg_b)):\n                url_jpg_b_pic=url_first+url_jpg_b[j]\n                response=requests.get(url_jpg_b_pic,headers=headers_)\n                bytes_data=response.content\n                print(\"-----正在下载中-----\")\n                with open(f\"D:\彼岸图网\{title_4k}\{name[j]}.jpg\",\"wb\") as f:\n                    f.write(bytes_data)\n\n        print(f\"-----您已经成功爬取-----(可以在此电脑的D盘文件夹>彼岸图网>{title_4k}文件夹中打开哦!!!里面绝对有惊喜!!!)\")\n        print(\"这里建议亲亲们一定要打开文件夹看看,如果不喜欢可以在上述文件夹页面ctrl+A,右键删除,同时清空回收站,不会对电脑有任何影响哦!!!\")\n        time.sleep(3)\n        print(\"\")\n        print(\"请问您是否还要继续爬取壁纸图片?\")\n        time.sleep(0.3)\n        # Ask whether the user wants to continue\n        core=input(\"是-------------------请输入数字1 (输入完毕后按回车)\\n\"\n                   \"否-------------------请输入数字2 (输入完毕后按回车)\\n\")\n        time.sleep(0.3)\n        # exit1 =input(\"否-------------------请输入数字2 (输入完毕后按回车)\")\n        if core==\"1\":\n            continue\n        elif core==\"2\":\n            print(\"\")\n            print(\"您选择退出,界面即将在3秒后退出-----(也可以按右上角x键自行退出哦)\")\n            time.sleep(3)\n            break\n        else:\n            print(\"\")\n            print(\"您选择是否继续爬取输入有误,我们即将在3秒后退出\")\n            time.sleep(3)\n            break\n    else:\n        print(\"您想要爬取的标题栏输入错误,请重新输入\")  # the outer while-loop lets the user re-enter a title\n\n\n","repo_name":"2674399125/bianTW_photo_capture","sub_path":"彼岸图网图片批量爬取程序.py","file_name":"彼岸图网图片批量爬取程序.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"12"} {"seq_id":"12144252606","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport random\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nINPUT = pd.read_csv(\"input_data.csv\")\nOUTPUT = pd.read_csv(\"output_data.csv\")\n\nOUTPUT.columns = [\"outputx\", \"outputy\"]\nINPUT.columns = [\"lx\", \"ly\", \"ll1x\", \"ll1y\", \"ll2x\", \"ll2y\", \"ll3x\", \"ll3y\", \"ll4x\", \"ll4y\", \"ll5x\", \"ll5y\"\n, \"ll6x\", \"ll6y\", \"rx\", \"ry\", \"rl1x\", \"rl1y\", \"rl2x\", \"rl2y\", \"rl3x\", \"rl3y\", \"rl4x\", \"rl4y\", \"rl5x\", \"rl5y\"\n, \"rl6x\", \"rl6y\"]\n\nOUTPUT.insert(0, 'id', range(0, len(OUTPUT)))\nINPUT.insert(0, 'id', range(0, len(INPUT)))\n\nDATA = pd.merge(INPUT, OUTPUT, on = \"id\")\nDATA = DATA.apply (pd.to_numeric, errors='coerce')\nDATA = DATA.dropna()\ndel DATA['id']\n\n# 60% for training purpose\ntrain_dataset = DATA.sample(frac = 0.6)\n# 40% for testing purpose\ntest_dataset = pd.concat([train_dataset, DATA]).drop_duplicates(keep = False) \n\n# Reset the index\ntrain_dataset = train_dataset.reset_index(drop=True)\ntest_dataset = test_dataset.reset_index(drop=True)\n\ntrain_x_labels = train_dataset.pop(\"outputx\")\ntrain_y_labels = train_dataset.pop(\"outputy\")\ntest_x_labels = test_dataset.pop(\"outputx\")\ntest_y_labels = test_dataset.pop(\"outputy\")\n\ntrain_stats = train_dataset.describe()\ntrain_stats = train_stats.transpose()\n\ndef norm(x):\n\treturn (x - train_stats['mean']) / train_stats['std']\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\nmodelX = keras.models.load_model('predict_xindex.h5')\nmodelY = keras.models.load_model('predict_yindex.h5')\n\nrandom_index = random.randint(0, normed_test_data.shape[0])\n\npredictX = modelX.predict(normed_test_data.loc[[random_index]]).flatten().tolist()[0]\npredictY = modelY.predict(normed_test_data.loc[[random_index]]).flatten().tolist()[0]\n\nactualX = test_x_labels.loc[[random_index]].tolist()[0]\nactualY = test_y_labels.loc[[random_index]].tolist()[0] \n\nprint(\"predictX: \" + str(predictX))\nprint(\"actualX: \" + 
str(actualX))\nprint(\"predictY: \" + str(predictY))\nprint(\"actualY: \" + str(actualY))\n\n# loss, mae, mse = modelX.evaluate(normed_test_data, test_x_labels, verbose=2)\n# print(\"Testing set Mean Abs Error: {:5.2f} x pixels\".format(mae))\n# loss, mae, mse = modelY.evaluate(normed_test_data, test_y_labels, verbose=2)\n# print(\"Testing set Mean Abs Error: {:5.2f} y pixels\".format(mae))\n\n#\n\n\n","repo_name":"AdamKGordon/eyeTracker","sub_path":"GazeTracking/gaze_tracking/cursor_predict.py","file_name":"cursor_predict.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"42606458124","text":"'''\nThe trie I wrote myself was more concise than this official solution; it looked like my code for problem 648.\nIt ran without errors but kept returning 0, and I never figured out where the bug was, so I deleted it.\nThe official solution below is very straightforward, in the style I usually write, ONO\n\nOnce you know this problem wants a trie, the pseudo-algorithm is clear, though implementations can differ -\nyou do not have to store a value at every node. Since a trie's terminal node usually carries a marker anyway,\nyou can just set that marker to val and have every sum() run a DFS to add things up.\nThe approach below also works.\n'''\nclass TrieNode:\n    def __init__(self):\n        self.val = 0\n        self.next = [None for _ in range(26)]\n\nclass MapSum:\n    def __init__(self):\n        self.root = TrieNode()\n        self.map = {}\n\n    def insert(self, key: str, val: int) -> None:\n        delta = val\n        if key in self.map:\n            delta -= self.map[key]\n        self.map[key] = val\n        node = self.root\n        for c in key:\n            if node.next[ord(c) - ord('a')] is None:\n                node.next[ord(c) - ord('a')] = TrieNode()\n            node = node.next[ord(c) - ord('a')]\n            node.val += delta\n\n    def sum(self, prefix: str) -> int:\n        node = self.root\n        for c in prefix:\n            if node.next[ord(c) - ord('a')] is None:\n                return 0\n            node = node.next[ord(c) - ord('a')]\n        return node.val\n\n","repo_name":"z472/ProblemLeecode","sub_path":"650-699/677. 键值映射(前缀树但不会写).py","file_name":"677. 键值映射(前缀树但不会写).py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"12"} {"seq_id":"4942280728","text":"class Solution:\n    def peakIndexInMountainArray(self, arr: List[int]) -> int:\n        # Boundary Index Binary Search\n        left, right = 0,len(arr) - 1\n        peak = -1\n        \n        while left <= right:\n            mid = (left+right)//2\n            if mid == len(arr)-1 or arr[mid] > arr[mid+1]:\n                peak = mid\n                right = mid - 1\n            else:\n                left = mid + 1\n        return peak","repo_name":"suhashollakc/LeetCode-PY","sub_path":"852-peak-index-in-a-mountain-array/852-peak-index-in-a-mountain-array.py","file_name":"852-peak-index-in-a-mountain-array.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"23638894294","text":"import json\nimport openai\nimport promptStorage as prompts\nimport embeddingSimilarity\nimport time\n\ndef main():\n    pass\n\ndef searching_stage(similar_queries_list):\n    print(\"Starting search stage...\")\n    similar_content_rows = []\n    legal_text_list = []\n    legal_text_tokens_list = []\n\n    print(\"  - Searching relevant sections for lawful template\")\n    begin = time.time()\n    lawful = search_similar_content_sections(similar_queries_list[0], matches=40)\n    legal_text, legal_text_tokens_l = accumulate_legal_text_from_sections(lawful, used_model=\"gpt-3.5-turbo-16k\")\n    legal_text_lawful, citation_list = embeddingSimilarity.format_sql_rows(legal_text)\n    end = time.time()\n    print(\"    * Total time for vector similarity: {}\".format(round(end-begin, 2)))\n\n    '''\n    print(\"  - Searching relevant sections for unlawful template\")\n    begin = time.time()\n    unlawful = search_similar_content_sections(similar_queries_list[4], matches=40)\n    legal_text, legal_text_tokens_u = 
accumulate_legal_text_from_sections(unlawful, used_model=\"gpt-3.5-turbo-16k\")\n    legal_text_unlawful = embeddingSimilarity.format_sql_rows(legal_text)\n    end = time.time()\n    print(\"    * Total time for vector similarity: {}\".format(round(end-begin, 2)))\n    '''\n    legal_text_tokens_list = [legal_text_tokens_l, legal_text_tokens_l, legal_text_tokens_l, legal_text_tokens_l, legal_text_tokens_l]\n    similar_content_rows = [lawful, lawful, lawful, None, None]\n    legal_text_list = [legal_text_lawful,legal_text_lawful,legal_text_lawful,None, None]\n\n    return similar_content_rows, legal_text_list, legal_text_tokens_list, citation_list\n\ndef search_similar_content_sections(modified_user_query, matches=20):\n\n    # Get cosine similarity score of related queries to all content embeddings\n    return embeddingSimilarity.compare_content_embeddings(modified_user_query, match_count=matches)\n\ndef accumulate_legal_text_from_sections(sections, used_model):\n    current_tokens = 0\n    row = 0\n    legal_text = []\n    if used_model == \"gpt-4-32k\":\n        max_tokens = 24000\n    elif used_model == \"gpt-4\":\n        max_tokens = 5000\n    elif used_model == \"gpt-3.5-turbo-16k\":\n        max_tokens = 12000\n    elif used_model == \"gpt-3.5-turbo\":\n        max_tokens = 2000\n    else:\n        # generous fallback for unrecognized model names\n        max_tokens = 24000\n    while current_tokens < max_tokens and row < len(sections):\n        #print(sections[row])\n        current_tokens += sections[row][12]\n        legal_text.append(sections[row])\n        row += 1\n    return legal_text, current_tokens\n\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"spartypkp/legalAI","sub_path":"searchRelevantSections.py","file_name":"searchRelevantSections.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"12"} {"seq_id":"21531260553","text":"import math\nimport random\nimport re\n\nfrom read_n_train import read, train, read_sentence\nfrom bigram import prob_b\n\n\n# All functions mirror the unigram versions but take extra parameters.\n\n\ndef trigram(files):\n    tokens_h = list()\n    tokens_m = list()\n    for file in files:\n        f = open(\"data/\" + str(file) + \".txt\", \"r\")\n        line = f.readline()\n        temp = read(f, 3)\n        if \"HAMILTON\" in line:\n            for i in range(len(temp)):\n                tokens_h.append(temp[i])\n        if \"MADISON\" in line:\n            for i in range(len(temp)):\n                tokens_m.append(temp[i])\n    trigramH = train(tokens_h)\n    trigramM = train(tokens_m)\n    return trigramH, trigramM\n\n\ndef prob_t(word, big, tri):\n    word1 = word.split(\" \")\n    previous = word1[0] + \" \" + word1[1]\n    if word in tri:\n        if previous in big:\n            return tri[word] / big[previous]\n\n\ndef laplace_smoothing(word, big, tri):\n    word1 = word.split(\" \")\n    previous = word1[0] + \" \" + word1[1]\n    if word in tri:\n        if previous in big:\n            return math.log2((tri[word] + 1) / (big[previous] + len(big)))\n    else:\n        if previous in big:\n            return math.log2(1 / (big[previous] + len(big)))\n        else:\n            return math.log2(1 / (len(big)))\n\n\ndef for_head_of_sentence(big, cum, unig):  # at the head of a sentence we fall back to the bigram probability\n    value = 0.0\n    sentence = ''\n    a = sentence.split(\" \")\n    for word in big:\n        token = word.split(\" \")\n        if token[0] == a[len(a) - 1]:\n            prb = prob_b(word, unig, big)  # prob_b is the bigram probability\n            cum[word] = value + prb\n            value = value + prb\n    rand = random.random()\n    for word in cum:\n        if rand <= cum[word]:\n            token = word.split(\" \")\n            sentence = word\n            a.append(token[1])\n            cum.clear()\n            break\n    return sentence, a\n\n\ndef generate_sentence_t(unig, big, tri):\n    cum = {}\n    value = 0.0\n    
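# cum holds a running (cumulative) total of the trigram probabilities gathered below,\n    # so the uniform draw rand selects the next word with probability proportional to prob_t\n    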
sentence, a = for_head_of_sentence(big, cum, unig)\n for i in range(28):\n for word in tri:\n token = word.split(\" \")\n p = token[0] + \" \" + token[1]\n if p == a[len(a) - 2] + \" \" + a[len(a) - 1]:\n prb = prob_t(word, big, tri)\n cum[word] = value + prb\n value = value + prb\n rand = random.random()\n for word in cum:\n if rand <= cum[word]:\n token = word.split(\" \")\n if token[1] != '':\n if i == 0:\n sentence = word\n a.append(token[2])\n cum.clear()\n value = 0.0\n else:\n sentence = sentence + ' ' + token[2]\n a.append(token[2])\n cum.clear()\n value = 0.0\n break\n else:\n sentence = sentence + ' ' + token[1]\n sentence = re.sub(' ', '.', sentence)\n return sentence\n\n return sentence\n\n\ndef prob_of_sentence_t(sentence, big, tri):\n tokens = read_sentence(sentence, 3)\n prob = 1\n for i in range(len(tokens)):\n prob = prob + laplace_smoothing(tokens[i], big, tri)\n\n return prob, len(tokens)\n","repo_name":"orhanyilmaz/Generating-sentences-and-classification","sub_path":"trigram.py","file_name":"trigram.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"72739635222","text":"import unittest\nimport uuid\n\nfrom dotenv import dotenv_values\n\nfrom src.gst_irn import (\n Session,\n get_doc_dtls,\n get_ewb_dtls,\n get_item,\n get_tran_dtls,\n get_val_dtls,\n qr,\n)\nfrom src.gst_irn.codes import States\nfrom src.gst_irn.converters import to_buyer\nfrom src.gst_irn.generators import get_invoice, get_seller_dtls\nfrom src.gst_irn.session import RequestError\nfrom tests.snapshot import compare_snapshot\n\nCONFIG = dotenv_values(\".env\")\n\n\nclass AuthTokenTestCase(unittest.TestCase):\n def test_get_auth_token(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n self.assertTrue(session._auth_token)\n self.assertTrue(session._auth_sek)\n\n def test_pass_through_apis(self):\n gsp_headers = {\n \"aspid\": CONFIG[\"GSP_ASP_ID\"],\n \"password\": CONFIG[\"GSP_ASP_PASSWORD\"],\n }\n session = Session(\n gstin=CONFIG[\"GSP_GSTIN\"],\n client_id=\"\",\n client_secret=\"\",\n username=CONFIG[\"GSP_API_USERNAME\"],\n password=CONFIG[\"GSP_API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n gsp_headers=gsp_headers,\n base_url=\"https://gstsandbox.charteredinfo.com/\",\n )\n session.generate_token()\n details = session.get_gst_info(\"29AAACP7879D1Z0\")\n self.assertEqual(\n details.get(\"TradeName\"), \"TALLY SOLUTIONS PVT LTD\", msg=details\n )\n self.assertEqual(\n details.get(\"AddrBnm\"), \"AMR TECH PARK II B\", msg=details\n )\n self.assertEqual(details.get(\"AddrLoc\"), \"HONGASANDRA\")\n self.assertEqual(details.get(\"DtReg\"), \"2017-07-01\")\n\n def test_get_party_details(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n details = session.get_gst_info(\"29AAACP7879D1Z0\")\n self.assertEqual(\n details.get(\"TradeName\"), \"TALLY SOLUTIONS PVT LTD\", msg=details\n )\n self.assertEqual(\n details.get(\"AddrBnm\"), \"AMR TECH PARK II B\", msg=details\n )\n self.assertEqual(details.get(\"AddrLoc\"), \"HONGASANDRA\")\n 
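# DtReg (presumably the registration date) comes back as a fixed value for this sandbox GSTIN\n        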
self.assertEqual(details.get(\"DtReg\"), \"2017-07-01\")\n\n def test_generate_e_invoice(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n\n place_of_supply = States.KARNATAKA.value\n seller_dtls = get_seller_dtls(\n gstin=CONFIG[\"GSTIN\"],\n lgl_nm=\"Foobar\",\n addr1=\"foobar\",\n loc=\"foobar\",\n pin=226001,\n stcd=States.UTTAR_PRADESH.value,\n )\n buyer_info = session.get_gst_info(\"29AWGPV7107B1Z1\")\n buyer_dtls = to_buyer(buyer_info, place_of_supply)\n\n invoice = get_invoice(\n tran_dtls=get_tran_dtls(),\n doc_dtls=get_doc_dtls(\n typ=\"inv\",\n no=str(uuid.uuid4())[:16],\n dt=\"12/11/2021\",\n ),\n seller_dtls=seller_dtls,\n buyer_dtls=buyer_dtls,\n item_list=[\n get_item(\n sl_no=\"1\",\n is_servc=\"Y\",\n hsn_cd=\"998431\",\n unit_price=100,\n tot_amt=100,\n ass_amt=100,\n gst_rt=12.0,\n igst_amt=12,\n tot_item_val=112,\n )\n ],\n val_dtls=get_val_dtls(\n ass_val=100,\n igst_val=12,\n tot_inv_val=112,\n ),\n )\n einvoice = session.generate_e_invoice(invoice)\n self.assertTrue(\"Irn\" in einvoice, msg=einvoice)\n\n def test_qr_code(self):\n qr_code = \"eyJhbGciOiJSUzI1NiIsImtpZCI6IkVEQzU3REUxMzU4QjMwMEJBOUY3OTM0MEE2Njk2ODMxRjNDODUwNDciLCJ0eXAiOiJKV1QiLCJ4NXQiOiI3Y1Y5NFRXTE1BdXA5NU5BcG1sb01mUElVRWMifQ.eyJkYXRhIjoie1wiU2VsbGVyR3N0aW5cIjpcIjA5QUFKQ003MTkxRTFaNVwiLFwiQnV5ZXJHc3RpblwiOlwiMjlBV0dQVjcxMDdCMVoxXCIsXCJEb2NOb1wiOlwiMzVkN2RmOGQtZmNlNy00OVwiLFwiRG9jVHlwXCI6XCJJTlZcIixcIkRvY0R0XCI6XCIxMi8xMS8yMDIxXCIsXCJUb3RJbnZWYWxcIjoxMTIsXCJJdGVtQ250XCI6MSxcIk1haW5Ic25Db2RlXCI6XCI5OTg0MzFcIixcIklyblwiOlwiMzk5N2Q2ZGJlNTg1ZGJmYzkzYTg1NWNmMmFhZDFhNDEyYWM3ZGYwMjMxYWI3ODc1ODUxYTE1ZTFiYTNmNGRmNFwiLFwiSXJuRHRcIjpcIjIwMjItMDktMDkgMTM6MzM6MDBcIn0iLCJpc3MiOiJOSUMifQ.RPd1hjjuky7Xcs550YTUXXISjrd-g11OrUZn1pS9uDq1Er-wHNeFmWmI72kEbYsL-tofo5mepnqAVKfJDeUZlGk_s597IiZMobmJb2yvEtbPiOs5Hy7lTQav3iD3XtdWIoKp26WqH1RBSCAQQEpzRwMCVO6G7oh9Uq5kf4GI1wuyj0aJT7ThNOrsM5cEyAoDTfdWvkr9MJdNLFt7mBaLMfEAyHe4DJEWJaPENJoicRwifon6FV7zGXcz1Wbxjg12o31470vaaKs2niOD-GBpkQ7W0p-Ac47CG8u2Z_q6QdFflAAWYVzGINwff_bioyXFDVdzt7RJwCDY_a7RKVvd8g\"\n\n html = qr.get_qr_code_image_html(qr_code)\n compare_snapshot(html, \"tests/test_assets/qr_code.html\")\n\n def test_get_e_invoice_by_irn(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n\n invoice = {\n \"Version\": \"1.1\",\n \"TranDtls\": {\"TaxSch\": \"GST\", \"SupTyp\": \"B2B\"},\n \"DocDtls\": get_doc_dtls(\n typ=\"inv\",\n no=str(uuid.uuid4())[:16],\n dt=\"12/09/2022\",\n ),\n \"SellerDtls\": {\n \"Gstin\": \"09AAJCM7191E1Z5\",\n \"LglNm\": \"MITTAL ANALYTICS PRIVATE LIMITED\",\n \"Addr1\": \"NIKHILESH PALACE FIRST FLOOR 17/4 ASHOK MARG\",\n \"Loc\": \"LUCKNOW\",\n \"Pin\": 226001,\n \"Stcd\": \"9\",\n },\n \"BuyerDtls\": {\n \"Gstin\": \"37AABCA7365E2ZP\",\n \"LglNm\": \"AVANTI FEEDS LIMITED\",\n \"Pos\": \"37\",\n \"Addr1\": \", VEMULURU ROAD\",\n \"Loc\": \"VEMULURU\",\n \"Pin\": 534350,\n \"Stcd\": \"37\",\n },\n \"ItemList\": [\n {\n \"SlNo\": \"4\",\n \"IsServc\": \"Y\",\n \"HsnCd\": \"998431\",\n \"UnitPrice\": 100,\n \"IgstAmt\": 12,\n \"TotAmt\": 100,\n \"AssAmt\": 100,\n \"GstRt\": 12.0,\n \"TotItemVal\": 112,\n }\n ],\n 
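# the value details below must reconcile with the item list: AssVal + IgstVal = TotInvVal (100 + 12 = 112)\n            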
\"ValDtls\": {\"TotInvVal\": 112, \"AssVal\": 100, \"IgstVal\": 12},\n }\n einvoice = session.generate_e_invoice(invoice)\n\n # assert duplicate IRN is raised for same document\n with self.assertLogs(\"src.gst_irn.session\", level=\"ERROR\") as cm:\n with self.assertRaises(RequestError) as err:\n _ = session.generate_e_invoice(invoice)\n msg, resp = err.exception.args\n self.assertEqual(msg, \"action failed\")\n self.assertTrue(resp[\"InfoDtls\"][0][\"Desc\"][\"Irn\"], einvoice[\"Irn\"])\n\n # verify document by sending irn\n duplicate = session.get_e_invoice_by_irn(einvoice[\"Irn\"])\n self.assertEqual(duplicate, einvoice)\n\n def test_get_irn_by_doc_details(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n doc_type = \"inv\"\n doc_number = str(uuid.uuid4())[:16]\n doc_date = \"12/09/2022\"\n invoice = {\n \"Version\": \"1.1\",\n \"TranDtls\": {\"TaxSch\": \"GST\", \"SupTyp\": \"B2B\"},\n \"DocDtls\": get_doc_dtls(\n typ=doc_type,\n no=doc_number,\n dt=doc_date,\n ),\n \"SellerDtls\": {\n \"Gstin\": \"09AAJCM7191E1Z5\",\n \"LglNm\": \"MITTAL ANALYTICS PRIVATE LIMITED\",\n \"Addr1\": \"NIKHILESH PALACE FIRST FLOOR 17/4 ASHOK MARG\",\n \"Loc\": \"LUCKNOW\",\n \"Pin\": 226001,\n \"Stcd\": \"9\",\n },\n \"BuyerDtls\": {\n \"Gstin\": \"37AABCA7365E2ZP\",\n \"LglNm\": \"AVANTI FEEDS LIMITED\",\n \"Pos\": \"37\",\n \"Addr1\": \", VEMULURU ROAD\",\n \"Loc\": \"VEMULURU\",\n \"Pin\": 534350,\n \"Stcd\": \"37\",\n },\n \"ItemList\": [\n {\n \"SlNo\": \"4\",\n \"IsServc\": \"Y\",\n \"HsnCd\": \"998431\",\n \"UnitPrice\": 100,\n \"IgstAmt\": 12,\n \"TotAmt\": 100,\n \"AssAmt\": 100,\n \"GstRt\": 12.0,\n \"TotItemVal\": 112,\n }\n ],\n \"ValDtls\": {\"TotInvVal\": 112, \"AssVal\": 100, \"IgstVal\": 12},\n \"EwbDtls\": {\"Distance\": 10},\n }\n einvoice = session.generate_e_invoice(invoice)\n invoice_irn = einvoice[\"Irn\"]\n url = (\n f\"{session.base_url}/eicore/v1.03/Invoice/irnbydocdetails?\"\n f\"doctype={doc_type}&docnum={doc_number}&docdate={doc_date}\"\n )\n response = session.get(url)\n canecel_irn = response[\"Irn\"]\n self.assertEqual(canecel_irn, invoice_irn)\n\n def test_cancel_e_invoice(self):\n session = Session(\n gstin=CONFIG[\"GSTIN\"],\n client_id=CONFIG[\"CLIENT_ID\"],\n client_secret=CONFIG[\"CLIENT_SECRET\"],\n username=CONFIG[\"API_USERNAME\"],\n password=CONFIG[\"API_PASSWORD\"],\n public_key=CONFIG[\"PUBLIC_KEY\"],\n )\n session.generate_token()\n\n invoice = {\n \"Version\": \"1.1\",\n \"TranDtls\": {\"TaxSch\": \"GST\", \"SupTyp\": \"B2B\"},\n \"DocDtls\": get_doc_dtls(\n typ=\"inv\",\n no=str(uuid.uuid4())[:16],\n dt=\"12/09/2022\",\n ),\n \"SellerDtls\": {\n \"Gstin\": \"09AAJCM7191E1Z5\",\n \"LglNm\": \"MITTAL ANALYTICS PRIVATE LIMITED\",\n \"Addr1\": \"NIKHILESH PALACE FIRST FLOOR 17/4 ASHOK MARG\",\n \"Loc\": \"LUCKNOW\",\n \"Pin\": 226001,\n \"Stcd\": \"9\",\n },\n \"BuyerDtls\": {\n \"Gstin\": \"37AABCA7365E2ZP\",\n \"LglNm\": \"AVANTI FEEDS LIMITED\",\n \"Pos\": \"37\",\n \"Addr1\": \", VEMULURU ROAD\",\n \"Loc\": \"VEMULURU\",\n \"Pin\": 534350,\n \"Stcd\": \"37\",\n },\n \"ItemList\": [\n {\n \"SlNo\": \"4\",\n \"IsServc\": \"Y\",\n \"HsnCd\": \"998431\",\n \"UnitPrice\": 100,\n \"IgstAmt\": 12,\n \"TotAmt\": 100,\n \"AssAmt\": 100,\n \"GstRt\": 12.0,\n \"TotItemVal\": 112,\n }\n ],\n \"ValDtls\": {\"TotInvVal\": 112, \"AssVal\": 100, 
\"IgstVal\": 12},\n \"EwbDtls\": {\"Distance\": 10},\n }\n einvoice = session.generate_e_invoice(invoice)\n url = f\"{session.base_url}/eicore/v1.03/Invoice/Cancel\"\n data = {\n \"Irn\": einvoice[\"Irn\"],\n \"CnlRsn\": \"1\",\n \"CnlRem\": \"Wrong entry\",\n }\n response = session.post(url, data=data)\n self.assertTrue(\"CancelDate\" in response)\n\n def test_to_buyer(self):\n testing_gst_info = {\n \"Gstin\": \"34AACCC1596Q002\",\n \"TradeName\": None,\n \"LegalName\": \"Chartered Information Systems Private Limited\",\n \"AddrBnm\": None,\n \"AddrBno\": None,\n \"AddrFlno\": None,\n \"AddrSt\": None,\n \"AddrLoc\": None,\n \"StateCode\": 34,\n \"AddrPncd\": 560009,\n \"TxpType\": None,\n \"Status\": \"ACT\",\n \"BlkStatus\": \"U\",\n \"DtReg\": None,\n \"DtDReg\": None,\n }\n buyer = to_buyer(testing_gst_info, States.ASSAM)\n self.assertEqual(buyer[\"Addr1\"], \"\")\n","repo_name":"Mittal-Analytics/gst-e-invoicing","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":12986,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"12"} +{"seq_id":"30692848927","text":"import random\nimport argparse\nimport string\nfrom argparse import RawTextHelpFormatter\n\n\ndef sprecyzowane_haslo(**kwargs):\n p = ''\n for k, v in kwargs.items():\n if k == 'c' and v != 0:\n for i in range(v):\n p += random.choice(string.digits)\n if k == 'd' and v != 0:\n for i in range(v):\n p += random.choice(string.ascii_uppercase)\n if k == 'm' and v != 0:\n for i in range(v):\n p += random.choice(string.ascii_lowercase)\n if k == 'z' and v != 0:\n for i in range(v):\n p += random.choice(string.punctuation)\n p = ''.join(random.sample(p, len(p)))\n return print(f\"Twoje sprecyzowane hasło: {p}\")\n\n\ndef losowo_wygenerowane_haslo(y):\n x = string.digits + string.ascii_uppercase + string.ascii_lowercase + string.punctuation\n x = ''.join(random.sample(x, len(x)))\n x = x[:y]\n return f\"Twoje losowe hasło to: {x}\"\n\n\nparser = argparse.ArgumentParser(description=\"Wygeneruj losowe haslo\", formatter_class=RawTextHelpFormatter)\n\nparser.add_argument(\"o\", metavar=\"Opcje\", type=str, help=\"l - losowe hasło\\n\"\n \"[-n liczba] opcjonalna ilość znaków w całkowicie losowym haśle, domyślnie 8\\n\"\n \"s - sprecyzowane hasło\\n\"\n \"[-c liczba] opcjonalna ilość cyfr w sprecyzowanym haśle, domyślnie 0\\n\"\n \"[-m liczba] opcjonalna ilość małych liter w sprecyzowanym haśle, domyślnie 0\\n\"\n \"[-d liczba] opcjonalna ilość duzych liter w sprecyzowanym haśle, domyślnie 0\\n\"\n \"[-z liczba] opcjonalna ilość znaków specjalnych w sprecyzowanym haśle, domyślnie 0\\n\",\n choices=[\"l\", \"s\"], nargs=\"?\")\n\nargs, sub_args = parser.parse_known_args()\n\nif args.o == 'l':\n parser.add_argument('-n', type=int, default=8)\n args = parser.parse_args(sub_args)\n print(losowo_wygenerowane_haslo(args.n))\nelif args.o == 's':\n parser.add_argument('-c', type=int, default=0)\n parser.add_argument('-m', type=int, default=0)\n parser.add_argument('-d', type=int, default=0)\n parser.add_argument('-z', type=int, default=0)\n args = parser.parse_args(sub_args)\n sprecyzowane_haslo(c=args.c, d=args.d, m=args.m, z=args.z)\nelse:\n pass\n","repo_name":"FilipGieraga/Python-PL","sub_path":"4. 
Generator haseł + argparse version/argaparse.py","file_name":"argaparse.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"13923267884","text":"# -*- coding: UTF-8 -*-\n\n# Input\nH, W = map(int, input().split())\nc = [list(input()) for h in range(H)]\n# Holds the maximum answer (initial value: -1)\nans = -1\n\ndef DFS(nh, nw, sh, sw):\n    # so the maximum can also be updated inside the recursive function\n    global ans\n    # check whether the adjacent cell is the starting cell\n    if nh == sh:\n        if (nw+1 == sw) or (nw-1 == sw):\n            if len(route) >= 3:\n                ans = max(ans, len(route))\n    if nw == sw:\n        if (nh+1 == sh) or (nh-1 == sh):\n            if len(route) >= 3:\n                ans = max(ans, len(route))\n    # DOWN\n    if nh < H-1 and c[nh+1][nw] == '.' and seen[nh+1][nw]:\n        seen[nh+1][nw] = False\n        route.append((nh+1, nw))\n        DFS(nh+1, nw, sh, sw)\n    # UP\n    if nh > 0 and c[nh-1][nw] == '.' and seen[nh-1][nw]:\n        seen[nh-1][nw] = False\n        route.append((nh-1, nw))\n        DFS(nh-1, nw, sh, sw)\n    # RIGHT\n    if nw < W-1 and c[nh][nw+1] == '.' and seen[nh][nw+1]:\n        seen[nh][nw+1] = False\n        route.append((nh, nw+1))\n        DFS(nh, nw+1, sh, sw)\n    # LEFT\n    if nw > 0 and c[nh][nw-1] == '.' and seen[nh][nw-1]:\n        seen[nh][nw-1] = False\n        route.append((nh, nw-1))\n        DFS(nh, nw-1, sh, sw)\n    # BACK-TRACK\n    h, w = route.pop()\n    seen[h][w] = True\n\n# Try every cell as the starting point\nfor h in range(H):\n    for w in range(W):\n        # mountain cells cannot be starting points\n        if c[h][w] == '#':\n            continue\n        seen = [[True] * W for h in range(H)]\n        seen[h][w] = False\n        route = [(h, w)]\n        DFS(h, w, h, w)\n# Output\nprint(ans)","repo_name":"nishiwakki/atCoder","sub_path":"tenkei90/072.py","file_name":"072.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} {"seq_id":"24629887519","text":"import GPS\nfrom .dialogs import Dialog\nfrom gi.repository import Gtk\nfrom pygps import get_widgets_by_type\nfrom pygps.tree import select_in_tree\nfrom workflows.promises import wait_tasks, hook\n\n\nclass Commits(Dialog):\n\n    COLUMN_FILE = 0\n    COLUMN_STAGED = 1\n    COLUMN_NAME = 2\n    COLUMN_INCONSISTENT = 4\n    COLUMN_FOREGROUND = 6\n\n    COLOR_TITLE = 'rgb(0,0,102)'\n    COLOR_GRAY = 'rgb(0,0,153)'\n    COLOR_BLACK = 'rgb(0,0,0)'\n\n    def open_and_yield(self):\n        yield self._open_and_yield('open Commits')\n        self.view = GPS.MDI.get(\"Commits\")\n        self.tree = get_widgets_by_type(Gtk.TreeView, self.view.pywidget())[0]\n        self.msg = get_widgets_by_type(Gtk.TextView, self.view.pywidget())[0]\n\n    def dump(self, columns=[COLUMN_NAME]):\n        \"\"\"\n        Show the contents of the Commits view\n        \"\"\"\n        m = self.tree.get_model()\n\n        def _get_col(iter, col):\n            if col == Commits.COLUMN_FOREGROUND:\n                v = m[iter][col]\n                v = v.to_string()\n                if v == Commits.COLOR_TITLE:\n                    return 'titleColor'\n                elif v == Commits.COLOR_GRAY:\n                    return 'grayColor'\n                elif v == Commits.COLOR_BLACK:\n                    return 'blackColor'\n                else:\n                    return v\n            else:\n                return m[iter][col]\n\n        def internal(iter):\n            result = []\n            while iter is not None:\n                result.append(tuple(_get_col(iter, c) for c in columns))\n                if m.iter_has_child(iter):\n                    result.append(internal(m.iter_children(iter)))\n                iter = m.iter_next(iter)\n            return result\n        return internal(m.get_iter_first())\n\n    def stage_via_name(self, names):\n        for name in names:\n            select_in_tree(self.tree, column=Commits.COLUMN_NAME, key=name)\n        GPS.execute_action(\"vcs toggle stage selected files\")\n        yield hook(\"vcs_file_status_finished\")\n\n    def stage(self, files):\n        \"\"\"\n        Stage one or more files for commit, by clicking in the tree\n\n        :param [GPS.File] files:\n        \"\"\"\n        for f in 
files:\n select_in_tree(self.tree, column=Commits.COLUMN_FILE, key=f)\n GPS.execute_action(\"vcs toggle stage selected files\")\n yield hook(\"vcs_file_status_finished\")\n\n def set_message(self, msg):\n b = self.msg.get_buffer()\n b.insert(b.get_start_iter(), msg)\n\n def commit_staged(self):\n GPS.execute_action('vcs commit staged files')\n yield wait_tasks()\n\n\nclass Branches(Dialog):\n\n def open_and_yield(self):\n yield self._open_and_yield('open Branches')\n\n\nclass History(Dialog):\n\n def open_and_yield(self):\n yield self._open_and_yield('open History')\n self.view = GPS.MDI.get(\"History\")\n self.tree = get_widgets_by_type(Gtk.TreeView, self.view.pywidget())[0]\n","repo_name":"AdaCore/gnatstudio","sub_path":"share/support/core/gs_utils/internal/vcs.py","file_name":"vcs.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":355,"dataset":"github-code","pt":"12"} +{"seq_id":"17470482282","text":"import os, json\n\n\ndef load_config(filename):\n json_file = open(os.path.join(os.path.dirname(__file__), \".\", filename))\n return json.load(json_file)\n\n\n# ========= Overall =============================\nPROJECT_ID = os.environ.get(\"PROJECT_ID\", \"\")\nif PROJECT_ID != \"\":\n os.environ[\"GOOGLE_CLOUD_PROJECT\"] = PROJECT_ID\nassert PROJECT_ID, \"Env var PROJECT_ID is not set.\"\n\nREGOIN = \"us-central1\"\nPROCESS_TIMEOUT_SECONDS = 600\n\n#List of application forms and supporting documents\nAPPLICATION_FORMS = [\"unemployment_form\"]\nSUPPORTING_DOCS = [\"driver_license\", \"claims_form\", \"utility_bill\", \"pay_stub\"]\n\n# Doc approval status, will reflect on the Frontend app.\nSTATUS_APPROVED = \"Approved\"\nSTATUS_REVIEW = \"Need Review\"\nSTATUS_REJECTED = \"Rejected\"\nSTATUS_PENDING = \"Pending\"\nSTATUS_IN_PROGRESS = \"Processing\"\n\nSTATUS_SUCCESS = \"Complete\"\nSTATUS_ERROR = \"Error\"\nSTATUS_TIMEOUT = \"Timeout\"\n\n# ========= Document upload ======================\nBUCKET_NAME = f\"{PROJECT_ID}-document-upload\"\nTOPIC_ID = \"queue-topic\"\nPROCESS_TASK_API_PATH = \"/upload_service/v1/process_task\"\n\n# ========= Validation ===========================\nBUCKET_NAME_VALIDATION = PROJECT_ID\nPATH = f\"gs://{PROJECT_ID}/Validation/rules.json\"\nPATH_TEMPLATE = f\"gs://{PROJECT_ID}/Validation/templates.json\"\nBIGQUERY_DB = \"validation.validation_table\"\nVALIDATION_TABLE = f\"{PROJECT_ID}.validation.validation_table\"\n\n# ========= Classification =======================\n# Endpoint Id where model is deployed.\n# TODO: Please update this to your deployed VertexAI model ID.\nVERTEX_AI_CONFIG = load_config(\"vertex_ai_config.json\")\nassert VERTEX_AI_CONFIG, \"Unable to locate 'vertex_ai_config.json'\"\n\nCLASSIFICATION_ENDPOINT_ID = VERTEX_AI_CONFIG[\"endpoint_id\"]\nassert CLASSIFICATION_ENDPOINT_ID, \"CLASSIFICATION_ENDPOINT_ID is not defined.\"\n\n#Prediction Confidence threshold for the classifier to reject any prediction\n#less than the threshold value.\nCLASSIFICATION_CONFIDENCE_THRESHOLD = 0.85\n\n# Map to standardise predicted document class from classifier to\n# standard document_class values\nDOC_CLASS_STANDARDISATION_MAP = {\n \"UE\": \"unemployment_form\",\n \"DL\": \"driver_license\",\n \"Claim\": \"claims_form\",\n \"Utility\": \"utility_bill\",\n \"PayStub\": \"pay_stub\"\n}\n\n# ========= DocAI Parsers =======================\n\n# To add parsers, edit /terraform/enviroments/dev/main.tf\n\nPARSER_CONFIG = load_config(\"parser_config.json\")\nassert PARSER_CONFIG, \"Unable to locate 
'parser_config.json'\"\n\n# ========= HITL and Frontend UI =======================\n\n# List of database keys and extracted entities that are searchable\nDB_KEYS = [\n \"active\",\n \"auto_approval\",\n \"is_autoapproved\",\n \"matching_score\",\n \"case_id\",\n \"uid\",\n \"url\",\n \"context\",\n \"document_class\",\n \"document_type\",\n \"upload_timestamp\",\n \"extraction_score\",\n \"is_hitl_classified\",\n]\nENTITY_KEYS = [\n \"name\",\n \"dob\",\n \"residential_address\",\n \"email\",\n \"phone_no\",\n]\n\n### Misc\n\n# Used by E2E testing. Leave as blank by default.\nDATABASE_PREFIX = os.getenv(\"DATABASE_PREFIX\", \"\")\n","repo_name":"GoogleCloudPlatform/document-intake-accelerator","sub_path":"common/src/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"12"} +{"seq_id":"36370545855","text":"import random\r\n\r\nimport tkinter\r\nroot=tkinter.Tk()\r\nroot.geometry(\"700x450\")\r\nX=tkinter.Label(root,text='',font=(\"times\",200))\r\ndef roll():\r\n number=['\\u2680','\\u2681','\\u2682','\\u2683','\\u2684','\\u2685']\r\n X.config(text=f'{random.choice(number)}')\r\n X.pack()\r\nb=tkinter.Button(root,text=\"Roll\",command=roll)\r\nb.place(x=330,y=0)\r\n\r\n\r\nroot.mainloop()\r\n\r\n","repo_name":"atishay-gwari/2D-Dice_Simulator","sub_path":"Dice SImulator.py","file_name":"Dice SImulator.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"74773498579","text":"class Solution:\n #solution1 Iteratively\n # def subsets(self, nums):\n # \"\"\"\n # :type nums: List[int]\n # :rtype: List[List[int]]\n # \"\"\"\n # ans=[[]]\n # for num in nums:\n # ans+=[item+[num] for item in ans]\n # return ans\n\n #solution2 dfs\n def dfs(self,nums,start,path,ans):\n ans.append(path)\n for i in range(start,len(nums)):\n self.dfs(nums,i+1,path+[nums[i]],ans)\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n ans=[]\n self.dfs(nums,0,[],ans)\n return ans\n\na=Solution()\nprint(a.subsets([1,2,3]))","repo_name":"Luolingwei/LeetCode","sub_path":"Array/Q78_Subsets.py","file_name":"Q78_Subsets.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"12"} +{"seq_id":"6287501304","text":"from tkinter import *\n\n\ndef summary(e):\n a = int(input('a = '))\n b = int(input('b = '))\n print(' a + b = ', a + b)\n\n\nroot = Tk()\nbutton = Button(root)\nbutton['text'] = 'Додавання'\nbutton.bind('