diff --git "a/4854.jsonl" "b/4854.jsonl" new file mode 100644--- /dev/null +++ "b/4854.jsonl" @@ -0,0 +1,684 @@ +{"seq_id":"249652907","text":"import os\r\nimport socket\r\nfrom time import sleep\r\n\r\n\r\ndef sendfile(name, who):\r\n with open(name, 'r') as file:\r\n for raw in file:\r\n who.send(raw.encode())\r\n who.recv(1024)\r\n who.send('end'.encode())\r\n\r\ndef checkfile(name, who, maindir, usname):\r\n while True:\r\n msg = who.recv(1024).decode()\r\n if msg == 'end':\r\n return True\r\n elif get_size(maindir+f'/{usname}') > 10240:\r\n return False\r\n else:\r\n with open (name, 'a') as file:\r\n file.write(msg)\r\n sleep(0.001)\r\n who.send('next'.encode())\r\n\r\n\r\ndef get_size(start_path): # Стырена в тырнетах с:\r\n total_size = 0\r\n for dirpath, dirnames, filenames in os.walk(start_path):\r\n for f in filenames:\r\n fp = os.path.join(dirpath, f)\r\n \r\n if not os.path.islink(fp):\r\n total_size += os.path.getsize(fp)\r\n\r\n return total_size\r\n ","sub_path":"sendcheck.py","file_name":"sendcheck.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187693910","text":"import clr\nclr.AddReference('RevitAPI')\nfrom Autodesk.Revit.DB import *\n\nclr.AddReference(\"RevitNodes\")\nimport Revit\nclr.ImportExtensions(Revit.Elements)\n\nclr.AddReference(\"RevitServices\")\nimport RevitServices\nfrom RevitServices.Persistence import DocumentManager\nfrom RevitServices.Transactions import TransactionManager\n\ndoc = DocumentManager.Instance.CurrentDBDocument\nitems = UnwrapElement(IN[0])\nnames = IN[1]\ngoodlist = list()\nbadlist = list()\ncounter = 0\n\nTransactionManager.Instance.EnsureInTransaction(doc)\nwhile counter < len(items):\n\ttry:\n\t\titems[counter].Name = names[counter]\n\t\tgoodlist.append(items[counter].ToDSType(True))\n\texcept:\n\t\tbadlist.append(items[counter].ToDSType(True))\n\tcounter += 1\nTransactionManager.Instance.TransactionTaskDone()\n\nOUT = (goodlist,badlist)","sub_path":"nodes/0.8.x/python/Element.SetName.py","file_name":"Element.SetName.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617902476","text":"#!/usr/bin/python3\nfrom sys import stdin\n \ndef main ():\n read = stdin.readline\n n, h = map (int, read ().split ())\n a = list (map (int, read ().split ()))\n si = 2 * 10 ** 9 + 1\n for i in range (1, n):\n a [i] += a [i - 1]\n if (h - (i * (i + 1)) // 2 <= a [i - 1]):\n print (i)\n return\n l = 1;\n r = si;\n while (l < r):\n o = (l + r) // 2;\n if (h - (o * (o + 1)) // 2 <= ((o - 1) // n) * a [n - 1] + a [(o - 1) % n]): r = o;\n else: l = o + 1;\n for i in range (r - n, r + 1):\n if (h - (i * (i + 1)) // 2 <= ((i - 1) // n) * a [n - 1] + a [(i - 1) % n]):\n print (i)\n break\n\nif __name__ == \"__main__\": main ()\n","sub_path":"_gukiz_height.py","file_name":"_gukiz_height.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560524679","text":"import subprocess\nfrom tm.apple_scripts import AppleScript\nimport loc\n\nSCREEN_OVERVIEW = 'Overview'\nSCREEN_SCAN = 'Scans'\nSCREEN_LOG = 'Logs'\n\nclass WFBSSMacMainWindow:\n def __init__(self):\n self._app_name = \"MainUI\"\n self._app_proc_name = 'Trend Micro Security'\n self._apscript = AppleScript()\n\n def select_section(self, screen):\n poc = self._apscript.get_position(self._app_name, self._app_proc_name, 
loc.main_window.window)\n        mapping = {SCREEN_OVERVIEW: loc.main_window.corresponding_overview,\n                   SCREEN_SCAN: loc.main_window.corresponding_scans,\n                   SCREEN_LOG: loc.main_window.corresponding_logs}\n        self._apscript.mouse_click(self._app_name, self._app_proc_name, poc[0]+mapping[screen][0], poc[1]+mapping[screen][1])\n\n    def get_selected_section(self):\n        mapping = {SCREEN_OVERVIEW: loc.overview.assert_in_section,\n                   SCREEN_SCAN: loc.scans.assert_in_section,\n                   SCREEN_LOG: loc.logs.assert_in_section}\n        for key in mapping:\n            if(self._apscript.element_exists(self._app_name, self._app_proc_name, mapping[key])):\n                return key\n\n    def should_be_on_main_window(self):\n        self._apscript.wait_for_element(self._app_name, self._app_proc_name, loc.main_window.assert_in_window)\n\n    def is_on_main_window(self):\n        return self._apscript.element_exists(self._app_name, self._app_proc_name,loc.main_window.assert_in_window)\n\n    def open_trend_micro_security(self):\n        child = subprocess.Popen([\"open\",\"-a\",\"/Applications/MainUI.app\"],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        child.wait()\n        if(child.returncode != 0):\n            raise RuntimeError(child.communicate()[1])\n\n    def close_trend_micro_security(self):\n        self._apscript.close_app(self._app_name, self._app_proc_name)\n\n","sub_path":"lib/tm/appleutil/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"320674158","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, cross_val_score, KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, \\\n    GradientBoostingClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression\n\ndata = pd.read_csv(\"titanic_data.csv\")\n\ndata.drop([\"PassengerId\", \"Cabin\", \"Ticket\", \"Name\"], axis=1, inplace=True)\n\ndata['Embarked'] = data['Embarked'].replace(np.nan, 'S')\nmedian = data['Age'].median()\ndata['Age'] = data['Age'].replace(np.nan, median)\n\nle_Sex = LabelEncoder()\ndata[\"Sex\"] = le_Sex.fit_transform(data[\"Sex\"])\n\nle_Embarked = LabelEncoder()\ndata[\"Embarked\"] = le_Embarked.fit_transform(data[\"Embarked\"])\n\nreal_x = data.drop([\"Survived\"], axis=1)\nreal_x = real_x.values\nreal_y = data.Survived.values\n\nX_train, X_test, Y_train, Y_test = train_test_split(real_x, real_y, test_size=0.2, random_state=0)\nkneigbour = KNeighborsClassifier(n_neighbors=51)\nkneigbour.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, kneigbour.predict(X_train)))\nprint(accuracy_score(Y_test, kneigbour.predict(X_test)))\n\nlogistic = LogisticRegression()\nlogistic.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, logistic.predict(X_train)))\nprint(accuracy_score(Y_test, logistic.predict(X_test)))\n\ndeciontree = DecisionTreeClassifier()\ndeciontree.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, deciontree.predict(X_train)))\nprint(accuracy_score(Y_test, deciontree.predict(X_test)))\n\nrandom = RandomForestClassifier(n_estimators=500)\nrandom.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, random.predict(X_train)))\nprint(accuracy_score(Y_test, random.predict(X_test)))\n\nrandomtree = ExtraTreesClassifier(n_estimators=500)\nrandomtree.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, randomtree.predict(X_train)))\nprint(accuracy_score(Y_test, randomtree.predict(X_test)))\n\nadaboost = AdaBoostClassifier(n_estimators=300)\nadaboost.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, adaboost.predict(X_train)))\nprint(accuracy_score(Y_test, adaboost.predict(X_test)))\n\ngardientboost = GradientBoostingClassifier(n_estimators=500)\ngardientboost.fit(X_train, Y_train)\nprint(accuracy_score(Y_train, gardientboost.predict(X_train)))\nprint(accuracy_score(Y_test, gardientboost.predict(X_test)))\n\nkfold = KFold(n_splits=10, random_state=0)\nresult = cross_val_score(RandomForestClassifier(), X_train, Y_train, cv=10, scoring=\"accuracy\")\nprint(result.mean())\n\n# RandomForest gave the best accuracy\n","sub_path":"titanticdataset.py","file_name":"titanticdataset.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"254509817","text":"\"\"\"Util that calls Google Search.\"\"\"\nfrom typing import Any, Dict, List, Optional\n\nfrom pydantic import BaseModel, Extra, root_validator\n\nfrom langchain.utils import get_from_dict_or_env\n\n\nclass GoogleSearchAPIWrapper(BaseModel):\n    \"\"\"Wrapper for Google Search API.\n\n    Adapted from: Instructions adapted from https://stackoverflow.com/questions/\n    37083058/\n    programmatically-searching-google-in-python-using-custom-search\n\n    TODO: DOCS for using it\n    1. Install google-api-python-client\n    - If you don't already have a Google account, sign up.\n    - If you have never created a Google APIs Console project,\n    read the Managing Projects page and create a project in the Google API Console.\n    - Install the library using pip install google-api-python-client\n    The current version of the library is 2.70.0 at this time\n\n    2. To create an API key:\n    - Navigate to the APIs & Services→Credentials panel in Cloud Console.\n    - Select Create credentials, then select API key from the drop-down menu.\n    - The API key created dialog box displays your newly created key.\n    - You now have an API_KEY\n\n    3. Setup Custom Search Engine so you can search the entire web\n    - Create a custom search engine in this link.\n    - In Sites to search, add any valid URL (i.e. www.stackoverflow.com).\n    - That’s all you have to fill up, the rest doesn’t matter.\n    In the left-side menu, click Edit search engine → {your search engine name}\n    → Setup Set Search the entire web to ON.  Remove the URL you added from\n     the list of Sites to search.\n    - Under Search engine ID you’ll find the search-engine-ID.\n\n    4.
Enable the Custom Search API\n - Navigate to the APIs & Services→Dashboard panel in Cloud Console.\n - Click Enable APIs and Services.\n - Search for Custom Search API and click on it.\n - Click Enable.\n URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis\n .com\n \"\"\"\n\n search_engine: Any #: :meta private:\n google_api_key: Optional[str] = None\n google_cse_id: Optional[str] = None\n k: int = 10\n siterestrict: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n\n def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:\n cse = self.search_engine.cse()\n if self.siterestrict:\n cse = cse.siterestrict()\n res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()\n return res.get(\"items\", [])\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n google_api_key = get_from_dict_or_env(\n values, \"google_api_key\", \"GOOGLE_API_KEY\"\n )\n values[\"google_api_key\"] = google_api_key\n\n google_cse_id = get_from_dict_or_env(values, \"google_cse_id\", \"GOOGLE_CSE_ID\")\n values[\"google_cse_id\"] = google_cse_id\n\n try:\n from googleapiclient.discovery import build\n\n except ImportError:\n raise ImportError(\n \"google-api-python-client is not installed. \"\n \"Please install it with `pip install google-api-python-client`\"\n )\n\n service = build(\"customsearch\", \"v1\", developerKey=google_api_key)\n values[\"search_engine\"] = service\n\n return values\n\n def run(self, query: str) -> str:\n \"\"\"Run query through GoogleSearch and parse result.\"\"\"\n snippets = []\n results = self._google_search_results(query, num=self.k)\n if len(results) == 0:\n return \"No good Google Search Result was found\"\n for result in results:\n if \"snippet\" in result:\n snippets.append(result[\"snippet\"])\n\n return \" \".join(snippets)\n\n def results(\n self,\n query: str,\n num_results: int,\n search_params: Optional[Dict[str, str]] = None,\n ) -> List[Dict]:\n \"\"\"Run query through GoogleSearch and return metadata.\n\n Args:\n query: The query to search for.\n num_results: The number of results to return.\n search_params: Parameters to be passed on search\n\n Returns:\n A list of dictionaries with the following keys:\n snippet - The description of the result.\n title - The title of the result.\n link - The link to the result.\n \"\"\"\n metadata_results = []\n results = self._google_search_results(\n query, num=num_results, **(search_params or {})\n )\n if len(results) == 0:\n return [{\"Result\": \"No good Google Search Result was found\"}]\n for result in results:\n metadata_result = {\n \"title\": result[\"title\"],\n \"link\": result[\"link\"],\n }\n if \"snippet\" in result:\n metadata_result[\"snippet\"] = result[\"snippet\"]\n metadata_results.append(metadata_result)\n\n return metadata_results\n","sub_path":"langchain/utilities/google_search.py","file_name":"google_search.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"360341854","text":"#!/usr/bin/python\n\nimport sys\nfrom collections import namedtuple\n\nItem = namedtuple('Item', ['index', 'size', 'value'])\n\ndef knapsack_solver(items, capacity):\n # !!!! 
IMPLEMENT ME\n current_set = None\n current_value = 0\n\n for firstSetItem in items:\n tuple = []\n size = 0\n value = 0\n tuple.append(firstSetItem)\n size += firstSetItem.size\n value += firstSetItem.value\n\n for secondSetItem in items:\n if firstSetItem != secondSetItem and secondSetItem.size + size <= capacity:\n tuple.append(secondSetItem)\n size += secondSetItem.size\n value += secondSetItem.value\n\n if value > current_value:\n current_set = tuple\n current_value = value\n\n return 'Items to select: {} \\n Total cost: {} \\n Total value: {}'.format(sorted([item.index for item in current_set]),\n sum([item.size for item in current_set]), current_value)\n\ndef better_knapsack_solver(items, capacity):\n storage = {}\n knapsack = {}\n for weight in range(0, capacity+1):\n storage[0, weight] = 0\n knapsack[0, weight] = []\n\n for index in range(1, len(items)+1):\n for weight in range(0, capacity+1):\n if items[index-1].size > weight:\n storage[index, weight] = storage[index-1, weight]\n knapsack[index, weight] = knapsack[index-1, weight]\n else:\n contains_new = storage[index-1, weight-items[index-1].size] + items[index-1].value\n contains_no_new = storage[index-1, weight]\n \n if contains_new > contains_no_new:\n storage[index, weight] = contains_new\n knapsack[index, weight] = knapsack[index-1, weight-items[index-1].size] + [index]\n #not done yet..\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n capacity = int(sys.argv[2])\n file_location = sys.argv[1].strip()\n file_contents = open(file_location, 'r')\n items = []\n\n for line in file_contents.readlines():\n data = line.rstrip().split()\n items.append(Item(int(data[0]), int(data[1]), int(data[2])))\n \n file_contents.close()\n print(knapsack_solver(items, capacity))\n else:\n print('Usage: knapsack.py [filename] [capacity]')","sub_path":"knapsack.py","file_name":"knapsack.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"104935662","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 23:42:37 2020\n\n@author: amanda\n\"\"\"\n\nimport pandas as pd\nimport os\n\n\nif __name__ == \"__main__\":\n for y in range(102, 103):\n for m in range(1, 3):\n web_type_old = True\n if y > 101 and m > 2:\n web_type_old = False\n\n folder = \"./data/\" + str(y) + str(m)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n url = \"https://mops.twse.com.tw/nas/t21/sii/t21sc03_\" + \\\n str(y) + \"_\" + str(m) + \"_0.html\"\n\n if web_type_old:\n html_df = pd.read_html(url, encoding=\"Big5\")\n else:\n html_df = pd.read_html(url)\n #print(\"read as big 5\")\n\n # print(html_df)\n #c = input()\n gap = 1\n if web_type_old:\n gap = 1\n else:\n gap = 2\n if y == 102 and m == 1:\n for i in range(1, len(html_df), gap):\n if \"產業別\" in html_df[i].keys()[0][0]:\n file_name = html_df[i].keys()[0][0]\n # print(file_name)\n #file_name = file_name.split(\":\")[1]\n # print(file_name)\n keys = html_df[i + 1].keys()\n # print(keys)\n #c = input()\n col_name = []\n for j in range(0, len(keys)):\n col_name.append(keys[j][1])\n\n html_df[i + 1].columns = col_name\n html_df[i + 1].to_csv(os.path.join(folder, file_name +\n \".csv\"), encoding=\"utf-8-sig\", index=False)\n\n for i in range(1, len(html_df), gap):\n if \"產業別\" in html_df[i].keys()[0]:\n file_name = html_df[i].keys()[0].split(\" : \")[0]\n # print(html_df[i].keys()[0])\n keys = html_df[i + 1].keys()\n # print(keys)\n #c = input()\n col_name = []\n for j in range(0, 
len(keys)):\n col_name.append(keys[j][1])\n\n html_df[i + 1].columns = col_name\n html_df[i + 1].to_csv(os.path.join(folder, file_name +\n \".csv\"), encoding=\"utf-8-sig\", index=False)","sub_path":"HW2_1-7.py","file_name":"HW2_1-7.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"465395518","text":"# This is a Enrolment Management System for tutorial classes.\n#\n# Author: \n# Date: \n\n# -*- coding: utf-8 -*-\nimport os\n\nimport unittest\n\nBASE_DIR = os.path.dirname(__file__) + '/'\n\ndef read_lines(filename):\n\t'read a file fome local and return is a list of all lines in its original \\\n\torder,except for lines starting with a \"#\".'\n\twith open(filename) as f:\n\t\tlines = []\n\t\tfor line in f.readlines():\n\t\t\tline = line.strip()\n\t\t\tif not len(line) or line.startswith('#'):\n\t\t\t\tcontinue\n\t\t\tlines.append(line)\n\treturn lines\n\ndef read_table(filename):\n\t'read colon-delimited lines and returns a list of lists in the original \\\n\t order,each line being represented as a list of strings,split on colons'\n\twith open(filename) as f:\n\t\ttables = []\n\t\tfor line in f.readlines():\n\t\t\tline = line.strip()\n\t\t\tif not len(line) or line.startswith('#'):\n\t\t\t\tcontinue\n\t\t\tline_split = line.split(':')\n\t\t\ttables.append(line_split)\n\treturn tables\n\ndef write_lines(filename, lines):\n\t'writes a list of strings safely to the specified file.'\n\twith open(filename) as f1:\n\t\tcontent = f1.read()\n\twith open(filename,'w+') as f2:\t\n\t\tf2.truncate()\n\t\ttry:\n\t\t\tfor line in lines:\n\t\t\t\tf2.write(line)\n\t\t\t\tf2.write('\\n')\n\t\t\treturn 1\n\t\texcept:\n\t\t\tf2.write(content)\n\t\t\treturn 0\n\nclass Enrol:\n\t'''\n\tOperations to tutorial enrolment records.\n\t'''\n\tdef __init__(self, path):\n\t\t# support both absolute and relative path names.\n\t\tif os.path.isabs(path) == True:\n\t\t\tself.abs_path = path\n\t\telse:\n\t\t\tself.abs_path = BASE_DIR + path\n\n\t\t# read records from the specified directory.\n\t\tself.subjects = read_table(os.path.join(self.abs_path, 'SUBJECTS'))\n\t\tself.classes = read_table(os.path.join(self.abs_path, 'CLASSES'))\n\t\tself.venues = read_table(os.path.join(self.abs_path, 'VENUES'))\n\n\t\tself.class_student_list = {} # a dic,key represent class_code,value \\\n\t\t\t\t\t\t\t\t\t # represent a list of student in the class\n\t\tfor classname in self.classes:\n\t\t\tfilename = classname[0] + '.roll'\n\t\t\tself.class_student_list[classname[0]] = read_lines(os.path.join \\\n\t\t\t\t(self.abs_path, filename))\n\t\t\n\tdef _subjects_code(self):\n\t\t'Returns a list of all subjects codes in the enrolment system.'\n\t\tsubjects_code = []\n\t\tfor subject in self.subjects:\n\t\t\tsubjects_code.append(subject[0])\n\t\treturn subjects_code\n\n\tdef _subject_name(self, subject_code):\n\t\t'Returns a string which is the name of specified subject.'\n\t\tfor subject in self.subjects:\n\t\t\tif(subject_code==subject[0]):\n\t\t\t\treturn subject[1]\n\t\traise Exception('subject does not exsit ')\n\n\tdef _classes_code(self, subject_code):\n\t\t'Returns a list of class codes for the specified subject.'\n\t\tclasses_code = []\n\t\tfor line in self.classes:\n\t\t\tif subject_code == line[1]:\n\t\t\t\tclasses_code.append(line[0])\n\t\tif classes_code == []:\n\t\t\traise Exception('class does not exit')\n\t\treturn classes_code\n\n\tdef class_info(self, class_code):\n\t\t'Returns class information in a tuple of the form: 
\\\n\t\t(subject_code, time, venue, tutor, students).'\n\t\tclass_info = []\n\t\tfor line in self.classes:\n\t\t\tif line[0] == class_code:\n\t\t\t\tclass_info.append(line[1])\n\t\t\t\tclass_info.append(line[2])\n\t\t\t\tclass_info.append(line[3])\n\t\t\t\tclass_info.append(line[4])\n\n\t\tif class_info == []:\n\t\t\traise Exception('class_code dose not exit')\n\n\t\t# add list of student to class_info\n\t\tfilename = class_code + '.roll'\n\t\tstudents = read_lines(os.path.join(self.abs_path, filename))\n\t\tclass_info.append(students)\n\t\tclass_info = tuple(class_info) # change list to tuple\n\n\t\treturn class_info\n\t\t\n\n\tdef check_student(self, student_id, subject_code=''):\n\t\t'If subject_code is specified,returns the class_code which the student \\\n\t\t enrolled if no subject_code is specified,returns a list of class_code \\\n\t\t the student is enrolled in across all possible subjects'\n\t\tif subject_code == '':\n\t\t\tclasses_student_select = []\n\t\t\tfor key1 in self.class_student_list:\n\t\t\t\tif student_id in self.class_student_list[key1]:\n\t\t\t\t\tclasses_student_select.append(key1)\n\t\t\treturn classes_student_select\n\t\telse:\n\t\t\tfor key2 in self.class_student_list:\n\t\t\t\tif subject_code == key2.split('.')[0] and student_id in \\\n\t\t\t\t\tself.class_student_list[key2]:\n\t\t\t\t\treturn key2\n\t\t\treturn None\n\n\tdef enrol(self, student_id, class_code):\n\t\t'Enrol a student into a class,returns 1 if successful,None if not.'\n\t\tclass_info = self.class_info(class_code)\n\t\tclasses_code_student_choose = self.check_student(student_id, \\\n\t\t\tclass_code.split('.')[0])\n\n\t\tif classes_code_student_choose == None:\n\t\t\tclass_info[4].append(student_id)\n\t\t\twrite_lines(os.path.join(self.abs_path, class_code + '.roll'), \\\n\t\t\t\tclass_info[4])\n\t\t\treturn 1\n\n\t\t# check the number of students in the class\n\t\telif len(class_info[4]) >= self.venues[1]:\n\t\t\treturn None\n\n\t\telse:\n\t\t\tclass_info = self.class_info(classes_code_student_choose)\n\t\t\t\n\t\t\tclass_info[4].remove(student_id)\n\t\t\twrite_lines(os.path.join(self.abs_path, class_code + '.roll'), \\\n\t\t\t\tclass_info[4])\n\t\t\t\n\t\t\tclass_info[4].append(student_id)\n\t\t\twrite_lines(os.path.join(self.abs_path, class_code + '.roll'), \\\n\t\t\t\tset(class_info[4]))\n\t\t\treturn 1\n\n\nclass EnrolTest(unittest.TestCase):\n\t'''\n\ttest utility functions and methods in enrol module. 
\n\t'''\n\tdef setUp(self):\n\t\tself.enroltest = Enrol(\"../newdata\")\n\n\tdef tearDown(self):\n\t\tself.enroltest = None\n\n\tdef test_read_lines(self):\n\t\tself.assertEqual(read_lines(os.path.join(self.enroltest.abs_path, 'SUBJECTS')), \\\n\t\t\t['scr101:Intro to Scaring', 'scr102:History of Scaring', 'scr202:Disappearing & Hiding'])\n\n\tdef test_read_table(self):\n\t\tself.assertEqual(read_table(os.path.join(self.enroltest.abs_path, 'SUBJECTS')), \\\n\t\t\t[['scr101','Intro to Scaring'], ['scr102','History of Scaring'], ['scr202','Disappearing & Hiding']])\n\n\tdef test_write_lines(self):\n\t\ttest = read_lines(os.path.join(self.enroltest.abs_path, 'SUBJECTS'))\n\t\tself.assertEqual(write_lines(os.path.join(self.enroltest.abs_path, 'SUBJECTS'),test), 1)\n\n\tdef test_subjects_code(self):\n\t\tself.assertEqual(self.enroltest._subjects_code(), ['scr101', 'scr102', 'scr202'])\n\n\tdef test_subject_name(self):\n\t\tself.assertEqual(self.enroltest._subject_name('scr101'),'Intro to Scaring')\n\n\tdef test_classes(self):\n\t\tself.assertEqual(self.enroltest._classes_code('scr102'),['scr102.1','scr102.2'])\n\n\t# def test_class_info(self):\n\t# \tself.assertEqual(self.enroltest.class_info('scr101.1'), \\\n\t# \t\t('scr101', 'Mon 9.30', '2.5.10', 'Dr. Sullivan', \\ \n #\t\t\t['s1109202', 's1124355', 's1136607', 's1124395']))\n\n\tdef test_check_student(self):\n\t\tself.assertEqual(self.enroltest.check_student('s1124395'), ['scr101.1'])\n\t\tself.assertEqual(self.enroltest.check_student('s1124395', 'scr101'), 'scr101.1')\n\t\tself.assertEqual(self.enroltest.check_student('s1124395','scr111'), None)\n\t\tself.assertEqual(self.enroltest.check_student('s11243952'), [])\n\n\tdef test_enrol(self):\n\t\tself.assertEqual(self.enroltest.enrol('s1124333','scr101.1'),1)\n\t\tself.assertEqual(self.enroltest.enrol('s1122333','scr101.1'),1)\n\n\n\nif __name__ == '__main__':\n\t\n\t# e = Enrol(\"../newdata\")\n\n\t# print e._subjects_code()\n\t# print e._subject_name('scr102')\n\t# print e._classes_code('scr102')\n\t# print e.class_info('scr101.1')\n\t# print e.check_student('s1124395')\n\t# print e.check_student('s1124395','scr101')\n\t# print e.enrol('s1124395','scr101.1')\n\t# print e.enrol('s1124355','scr101.1')\n\n\tunittest.main()\n","sub_path":"EMS/code/enrol.py","file_name":"enrol.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167541488","text":"import dash\nfrom dash.dependencies import State, Input, Output\nimport plotly.express as px\nimport numpy as np\nimport pandas as pd\n\nfrom datetime import datetime as dt\nfrom datetime import timezone\nimport re\n\n\n#df = read_news()\n#df = df.iloc[0:10]\n\ndef pp(start, end, n):\n start_u = start.value//10**9\n end_u = end.value//10**9\n\n return pd.DatetimeIndex((10**9*np.random.randint(start_u, end_u, n, dtype=np.int64)).view('M8[ns]'))\n\nsamples = 10000\ndates = pp(pd.to_datetime('2015-01-01'),pd.to_datetime('2019-02-01'),samples)\ncoords = np.random.rand(samples, 2) * 10\nsize = np.random.rand(samples, 1) * 480\ncolor = np.random.randint(low=1, high = 5, size=samples)\n\nd = {'dates': dates , 'x': coords[:,0],'y': coords[:,1], 'frequencies': size.flatten(), 'color': color.flatten().astype(str)}\ndf_test = pd.DataFrame(data=d)\n\n#start_date = dt(2018,8,1)\n#end_date = dt(2019,1,20)\n\ndef register_callbacks(app):\n\t@app.callback(\n\t Output(\"cluster-graph\", \"figure\"),\n\t [\n\t dash.dependencies.Input('my-date-picker-range', 
'start_date'),\n\t dash.dependencies.Input('my-date-picker-range', 'end_date'),\n\t Input(\"clase_value\", \"value\")\n\t ],\n\t)\n\tdef make_graph(date_start, date_end, class_value):\n\t toplot = df_test[(df_test['dates'] > date_start) & (df_test['dates'] <= date_end)].copy()\n\t toplot['frequencies'] = (toplot['frequencies']/(toplot['frequencies'].max()-toplot['frequencies'].min()))*10\n\n\t fig = px.scatter(toplot, x=\"x\", y=\"y\",\n\t size=\"frequencies\", color=\"color\",\n\t hover_name=\"color\")\n\n\t fig.update_traces(marker=dict(line=dict(width=2,\n\t color='DarkSlateGrey')),\n\t selector=dict(mode='markers'))\n\t return fig","sub_path":"my_app/semantic_callbacks.py","file_name":"semantic_callbacks.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9662883","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom edward.models import Categorical, Normal\nimport edward as ed\nimport pandas as pd\n\n# Use the TensorFlow method to download and/or load the data.\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\ned.set_seed(314159)\nN = 100 # number of images in a mini-batch.\nD = 784 # number of features.\nK = 10 # number of classes.\n# Create a placeholder to hold the data (in mini-batches) in a TensorFlow graph.\nx = tf.placeholder(tf.float32, [None, D])\n# Normal(0,1) priors for the variables. Note that the syntax assumes TensorFlow 1.1.\n# place a normal Gaussian prior on the weights and biases\nw = Normal(loc=tf.zeros([D, K]), scale=tf.ones([D, K]))\nb = Normal(loc=tf.zeros(K), scale=tf.ones(K))\n# Categorical likelihood for classification\ny = Categorical(tf.matmul(x,w)+b)\n\n#VARIATIONAL INFERENCE\n# Construct the q(w) and q(b). in this case we assume Normal distributions.\nqw = Normal(loc=tf.Variable(tf.random_normal([D, K])),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, K]))))\nqb = Normal(loc=tf.Variable(tf.random_normal([K])),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([K]))))\n\n# We use a placeholder for the labels in anticipation of the training data.\ny_ph = tf.placeholder(tf.int32, [N])\n# Define the VI inference technique, ie. minimise the KL divergence between q and p.\ninference = ed.KLqp({w: qw, b: qb}, data={y:y_ph})#make the inference model\n\n# Initialise the inference variables\ninference.initialize(n_iter=5000, n_print=100, scale={y: float(mnist.train.num_examples) / N})\n\n# We will use an interactive session.\nsess = tf.InteractiveSession()\n# Initialise all the variables in the session.\ntf.global_variables_initializer().run()\n\n# Let the training begin. We load the data in mini-batches and update the VI inference using each new batch.\nfor _ in range(inference.n_iter):\n X_batch, Y_batch = mnist.train.next_batch(N)\n # TensorFlow method gives the label data in a one hot vector format. We convert that into a single label.\n Y_batch = np.argmax(Y_batch,axis=1)\n info_dict = inference.update(feed_dict={x: X_batch, y_ph: Y_batch})\n inference.print_progress(info_dict)\n\n#EVALUATING OUR MODEL\n# Load the test images.\nX_test = mnist.test.images\n# TensorFlow method gives the label data in a one hot vetor format. 
We convert that into a single label.\nY_test = np.argmax(mnist.test.labels,axis=1)\n\n# Generate samples the posterior and store them.\nn_samples = 100\nprob_lst = []\nsamples = []\nw_samples = []\nb_samples = []\nfor _ in range(n_samples):\n w_samp = qw.sample()\n b_samp = qb.sample()\n w_samples.append(w_samp)\n b_samples.append(b_samp)\n # Also compute the probability of each class for each (w,b) sample.\n prob = tf.nn.softmax(tf.matmul( X_test,w_samp ) + b_samp)\n prob_lst.append(prob.eval())\n sample = tf.concat([tf.reshape(w_samp,[-1]),b_samp],0)\n samples.append(sample.eval())\n\n# Compute the accuracy of the model.\n# For each sample we compute the predicted class and compare with the test labels.\n# Predicted class is defined as the one which as maximum probability.\n# We perform this test for each (w,b) in the posterior giving us a set of accuracies\n# Finally we make a histogram of accuracies for the test data.\naccy_test = []\nfor prob in prob_lst:\n y_trn_prd = np.argmax(prob,axis=1).astype(np.float32)\n acc = (y_trn_prd == Y_test).mean()*100\n accy_test.append(acc)\n\nplt.hist(accy_test)\nplt.title(\"Histogram of prediction accuracies in the MNIST test data\")\nplt.xlabel(\"Accuracy\")\nplt.ylabel(\"Frequency\")\n\n# Here we compute the mean of probabilities for each class for all the (w,b) samples.\n# We then use the class with maximum of the mean probabilities as the prediction.\n# In other words, we have used (w,b) samples to construct a set of models and\n# used their combined outputs to make the predictions.\nY_pred = np.argmax(np.mean(prob_lst,axis=0),axis=1)\nprint(\"accuracy in predicting the test data = \", (Y_pred == Y_test).mean()*100)\n\n# Create a Pandas DataFrame of posterior samples.\nsamples_df = pd.DataFrame(data = samples, index=range(n_samples))\n# Now create a small subset by taking the first 5 weights, labelled as W_0, ... , W_4.\nsamples_5 = pd.DataFrame(data = samples_df[list(range(5))].values,columns=[\"W_0\", \"W_1\", \"W_2\", \"W_3\", \"W_4\"])\n# We use Seaborn PairGrid to make a triangle plot to show auto and cross correlations.\ng = sns.PairGrid(samples_5, diag_sharey=False)\ng.map_lower(sns.kdeplot, n_levels = 4,cmap=\"Blues_d\")\ng.map_upper(plt.scatter)\ng.map_diag(sns.kdeplot,legend=False)\nplt.subplots_adjust(top=0.95)\ng.fig.suptitle('Joint posterior distribution of the first 5 weights')\n\n# Load the first image from the test data and its label.\ntest_image = X_test[0:1]\ntest_label = Y_test[0]\nprint('truth = ',test_label)\npixels = test_image.reshape((28, 28))\nplt.imshow(pixels,cmap='Blues')\n\n# Now the check what the model predicts for each (w,b) sample from the posterior. 
This may take a few seconds...\nsing_img_probs = []\nfor w_samp,b_samp in zip(w_samples,b_samples):\n prob = tf.nn.softmax(tf.matmul( X_test[0:1],w_samp ) + b_samp)\n sing_img_probs.append(prob.eval())\n\n# Create a histogram of these predictions.\nplt.hist(np.argmax(sing_img_probs,axis=2),bins=range(10))\nplt.xticks(np.arange(0,10))\nplt.xlim(0,10)\nplt.xlabel(\"Accuracy of the prediction of the test digit\")\nplt.ylabel(\"Frequency\")\n\n\n#HOW DOES THE MODEL HANDLE UNFAMILIAR DATA?\n# As the nonMNIST data is in the same format as MNIST, we can use the TensorFlow functions.\n# Please make sure that notMNIST_data directory is in the same directory as this notebook.\n# Otherwise, please provide the full path.\n\n### Note that if you haven't executed the bash commands above, TensorFlow method\n### WILL download the MNIST data into notMNIST data below and you will get the\n### digit 7 as the first number!\nnot_mnist = input_data.read_data_sets(\"./notMNIST_data/\", one_hot=True)\n\n# Load the test images from the data and their lables.\nXnm_test = not_mnist.test.images\n# Once again convert one-hot-vector to the corresponding labels.\nYnm_test = np.argmax(not_mnist.test.labels,axis=1)\n\n# Load the first image from the notMNIST.\ntest_image = Xnm_test[0]\ntest_label = Ynm_test[0]\nprint('truth = ',test_label)\npixels = test_image.reshape((28, 28))\nplt.imshow(pixels,cmap='Blues')\n\n# As before, compute the probabilities for each class for all (w,b) samples from the posterior.\nnm_sing_img_probs = []\nfor w_samp,b_samp in zip(w_samples,b_samples):\n prob = tf.nn.softmax(tf.matmul( Xnm_test[0:1],w_samp ) + b_samp)\n nm_sing_img_probs.append(prob.eval())\n\n# Now compute the histogram of predictions from the (w,b) samples.\n# In our previous test, all the weights from the posterior was able get the correct prediction.\n# However, here we see that the model gives a wide range of possibilities.\n# Hence we conclude that its confidence is lower.\nplt.hist(np.argmax(nm_sing_img_probs,axis=2),bins=range(10))\nplt.xticks(np.arange(0,10))\nplt.xlim(0,10)\nplt.xlabel(\"Accuracy of the prediction of the test letter\")\nplt.ylabel(\"Frequency\")\n","sub_path":"bayesian_way.py","file_name":"bayesian_way.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50848380","text":"from operator import attrgetter\n\nimport cv2\n\nfrom parser.index_region import IndexRegion\nfrom parser.text_extractor import extract_plain_text\nfrom parser.utils import temporary_file_name, cleanup\n\n\ndef open_image(image_path):\n return cv2.imread(image_path)\n\n\ndef save_image(file_name, img_mat):\n cv2.imwrite(file_name, img_mat)\n\n\ndef crop(image, x, y, x1, y1):\n return image[y:y1, x:x1]\n\n\ndef swap_colors(img_mat):\n rows, cols = img_mat.shape\n for i in range(rows):\n for j in range(cols):\n img_mat[i, j] = 0 if img_mat[i, j] == 255 else 255\n return img_mat\n\n\ndef convert_color(image, code):\n return cv2.cvtColor(image, code)\n\n\ndef convert_to_black_and_white(image):\n (thresh, im_bw) = cv2.threshold(image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n return im_bw\n\n\ndef sort_regions_by_y_axis(regions):\n return sorted(regions, key=attrgetter('y'))\n\n\ndef crop_regions(image, regions):\n cropped_regions = []\n height, width, mode = image.shape\n for index, index_region in enumerate(regions):\n if index != len(regions) - 1:\n next_region = regions[index + 1]\n else:\n next_region = IndexRegion(\"\", 
index_region.x, height)\n        line_item = crop(image, index_region.x - 5, index_region.y - 10, width, next_region.y)\n        cropped_regions.append(line_item)\n    return cropped_regions\n\n\ndef crop_line_regions(line_item_region, vertical_lines):\n    cropped_regions = []\n    width, height, _ = line_item_region.shape\n    for index, line in enumerate(vertical_lines):\n        x = vertical_lines[index - 1].x1 if index > 0 else 0\n        region = crop(line_item_region, x, 0, line.x1, height)\n        cropped_regions.append(region)\n    return cropped_regions\n\n\ndef extract_text(region):\n    file_path = '%s.jpg' % temporary_file_name(prefix=\"tess\")\n    save_image(file_path, region)\n    text = extract_plain_text(file_path)\n    cleanup(file_path)\n    return text.strip()\n\n\ndef remove_images_without_text(file_paths):\n    text_image_file_paths = []\n    for file_path in file_paths:\n        image = open_image(file_path)\n        text_regions = detect_contours(image)\n        if len(text_regions) > 100:\n            text_image_file_paths.append(file_path)\n        else:\n            cleanup(file_path)\n    return text_image_file_paths\n\n\ndef detect_contours(image):\n    imgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    ret, thresh = cv2.threshold(imgray, 127, 255, 0)\n    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n    return contours\n\n\ndef detect_mser_regions(image):\n    mser = cv2.MSER_create()\n    regions = mser.detectRegions(image)\n    return regions\n\n\ndef detect_mser_regions(image):\n    mser = cv2.MSER_create()\n    regions = mser.detectRegions(image)\n    return regions\n","sub_path":"parser/opencv_utils.py","file_name":"opencv_utils.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"320961943","text":"file = pd.read_table('test_GE_Dist_Miara.tab')\nhead = file.head()\nvalues = file.values\n\ngenes = file.index #List of all genes\nY = values[:,0] # => first column 0\nX = values[:,0:2] # => first 2 columns 1 and 2\ntest1 = file.index[values[:,0] > 10] # => returns the list of genes whose\n#... first column is greater\ngenes1 = values[0,:] #RETURNS the first row\ngenes2 = values[1,:] # 2nd row\n#Set 0 on the diagonal: Distance.loc['a']['b']\n\n#Technique to create the matrix:\na = distance_all1(genes1)\nb = distance_all1(genes2)\nc = distance_all1(genes3)\nListe_Distance = []\nListe_Distance.append(a)\nListe_Distance.append(b)\nListe_Distance.append(c)\n\nMatrice_Distance = pd.DataFrame(Liste_Distance)\n#----------------------------------------------------------------------\n#Another method to create a distance matrix:\nfrom scipy.spatial import distance_matrix\nAB = distance_matrix([[5.86],[10.49] , [8.59]], [[5.38],[3.6],[6.7]])\n\n#Simpler:\n#For Hour 1\nA1 = values[:,0]\nA1 = pd.DataFrame(A1)\nMATRIX = distance_matrix(A1,A1)\n\n\n\n#Selection of the 10 closest genes\n#example for gene 1:\n#Sort the results\nM1 = Matrice_Distance['PF3D7_0100100']\nM1 = M1.sort_values()\ngene1_10 = M1[1:11]\n\n\nM2 = Matrice_Distance[file.index[1]].sort_values()[1:11]\nM3 = Matrice_Distance[file.index[2]].sort_values()[1:11].index # = gene1_100\n\ndico1 = {}\ndico[file.index[0]] = gene1_10.index\n\n#MULTIPROCESSING\n\nfrom multiprocessing import Pool\ndef f(x):\n    return x*x\n\nwith Pool(5) as p :\n    print(p.map(f,[1,2,3]))\n\n\nmultiprocessing.cpu_count()\n","sub_path":"cheet.py","file_name":"cheet.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69462182","text":"\"\"\"\nusage: 048-047-assign01-3.py [-h] [-v] [-n NPROCESS] [-o ORDER]\n\noptional arguments:\n  -h, --help            show this help message and exit\n  -v, --verbose         Verbose mode\n  -n NPROCESS, --nprocess NPROCESS\n                        Number of processes n where n>=2\n  -o ORDER, --order ORDER\n                        0 for ascending order, 1 for descending order. Without\n                        any argument ascending order is used\n\n\"\"\"\nfrom multiprocessing import Process, Pipe, Array\nimport random\nimport time\nimport sys\nimport argparse\n\n\"\"\"\nThree message types are defined:\n\n1. CMP_REQUEST >> Swap request from P_(i) to P_(i+1) or P_(i) to P_(i-1)\n\n2. RESPONSE >> Response message containing minimum/maximum of data at P_(i) and\n   P_(i+1) for corresponding swap request\n\n3.
PR_CNT_MSG >> Flag for marking the message for purpose of finding number of\n processes in line network\n\"\"\"\n\nPACKET_TYPE = {\n \"CMP_REQUEST\" : 0,\n \"RESPONSE\" : 1,\n \"PR_CNT_MSG\" : 2\n}\n\nclass Packet(object):\n \"\"\"\n Class for encapsulating the message and it's properties\n \"\"\"\n\n def __init__(self, data, ptype):\n\n self.data = data\n self.type = PACKET_TYPE[ptype]\n\nclass ProcessNode(Process):\n \"\"\"\n Process Nodes\n \"\"\"\n\n def __init__(self, lconn=None, rconn=None, shared_array=None, order='asc', data=None):\n \"\"\"\n Initializes the process object with data and connections\n to left and right processes\n \"\"\"\n\n Process.__init__(self)\n if not data:\n self.data = int(random.uniform(-100.0,500.0))\n else:\n self.data = data\n self.lconn = lconn\n self.rconn = rconn\n self.vid = -1\n self.order = order\n\n if shared_array:\n self.shared_array = shared_array\n\n def compare(self, l, r):\n\n if self.order == 'asc':\n return l >= r\n else:\n return l < r\n\n def op_minmax(self, l, r):\n\n if self.order == 'asc':\n return min(l, r)\n else:\n return max(l, r)\n\n def recieve(self, sender_node):\n \"\"\"\n Recieve packet\n \"\"\"\n\n if sender_node == \"left\":\n msg = self.lconn.recv()\n\n if msg.type == PACKET_TYPE[\"CMP_REQUEST\"]:\n self.lconn.send(Packet(self.op_minmax(self.data, msg.data), \"RESPONSE\"))\n if self.compare(msg.data, self.data):\n self.data = msg.data\n\n elif msg.type == PACKET_TYPE[\"PR_CNT_MSG\"]:\n if self.rconn != None:\n self.vid = msg.data + 1\n self.rconn.send(Packet(self.vid, \"PR_CNT_MSG\"))\n else:\n self.vid = msg.data + 1\n self.n_processes = self.vid\n self.lconn.send(Packet(self.n_processes, \"PR_CNT_MSG\"))\n\n elif msg.type == PACKET_TYPE[\"RESPONSE\"]:\n self.data = msg.data\n\n elif sender_node == \"right\":\n msg = self.rconn.recv()\n\n if msg.type == PACKET_TYPE[\"RESPONSE\"]:\n self.data = msg.data\n\n elif msg.type == PACKET_TYPE[\"PR_CNT_MSG\"]:\n self.n_processes = msg.data\n if self.lconn != None:\n self.lconn.send(msg)\n\n elif msg.type == PACKET_TYPE[\"CMP_REQUEST\"]:\n self.rconn.send(Packet(-self.op_minmax(-self.data, -msg.data), \"RESPONSE\"))\n if self.compare(self.data, msg.data):\n self.data = msg.data\n\n def send(self, reciever_node):\n \"\"\"\n Send packet\n \"\"\"\n\n if reciever_node == \"right\":\n if self.rconn != None:\n self.rconn.send(Packet(self.data, \"CMP_REQUEST\"))\n self.recieve(\"right\")\n\n else:\n if self.lconn != None:\n self.lconn.send(Packet(self.data, \"CMP_REQUEST\"))\n self.recieve(\"left\")\n\n def count_processes(self):\n \"\"\"\n Initiate count processes subroutine\n If process id and number of processes are not determined yet,\n then subroutine will be called.\n \"\"\"\n\n if self.lconn == None:\n self.vid = 1\n self.rconn.send(Packet(self.vid, \"PR_CNT_MSG\"))\n self.recieve(\"right\")\n\n else:\n self.recieve(\"left\")\n if self.rconn != None:\n self.recieve(\"right\")\n\n def run(self):\n\n # Count number of processes and relative position of each process\n # in line network\n self.count_processes()\n\n # print(\"process id {}, process data {}\".format(self.vid, self.data))\n # Update process state in shared array\n if hasattr(self, 'shared_array'):\n self.shared_array[0*self.n_processes + (self.vid-1)] = self.data\n\n for i in range(1, self.n_processes):\n # Check if process should generate compare request message\n # in i_th round\n if ((self.vid - i)%2) == 0:\n self.send(\"left\")\n self.send(\"right\")\n else:\n if self.lconn != None:\n self.recieve(\"left\")\n if 
self.rconn != None:\n self.recieve(\"right\")\n\n # Update process state\n if hasattr(self, 'shared_array'):\n self.shared_array[i*self.n_processes + (self.vid-1)] = self.data\n\n # print(\"Sorted process id {}, process data {}\".format(self.vid, self.data))\n\ndef main(verbose=False, n_processes=None, given_array=None, order='asc'):\n \"\"\"\n Create environment for simulating the distributed n-2 rounds alternative OET\n sorting. Each Process node has it's own resource. Each of the non-terminal\n node is connected to two adjacent processes with a synchronized duplex Pipe\n connection.\n \"\"\"\n\n num_process = 0\n process_list = []\n\n if not given_array:\n\n num_process = int(random.uniform(2.0, 20.0))\n if n_processes:\n num_process = n_processes\n\n connection_list = [Pipe() for i in range(num_process-1)]\n\n shared_array = None\n if verbose:\n shared_array = Array('i', (num_process+1)*num_process)\n\n process_list = [ProcessNode(\n None,\n connection_list[0][0],\n shared_array=shared_array,\n order=order)]\n for i in range(num_process-2):\n p = ProcessNode(\n connection_list[i][1],\n connection_list[i+1][0],\n shared_array=shared_array,\n order=order)\n process_list.append(p)\n process_list.append(\n ProcessNode(\n connection_list[num_process-2][1],\n shared_array=shared_array,\n order=order))\n\n else:\n\n num_process = len(given_array)\n\n connection_list = [Pipe() for i in range(num_process-1)]\n\n shared_array = None\n if verbose:\n shared_array = Array('i', (num_process+1)*num_process)\n\n process_list = [ProcessNode(\n None,\n connection_list[0][0],\n shared_array=shared_array,\n data=given_array[0],\n order=order)]\n\n for i in range(num_process-2):\n p = ProcessNode(\n connection_list[i][1],\n connection_list[i+1][0],\n shared_array=shared_array,\n data=given_array[i],\n order=order)\n process_list.append(p)\n\n process_list.append(\n ProcessNode(\n connection_list[num_process-2][1],\n shared_array=shared_array,\n data=given_array[-1],\n order=order))\n\n # Start time\n start = time.clock()\n\n for p in process_list:\n p.start()\n\n for p in process_list:\n p.join()\n\n # Elapsed time\n time_elapsed = time.clock() - start\n\n if verbose:\n print(\"number of process {}\".format(num_process))\n\n print_factor = 1\n if num_process <= 10 and num_process>=5:\n print_factor = 2\n else:\n print_factor = 5\n\n for i in range(0, num_process):\n if i==0:\n print(\"Initial : \", end=\" \")\n for j in range(0, num_process):\n print(\"P{}({})\".format(j+1, shared_array[i*num_process+j]), end=\" \")\n print(\"\\n\")\n elif (i==(num_process-1)) or (i%print_factor==0):\n print(\"Round {} : \".format(i), end=\" \")\n for j in range(0, num_process):\n print(\"P{}({})\".format(j+1, shared_array[i*num_process+j]), end=\" \")\n print(\"\\n\")\n\n return time_elapsed\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose mode\", action=\"store_true\")\n parser.add_argument(\"-n\", \"--nprocess\", help=\"Number of processes n where n>=2\", action=\"store\", type=int)\n parser.add_argument(\"-o\", \"--order\", help=\"0 for ascending order, 1 for descending order. 
Without any argument ascending order is used\", action=\"store\", type=int)\n args = parser.parse_args()\n\n order = 'asc'\n if args.order:\n if args.order == 1:\n order = 'dsc'\n\n if args.nprocess:\n if args.nprocess >= 2:\n main(verbose=args.verbose, n_processes = args.nprocess, order=order)\n else:\n print(\"Invalid n\")\n else:\n main(verbose=args.verbose, order=order)\n","sub_path":"assignment1/048-047-assign01-3.py","file_name":"048-047-assign01-3.py","file_ext":"py","file_size_in_byte":9525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433278754","text":"import math\n\nfrom river.stats import Var\nfrom river.drift.adwin import ADWIN\nfrom river.utils.skmultiflow_utils import check_random_state\n\nfrom .base import FoundNode\nfrom .base import SplitNode\nfrom .hatc_nodes import AdaNode\nfrom .htr_nodes import LearningNodeAdaptive\n\n\nclass AdaLearningNodeRegressor(LearningNodeAdaptive, AdaNode):\n \"\"\"Learning Node of the Hoeffding Adaptive Tree regressor.\n\n Parameters\n ----------\n stats\n Initial class observations.\n depth\n The depth of the learning node in the tree.\n attr_obs\n The numeric attribute observer algorithm used to monitor target statistics\n and perform split attempts.\n attr_obs_params\n The parameters passed to the numeric attribute observer algorithm.\n adwin_delta\n The delta parameter of ADWIN.\n seed\n Seed to control the generation of random numbers and support reproducibility.\n \"\"\"\n\n def __init__(self, stats, depth, attr_obs, attr_obs_params, leaf_model, adwin_delta, seed):\n super().__init__(stats, depth, attr_obs, attr_obs_params, leaf_model)\n\n self.adwin_delta = adwin_delta\n self._adwin = ADWIN(delta=self.adwin_delta)\n self.error_change = False\n self._rng = check_random_state(seed)\n\n # Normalization of info monitored by drift detectors (using Welford's algorithm)\n self._error_normalizer = Var(ddof=1)\n\n @property\n def n_leaves(self):\n return 1\n\n @property\n def error_estimation(self):\n return self._adwin.estimation\n\n @property\n def error_width(self):\n return self._adwin.width\n\n def error_is_null(self):\n return self._adwin is None\n\n def kill_tree_children(self, hatr):\n pass\n\n def learn_one(self, x, y, *, sample_weight=1., tree=None, parent=None, parent_branch=-1):\n y_pred = self.leaf_prediction(x, tree=tree)\n normalized_error = normalize_error(y, y_pred, self)\n\n if tree.bootstrap_sampling:\n # Perform bootstrap-sampling\n k = self._rng.poisson(1.0)\n if k > 0:\n sample_weight = sample_weight * k\n\n if self._adwin is None:\n self._adwin = ADWIN(delta=self.adwin_delta)\n\n old_error = self.error_estimation\n\n # Update ADWIN\n self._error_change, _ = self._adwin.update(normalized_error)\n\n # Error is decreasing\n if self._error_change and old_error > self.error_estimation:\n self._error_change = False\n\n # Update learning model\n super().learn_one(x, y, sample_weight=sample_weight, tree=tree)\n\n weight_seen = self.total_weight\n\n if weight_seen - self.last_split_attempt_at >= tree.grace_period:\n if self.depth >= tree.max_depth:\n # Depth-based pre-pruning\n self.deactivate()\n tree._n_inactive_leaves += 1\n tree._n_active_leaves -= 1\n else:\n tree._attempt_to_split(self, parent, parent_branch)\n self.last_split_attempt_at = weight_seen\n\n def leaf_prediction(self, x, *, tree=None):\n prediction_option = tree.leaf_prediction\n if prediction_option == tree._TARGET_MEAN:\n return self.stats.mean.get()\n elif prediction_option == tree._MODEL:\n return 
self._leaf_model.predict_one(x)\n else: # adaptive node\n return super().leaf_prediction(x, tree=tree)\n\n # Override AdaNode: enable option vote (query potentially more than one leaf for responses)\n def filter_instance_to_leaves(self, x, parent, parent_branch, found_nodes):\n found_nodes.append(FoundNode(self, parent, parent_branch))\n\n\nclass AdaSplitNodeRegressor(SplitNode, AdaNode):\n \"\"\"Node that splits the data in a Hoeffding Adaptive Tree Regression.\n\n Parameters\n ----------\n split_test\n Split test.\n stats\n Target stats.\n depth\n The depth of the node.\n adwin_delta\n The delta parameter of ADWIN.\n seed\n Internal random state used to sample from poisson distributions.\n \"\"\"\n def __init__(self, split_test, stats, depth, adwin_delta, seed):\n stats = stats if stats else Var()\n super().__init__(split_test, stats, depth)\n self.adwin_delta = adwin_delta\n self._adwin = ADWIN(delta=self.adwin_delta)\n self._alternate_tree = None\n self._error_change = False\n\n self._rng = check_random_state(seed)\n\n # Normalization of info monitored by drift detectors (using Welford's algorithm)\n self._error_normalizer = Var(ddof=1)\n\n @property\n def n_leaves(self):\n num_of_leaves = 0\n for child in self._children.values():\n if child is not None:\n num_of_leaves += child.n_leaves\n\n return num_of_leaves\n\n @property\n def error_estimation(self):\n return self._adwin.estimation\n\n @property\n def error_width(self):\n w = 0.0\n if not self.error_is_null():\n w = self._adwin.width\n\n return w\n\n def error_is_null(self):\n return self._adwin is None\n\n # Override AdaSplitNodeClassifier\n def learn_one(self, x, y, sample_weight, tree, parent, parent_branch):\n normalized_error = 0.0\n\n leaf = self.filter_instance_to_leaf(x, parent, parent_branch).node\n if leaf is not None:\n y_pred = leaf.leaf_prediction(x, tree=tree)\n else:\n y_pred = parent.leaf_prediction(x, tree=tree)\n\n normalized_error = normalize_error(y, y_pred, self)\n\n # Update stats as traverse the tree to improve predictions (in case split nodes are used\n # to provide responses)\n self.stats.update(y, sample_weight)\n\n if self._adwin is None:\n self._adwin = ADWIN(self.adwin_delta)\n\n old_error = self.error_estimation\n\n # Update ADWIN\n self._error_change, _ = self._adwin.update(normalized_error)\n\n if self._error_change and old_error > self.error_estimation:\n self._error_change = False\n\n # Condition to build a new alternate tree\n if self._error_change:\n self._alternate_tree = tree._new_learning_node(parent=self)\n self._alternate_tree.depth -= 1 # To ensure we do not skip a tree level\n tree._n_alternate_trees += 1\n\n # Condition to replace alternate tree\n elif self._alternate_tree is not None and not self._alternate_tree.error_is_null():\n if self.error_width > tree.drift_window_threshold \\\n and self._alternate_tree.error_width > tree.drift_window_threshold:\n old_error_rate = self.error_estimation\n alt_error_rate = self._alternate_tree.error_estimation\n f_delta = .05\n f_n = 1.0 / self._alternate_tree.error_width + 1.0 / self.error_width\n\n try:\n bound = math.sqrt(\n 2.0 * old_error_rate * (1.0 - old_error_rate)\n * math.log(2.0 / f_delta) * f_n\n )\n except ValueError: # error rate exceeds 1, so we clip it\n bound = 0.\n if bound < (old_error_rate - alt_error_rate):\n tree._n_active_leaves -= self.n_leaves\n tree._n_active_leaves += self._alternate_tree.n_leaves\n self.kill_tree_children(tree)\n\n if parent is not None:\n parent.set_child(parent_branch, self._alternate_tree)\n 
self._alternate_tree = None\n else:\n # Switch tree root\n tree._tree_root = tree._tree_root._alternate_tree\n tree._n_switch_alternate_trees += 1\n elif bound < alt_error_rate - old_error_rate:\n if isinstance(self._alternate_tree, SplitNode):\n self._alternate_tree.kill_tree_children(tree)\n self._alternate_tree = None\n tree._n_pruned_alternate_trees += 1\n\n # Learn one sample in alternate tree and child nodes\n if self._alternate_tree is not None:\n self._alternate_tree.learn_one(x, y, sample_weight=sample_weight, tree=tree,\n parent=parent, parent_branch=parent_branch)\n child_branch = self.instance_child_index(x)\n child = self.get_child(child_branch)\n if child is not None:\n child.learn_one(x, y, sample_weight=sample_weight, tree=tree, parent=self,\n parent_branch=child_branch)\n # Instance contains a categorical value previously unseen by the split node\n elif self.split_test.branch_for_instance(x) < 0:\n # Creates a new learning node to encompass the new observed feature\n # value\n leaf_node = tree._new_learning_node(parent=self)\n branch_id = self.split_test.add_new_branch(\n x[self.split_test.attrs_test_depends_on()[0]])\n self.set_child(branch_id, leaf_node)\n tree._n_active_leaves += 1\n leaf_node.learn_one(x, y, sample_weight=sample_weight, tree=tree, parent=parent,\n parent_branch=parent_branch)\n\n def leaf_prediction(self, x, *, tree=None):\n # Called in case an emerging categorical feature has no path down the split node to be\n # sorted\n return self.stats.mean.get()\n\n # Override AdaNode\n def kill_tree_children(self, tree):\n for child_id, child in self._children.items():\n if child is not None:\n # Delete alternate tree if it exists\n if not child.is_leaf():\n if child._alternate_tree is not None:\n child._alternate_tree.kill_tree_children(tree)\n tree._n_pruned_alternate_trees += 1\n child._alternate_tree = None\n\n # Recursive delete of SplitNodes\n child.kill_tree_children(tree)\n tree._n_decision_nodes -= 1\n else:\n if child.is_active():\n tree._n_active_leaves -= 1\n else:\n tree._n_inactive_leaves -= 1\n\n self._children[child_id] = None\n\n # override AdaNode\n def filter_instance_to_leaves(self, x, parent, parent_branch, found_nodes):\n child_index = self.instance_child_index(x)\n if child_index >= 0:\n child = self.get_child(child_index)\n if child is not None:\n child.filter_instance_to_leaves(x, parent, parent_branch, found_nodes)\n else:\n found_nodes.append(FoundNode(None, self, child_index))\n if self._alternate_tree is not None:\n self._alternate_tree.filter_instance_to_leaves(x, self, -999, found_nodes)\n\n\ndef normalize_error(y_true, y_pred, node):\n drift_input = y_true - y_pred\n node._error_normalizer.update(drift_input)\n\n if node._error_normalizer.mean.n == 1:\n return 0.5 # The expected error is the normalized mean error\n\n sd = math.sqrt(node._error_normalizer.sigma)\n\n # We assume the error follows a normal distribution -> (empirical rule) 99.73% of the values\n # lie between [mean - 3*sd, mean + 3*sd]. 
We assume this range for the normalized data.\n    # Hence, we can apply the min-max norm to cope with ADWIN's requirements\n    return (drift_input + 3 * sd) / (6 * sd) if sd > 0 else 0.5\n","sub_path":"river/tree/_nodes/hatr_nodes.py","file_name":"hatr_nodes.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"221546099","text":"from django.conf.urls import url\n\n\nfrom office.views import OfficeDetailView, \\\n    OfficeUserView, OfficeBudgetUpdateView\n\napp_name = 'office'\n\nfrom office.views import OfficeCreateView, OfficeListView, OfficeUpdateView, OfficeDeleteView,\\\n    OfficeAddOfficeHeadView, OfficeAddInfoofficerView, Monthly, OfficeDashboard, DistrictDashboard, ProfileDetailView, ProfileUpdateView, OfficeKaryakramList,\\\n    OfficeViewDataDetail, OfficePragati\n\n\nurlpatterns = [\n\n    url(r'^office/create/$', OfficeCreateView.as_view(), name='office-add'),\n    url(r'^office/dashboard/(?P<pk>[0-9]+)/$', OfficeDashboard.as_view(), name='office-dashboard'),\n    url(r'^office/pragati/(?P<pk>[0-9]+)/$', OfficePragati.as_view(), name='office-pragati'),\n    url(r'^office/detail/(?P<pk>[0-9]+)/$', OfficeDetailView.as_view(), name='office-detail'),\n    url(r'^office/view-data-detail/(?P<pk>[0-9]+)/$', OfficeViewDataDetail.as_view(), name='view-data-detail'),\n    url(r'^office/users/(?P<pk>[0-9]+)/$', OfficeUserView.as_view(), name='office-users'),\n\n    url(r'^district/dashbaord/(?P<pk>[0-9]+)/$', DistrictDashboard.as_view(), name='district-dashboard'),\n    url(r'^office/dashboard/(?P<pk>[0-9]+)/karyakramlist/$', OfficeKaryakramList.as_view(), name='office-karyakram-list'),\n    url(r'^office/update/(?P<pk>[0-9]+)/$', OfficeUpdateView.as_view(), name='office-update'),\n    url(r'^office/delete/(?P<pk>[0-9]+)/$', OfficeDeleteView.as_view(), name='office-delete'),\n    url(r'^office/list/$', OfficeListView.as_view(), name='office-list'),\n    url(r'^office/(?P<pk>[0-9]+)/adduser/addofficehead/$', OfficeAddOfficeHeadView.as_view(), name='office-add-office-head'),\n    url(r'^office/(?P<pk>[0-9]+)/adduser/addinfoofficer/$', OfficeAddInfoofficerView.as_view(), name='office-add-info-officer'),\n    #url(r'^office/(?P<pk>[0-9]+)/addtype/$', OfficeAddUserView.as_view(), name='office-addtype'),\n    \n    url(r'^office/addbudget/(?P<pk>[0-9]+)/$', OfficeBudgetUpdateView.as_view(), name='office-budget-update'),\n    url(r'^deactivate_user/$', ProfileUpdateView.as_view(), name='deactivate-users'),\n    url(r'^office/users/profile_update/(?P<pk>[0-9]+)/$', ProfileUpdateView.as_view(), name='profile-update'),\n    url(r'^office/users/profile_detail/(?P<pk>[0-9]+)/$', ProfileDetailView.as_view(), name='profile-detail'),\n    url(r'^office/monthly/$', Monthly.as_view(), name='monthly-report'),\n    \n\n    ]\n","sub_path":"office/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"449349595","text":"import boto3\n\nregion = 'us-east-2'\ninstances = ['i-0b8e04c02e8856c5a'] # you may list as many instance IDs here as you need\n\nec1 = boto3.client('ec2', region_name=region)\n\ndef lambda_handler(event, context):\n    ec1.start_instances(InstanceIds=instances)\n    print('started your instances: ' + str(instances))","sub_path":"lambda_start_instances.py","file_name":"lambda_start_instances.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333851750","text":"# python3\n\n# Task. 
Check whether a given directed graph with n\n# vertices and n edges contains a cycle.\n\nimport sys\n\n\ndef acyclic(adj):\n def explore(vertex, nodes):\n visited[vertex] = True\n nodes.append(vertex)\n\n for neighbor in adj[vertex]:\n if neighbor in nodes:\n result[0] = 1\n\n if not visited[neighbor]:\n explore(neighbor, nodes)\n nodes.pop()\n\n result = [0]\n visited = [False for _ in range(len(adj))]\n\n for i in range(len(adj)):\n if not visited[i]:\n explore(i, [])\n return result[0]\n\n\ndef main():\n input = sys.stdin.read()\n data = list(map(int, input.split()))\n n, m = data[0:2]\n data = data[2:]\n edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))\n adj = [[] for _ in range(n)]\n for (a, b) in edges:\n adj[a - 1].append(b - 1)\n print(acyclic(adj))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"check_cycles/acyclicity.py","file_name":"acyclicity.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453430839","text":"import settings\nimport threading\nfrom collections import deque\n\nimport numpy as np\n# from numpy.fft import rfft, fftshift, irfft, ifftshift, fft, ifft\nimport time\nfrom scipy.fftpack import fft, ifft, fftshift, rfft, rfftfreq\n\n\nclass ML(threading.Thread):\n\n def __init__(self, queueList, plottingQueues):\n threading.Thread.__init__(self)\n\n self.FRAMESIZE = settings.FRAMESIZE\n\n #### Useable Vars\n self.accX, self.accY, self.accZ = queueList[0:3]\n self.rotX, self.rotY, self.rotZ = queueList[3:6]\n self.accXY, self.rotXY, self.steps, self.integRotZ = queueList[6:10]\n self.accTime, self.rotTime, self.stepsTime = queueList[10:13]\n self.integRotXY = queueList[13]\n\n #### PlottingQueues\n self.fftQueue, self.ifftQueue = plottingQueues[0:2]\n\n\n self.accXYdeq = deque(maxlen=self.FRAMESIZE)\n self.accZdeq = deque(maxlen=self.FRAMESIZE)\n self.rotXYIntegdeq = deque(maxlen=self.FRAMESIZE)\n self.rotZIntegdeq = deque(maxlen=self.FRAMESIZE)\n\n\n self.model = settings.create_model()\n self.model.load_weights(settings.MODEL)\n\n\n\n def run(self):\n while True:\n newVal = False\n if not self.accXY.empty():\n accXYVal = self.accXY.get()\n self.accXYdeq.append(accXYVal[1])\n newVal = True\n if not self.accZ.empty():\n accZVal = self.accZ.get()\n self.accZdeq.append(accZVal[1])\n newVal = True\n if not self.integRotXY.empty():\n rotXYIntegVal = self.integRotXY.get()\n self.rotXYIntegdeq.append(rotXYIntegVal)\n newVal = True\n if not self.integRotZ.empty():\n rotZIntegVal = self.integRotZ.get()\n self.rotZIntegdeq.append(rotZIntegVal)\n newVal = True\n\n if newVal and len(self.accXYdeq) >= settings.FRAMESIZE and len(self.rotXYIntegdeq) >= settings.FRAMESIZE:\n\n actualFrame = np.array([list(self.accXYdeq), list(self.rotXYIntegdeq), list(self.accZdeq), list(self.rotZIntegdeq)])\n actualFrame = np.swapaxes(actualFrame, 0, 1)\n absolute = np.absolute(actualFrame)\n amax = np.amax(absolute, axis=0)\n maxabs = abs(amax)\n actualFrame /= maxabs\n\n actualFrame = np.reshape(actualFrame, (1, settings.FRAMESIZE, settings.NUM_SENSORS))\n\n prediction = self.model.predict_classes(actualFrame)\n print(prediction)\n settings.LASTPREDICTION = prediction[0][-1]\n\n\n\n\n\n\n # print(\"Lol\")\n #\n # for body in iter(self.accXY.get, None):\n # self.workingQueue.append(body)\n # counter += 1\n # if counter == self.FRAMESIZE/10:\n # timestep = (self.workingQueue[-1][0] - self.workingQueue[0][0]) / float(len(self.workingQueue))\n # self.FFT(self.workingQueue, timestep)\n # 
counter = 0\n\n\n\n # def FFT(self, values: deque, timestep):\n # fourier = rfft([y for (x,y) in self.workingQueue])\n # fourier = np.abs(fourier)\n # N = len(values)\n # samplerate = 1/timestep\n # # freqs = np.fft.fftfreq(N)*samplerate\n # freqs = rfftfreq(len(fourier), 1/timestep)\n #\n # self.fftQueue.clear()\n # for time, value in zip(freqs, fourier):\n # self.fftQueue.append((time, value))\n\n","sub_path":"machinelearning/MachineLearning.py","file_name":"MachineLearning.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275600846","text":"import io\nimport json\nfrom os import path\n# from dateutil import parser\nfrom .services import RealtyService\n\n\ndef load_seeds():\n root_path = path.dirname(path.dirname(__file__))\n seeds = None\n with io.open(path.join(root_path, 'seed.json')) as fp:\n service = RealtyService()\n seeds = json.loads(fp.read())\n for seed in seeds:\n service.create(\n id=seed.get('id'),\n title=seed.get('title'),\n listing_type=seed.get('listingType'),\n purpose=seed.get('purpose'),\n published_on=seed.get('published_on'),\n location_name=seed.get('location').get('name'),\n city=seed.get('location').get('city').get('name'),\n state=seed.get('location').get('city').get('state')\n )\n","sub_path":"ads/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486831172","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef plot_image(data, n_image = 16, n_x = 4, n_y = 4) :\n\t# figure\n\tfig = plt.figure()\n\t# divide\n\ta_c = np.array([])\n\ti_image = 0\n\tfor i_cx in range(n_x) :\n\t\tfor i_cy in range(n_y) :\n\t\t\ti_image += 1\n\t\t\ta_c = np.append(a_c, fig.add_subplot(n_x, n_y, i_image))\n\t# plot\n\tfor i_c in range(n_image) :\n\t\t(a_c[i_c]).imshow(data[i_c].reshape([28, 28]), cmap = \"gray\")\n\tplt.show()\n\ndef main() :\n\t# args\n\tn_args = len(sys.argv)\n\tif n_args < 2 :\n\t\tprint(\"usage: python3 %s \" % (sys.argv[0]))\n\t\texit()\n\t# load data\n\tfname = sys.argv[1]\n\tdf = pd.read_csv(fname, index_col = 0)\n\tprint(df)\n\tdata = df.values\n\t# plot\n\tplot_image(data)\n\n\nif __name__ == \"__main__\" :\n\tmain()\n","sub_path":"show_image_csv.py","file_name":"show_image_csv.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"544241792","text":"from translatewise.tests.base import BaseTestCase\nfrom translatewise import db\nfrom translatewise.translations.models import Translation\n\n\nclass ViewsTestCase(BaseTestCase):\n\n def test_home_get(self):\n response = self.client.get('/')\n assert response.status_code == 200\n\n def test_home_post(self):\n response = self.client.post(\n '/',\n data=dict(text=\"hello\"),\n follow_redirects=True,\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"}\n )\n\n translation = db.session.query(Translation).get(1)\n\n assert translation.text == \"hello\"\n assert response.status_code == 200\n","sub_path":"translatewise/translations/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81234773","text":"#!/usr/bin/env python3\n\nimport socket\nimport requests\nimport socks\nfrom sys import 
argv\nimport urllib\nimport json\n\n# build the output string for an entry's sense families\ndef senseFamilyStr(entry: dict):\n    result = ''\n    for index, senseFamily in enumerate(entry['senseFamilies'], start=1):\n        partsOfSpeechs = senseFamily['partsOfSpeechs'][0]['value']\n        sensesStr = f'[{partsOfSpeechs}]\\r\\n'\n        for index, sense in enumerate(senseFamily['senses'], start=1):\n            definition = sense['definition']['text']\n            senseStr = f'    {definition}\\r\\n'\n            if 'exampleGroups' in sense.keys():\n                for exampleGroup in sense['exampleGroups']:\n                    example = exampleGroup['examples'][0]\n                    senseStr += f'        • {example}\\r\\n'\n            sensesStr += senseStr\n        result += sensesStr\n    return result\n\n\n# change your proxy's ip\nip = '127.0.0.1'\n# change your proxy's port\nport = 1086\nsocks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, ip, port)\nsocket.socket = socks.socksocket\n\n\n# url\nPATH = 'https://content.googleapis.com/dictionaryextension/v1/knowledge/search'\nterm = ' '.join(argv[1:])\nquery = {\n    'language': 'en',\n    'key': 'AIzaSyC9PDwo2wgENKuI8DSFOfqFqKP2cKAxxso',\n    'term': term\n}\nurl = PATH + '?' + urllib.parse.urlencode(query)\n# headers\nheaders = {\n    'x-origin': 'chrome-extension://mgijmajocgfcbeboacabfgobmjgjcoja'\n}\n# make the request, and decode the response to dict\nresponse = json.loads(requests.get(url, headers=headers).text)\n\n# if the response has no 'dictionaryData' key, exit\nif 'dictionaryData' not in response.keys():\n    print(f'Sorry, entry [{term}] not found.')\n    exit(-1)\n\n# get entry\nentry = response['dictionaryData'][0]['entries'][0]\n\n# construct output string\nheadword = entry['headword']\nphonetics = ''\nif 'phonetics' in entry.keys():\n    phonetics = '|' + entry['phonetics'][0]['text'] + '|'\n\nsenseFamily = senseFamilyStr(entry)\n\n# output\nprint(f'{headword} {phonetics}\\r\\n\\r\\n{senseFamily}')\nexit(0)\n","sub_path":"google-dictionary.py","file_name":"google-dictionary.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"77332212","text":"__author__ = \"Dimitar Ganev\"\n__email__ = \"\"\n\nimport requests\nfrom datetime import datetime\n\nfrom dateutil import parser\nfrom dateutil.relativedelta import relativedelta, SU, SA\n\n\nclass OpenLigaSDK:\n    \"\"\"\n    https://www.openligadb.de/\n    -------------------------\n    OpenLigaDB provides an open database of recent football seasons and\n    matches. The project is aimed primarily at (hobby) programmers who want to\n    obtain current soccer and other sports results in their applications.\n    The interface is particularly suitable for the implementation of projects\n    for prediction games, statistics applications, Bundesliga widgets etc.\n    \"\"\"\n\n    def __init__(self):\n        self.api_url = \"https://www.openligadb.de/api/\"\n\n    def get_season(self, season: str, reverse: bool = False) -> list:\n        \"\"\"\n        Args:\n            season: the season (ex. 2017, 2018 etc.)\n            reverse: whether the list should be reversed or not.\n        Returns:\n            A list showing the matches for that season.\n\n        \"\"\"\n        route = self.api_url + \"/getmatchdata/bl1/\" + season\n        data = requests.get(route)\n        if reverse:\n            return data.json()[::-1]\n        return data.json()\n\n    def get_rankings(self, season: str) -> list:\n        \"\"\"\n        Getting the rankings for the given season.\n\n        Args:\n            season: the season (ex. 
2017, 2018 etc.)\n        Returns:\n            A list with the rankings for that season, sorted by points.\n        \"\"\"\n        # Getting the current teams in the season.\n        teams = self.get_teams(season)\n        rankings = {}\n\n        for team in teams:\n            rankings[team[\"TeamId\"]] = {\n                \"team_name\": team[\"TeamName\"],\n                \"wins\": 0,\n                \"loses\": 0,\n                \"draws\": 0,\n                \"points\": 0,\n            }\n\n        # Getting the current season\n        season = self.get_season(season)\n\n        for match in season:\n            if not match[\"MatchIsFinished\"]:\n                continue\n\n            team_1 = match[\"Team1\"][\"TeamId\"]\n            team_2 = match[\"Team2\"][\"TeamId\"]\n\n            if match[\"MatchResults\"]:\n                goals_team_1 = int(match[\"MatchResults\"][0][\"PointsTeam1\"])\n                goals_team_2 = int(match[\"MatchResults\"][0][\"PointsTeam2\"])\n\n                # if TEAM 1 has won the match.\n                if goals_team_1 > goals_team_2:\n                    rankings[team_1][\"wins\"] += 1\n                    rankings[team_1][\"points\"] += 3\n                    rankings[team_2][\"loses\"] += 1\n                # if TEAM 2 has won the match.\n                elif goals_team_1 < goals_team_2:\n                    rankings[team_2][\"wins\"] += 1\n                    rankings[team_2][\"points\"] += 3\n                    rankings[team_1][\"loses\"] += 1\n                # if it is a draw\n                else:\n                    rankings[team_1][\"points\"] += 1\n                    rankings[team_2][\"points\"] += 1\n                    rankings[team_1][\"draws\"] += 1\n                    rankings[team_2][\"draws\"] += 1\n\n        return self._sort(rankings)\n\n    @staticmethod\n    def _sort(rankings: dict) -> list:\n        \"\"\"\n        Sorting the current rankings based on the points.\n\n        Args:\n            rankings: the rankings dict built by get_rankings().\n        Returns:\n            A list of ranking entries sorted by points in descending order.\n        \"\"\"\n        return sorted(\n            [v for k, v in rankings.items()], key=lambda x: x[\"points\"], reverse=True\n        )\n\n    def get_teams(self, season: str) -> list:\n        \"\"\"\n        Args:\n            season: the season (ex. 2017, 2018 etc.)\n        Returns:\n            A list with the teams playing in that season.\n        \"\"\"\n        route = self.api_url + \"/getavailableteams/bl1/\" + season\n        data = requests.get(route)\n        return data.json()\n\n    def get_weekend_matches(self, season: str) -> list:\n        \"\"\"\n        Args:\n            season: the season (ex. 2017, 2018 etc.)\n        Returns:\n            A list with the upcoming weekend matches (if there are any)\n        \"\"\"\n        current_season = self.get_season(season)\n        weekdays = self._get_current_week_weekdays_dates()\n        result = []\n\n        for match in current_season:\n            try:\n                match_play_date = self._parse_date(match[\"MatchDateTime\"])\n            except ValueError:\n                raise ValueError(\"Invalid DateTime Format: [MatchDateTime]\")\n            if match_play_date == weekdays[\"SA\"] or match_play_date == weekdays[\"SU\"]:\n                result.append(match)\n        return result\n\n    def search_team(self, search: str, season: str) -> list:\n        \"\"\"\n        Searching for a team by a given string (search).\n\n        Args:\n            search: the search string (team name)\n            season: the season (ex. 
2017, 2018 etc.)\n        Returns:\n            A list with all results which matched the search.\n        \"\"\"\n        teams = self.get_teams(season)\n        return [i for i in teams if search.lower() in i[\"TeamName\"].lower()]\n\n    @staticmethod\n    def _parse_date(date) -> datetime:\n        \"\"\"\n        Parsing a date, replacing its 'time' values so that it is comparable\n        with _get_current_week_weekdays_dates()\n        Returns:\n            A datetime.datetime object.\n        \"\"\"\n        # https://dateutil.readthedocs.io/en/stable/parser.html\n        return parser.parse(date).replace(hour=0, minute=0, second=0, microsecond=0)\n\n    @staticmethod\n    def _get_current_week_weekdays_dates() -> dict:\n        \"\"\"\n        Returns: Dict, the current weekend days as datetime objects.\n        \"\"\"\n        now = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n\n        # MOCK\n        # now = parser.parse(\"2019-04-22 00:00:00\")\n\n        # Getting the current weekend days.\n        sunday = now + relativedelta(weekday=SU)\n        saturday = now + relativedelta(weekday=SA)\n\n        # Probably SAT/SUN would be better but I decided to keep it consistent\n        # with the decisions by dateutils\n        return {\"SA\": saturday, \"SU\": sunday}\n","sub_path":"bundesligainfo/apps/core/openligasdk/openligasdk.py","file_name":"openligasdk.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"92647585","text":"from kindercare.config import Config\nfrom kindercare.db import Db\nfrom kindercare.orm import Db as Orm\n\n\nSITE_INI = ['config/config.ini']\nSITE_ENV = ['.env']\nDEFAULT = None\nRAISE_ERROR = True\n\ndb_config = Config(SITE_INI, SITE_ENV, DEFAULT, RAISE_ERROR)\ndb_config.set_namespace('database')\nstatement_config = Config()\ndb = Orm(db_config)\nstatement_config.set_namespace('eft')\nsite_db = Db(db_config)\n","sub_path":"kindercare/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264125900","text":"import os, glob\nimport cv2\nimport dlib\nimport numpy\nfrom imutils.face_utils import FaceAligner\nfrom imutils.face_utils import rect_to_bb\nimport imutils\n\nfaceWidth = 120\nimgFileType = \"jpg\"\npeopleFolder = \"/home/chtseng/works/face-align/peoples\"\noutputFaceFolder = \"/home/chtseng/works/face-align/faces\"\nfaceLandmarkModel = \"shape_predictor_68_face_landmarks.dat\"\n\n#detector = dlib.get_frontal_face_detector()\n#predictor = dlib.shape_predictor(faceLandmarkModel)\n#fa = FaceAligner(predictor, desiredFaceWidth=faceWidth)\n\ndef load_images_from_folder(folder, outputFolder):\n    global faceLandmarkModel, faceWidth\n\n    detector = dlib.get_frontal_face_detector()\n    predictor = dlib.shape_predictor(faceLandmarkModel)\n    fa = FaceAligner(predictor, desiredFaceWidth=faceWidth)\n    labels = []\n    images = []\n\n    for folders in glob.glob(folder+\"/*\"):\n        label = os.path.basename(folders)\n        print(\"Load {} ...\".format(label))\n\n        if(not os.path.exists(outputFolder + \"/\" + label)):\n            os.mkdir(outputFolder + \"/\" + label)\n\n        for filename in os.listdir(folders): \n            if label is not None:\n\n                jpgname, file_extension = os.path.splitext(filename)\n                if(file_extension.lower() == \".\" + imgFileType):\n                    print(\"read file: \", os.path.join(folder,folders,filename))\n                    img = cv2.imread(os.path.join(folder,folders,filename))\n\n                    if img is not None:\n                        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n                        rects = detector(gray, 2)\n                        i = 0\n                        # loop over the face detections\n                        print(\"find {} faces\".format(len(rects)))\n\n                        
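# align each detected face, then re-detect on the aligned image to obtain a tight final crop\n                        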
for rect in rects:\n                            # extract the ROI of the *original* face, then align the face\n                            # using facial landmarks\n                            #(x, y, w, h) = rect_to_bb(rect)\n                            #faceOrig = image[y:y + h, x:x + w]\n                            faceAligned = fa.align(img, gray, rect)\n\n                            gray2 = cv2.cvtColor(faceAligned, cv2.COLOR_BGR2GRAY)\n                            rectB = detector( gray2 , 2)\n\n                            for rectFinal in rectB:\n                                (x2, y2, w2, h2) = rect_to_bb(rectFinal)\n                                face2 = faceAligned[y2:y2 + h2, x2:x2 + w2]\n                                \n                                #jpgname, file_extension = os.path.splitext(os.path.join(folder,folders,filename))\n                                print(\"write face to \", outputFolder + \"/\" + label + \"/\" + jpgname + \"-\" + str(i) + \".jpg\")\n                                cv2.imwrite(outputFolder + \"/\" + label + \"/\" + jpgname + \"-\" + str(i) + \".jpg\", face2)\n\n                                i += 1\n\n\n\nload_images_from_folder(peopleFolder, outputFaceFolder)\n","sub_path":"face-align/facealign_imutils.py","file_name":"facealign_imutils.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"482362080","text":"'''\nRename, delete, create, and move files.\nCreate a File object with File(\"file name\", \"file path\").\n'''\n\nimport os \n\nclass File:\n\n    def __init__(self, name, path):\n\n        self.name = name\n        self.path = path\n        self.full_path = f\"{self.path}\\\\{self.name}\"\n\n    def write(self, data):\n\n        with open(f\"{self.path}{self.name}\", \"r+\") as f:\n\n            f.write(data)\n            f.close()\n\n    def move(self, new_path):\n\n        try:\n            os.replace(self.path, new_path)\n\n        except Exception as e:\n\n            print(e)\n\n    def rename(self, new_name):\n\n        try:\n            os.rename(f\"{self.path}{self.name}\", f\"{self.path}{new_name}\")\n            print(\"success\")\n\n        except Exception as e:\n            print(e)\n            pass\n    \n    def remove(self):\n        \n        try:\n            os.remove(self.full_path)\n        \n        except Exception as e:\n        \n            print(e)\n\n\ndef rename_raw(path, name, new_name):\n\n    try:\n        os.rename(f\"{path}{name}\", f\"{path}{new_name}\")\n        print(\"success\")\n\n    except Exception as e:\n        print(e)\n        pass\n\ndef create(name, path):\n\n    try:\n        open(f\"{path}{name}\", \"w+\")\n\n    except Exception as e:\n        print(e)\n    \n    \ndef remove(path):\n    \n    try:\n        os.remove(path)\n\n    except Exception as e:\n    \n        print(e)","sub_path":"file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52449454","text":"def getMax():\n    num1 = int(input(\"Enter the first number: \"))\n    num2 = int(input(\"Enter the second number: \"))\n    num3 = int(input(\"Enter the third number: \"))\n    temp = 0\n    if num1 > num2:\n        temp = num1\n    else:\n        temp = num2\n    if temp > num3:\n        return \"The maximum value is: \" + str(temp)\n    else:\n        return \"The maximum value is: \" + str(num3)\nmaxValue = getMax()\nprint(maxValue)\n","sub_path":"05140.py","file_name":"05140.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"26183483","text":"# The MIT License (MIT)\n# Copyright (c) 2018 by the xcube development team and contributors\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the \"Software\"), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is furnished to do\n# so, subject to the following conditions:\n#\n# The above copyright notice and this permission 
notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport glob\nimport logging\nimport os\nimport time\nfrom typing import Any, Dict, List, Optional\n\nimport fiona\nimport numpy as np\nimport pandas as pd\nimport s3fs\nimport xarray as xr\nimport zarr\n\nfrom . import __version__\nfrom .cache import MemoryCacheStore, Cache, FileCacheStore\nfrom .defaults import DEFAULT_CMAP_CBAR, DEFAULT_CMAP_VMIN, \\\n DEFAULT_CMAP_VMAX, TRACE_PERF, MEM_TILE_CACHE_CAPACITY, FILE_TILE_CACHE_CAPACITY, FILE_TILE_CACHE_PATH, \\\n FILE_TILE_CACHE_ENABLED, API_PREFIX, DEFAULT_NAME\nfrom .errors import ServiceConfigError, ServiceError, ServiceBadRequestError, ServiceResourceNotFoundError\nfrom .logtime import log_time\nfrom .reqparams import RequestParams\n\nCOMPUTE_DATASET = 'compute_dataset'\nALL_FEATURES = \"all\"\n\n_LOG = logging.getLogger('xcube')\n\nConfig = Dict[str, Any]\n\n\n# noinspection PyMethodMayBeStatic\nclass ServiceContext:\n\n def __init__(self,\n name: str = DEFAULT_NAME,\n base_dir: str = None,\n config: Config = None):\n self._name = name\n self.base_dir = os.path.abspath(base_dir or '')\n self._config = config or dict()\n self.dataset_cache = dict() # contains tuples of form (ds, ds_descriptor, tile_grid_cache)\n # TODO by forman: move pyramid_cache, mem_tile_cache, rgb_tile_cache into dataset_cache values\n self.pyramid_cache = dict()\n self.mem_tile_cache = Cache(MemoryCacheStore(),\n capacity=MEM_TILE_CACHE_CAPACITY,\n threshold=0.75)\n if FILE_TILE_CACHE_ENABLED:\n tile_cache_dir = os.path.join(FILE_TILE_CACHE_PATH, 'v%s' % __version__, 'tiles')\n self.rgb_tile_cache = Cache(FileCacheStore(tile_cache_dir, \".png\"),\n capacity=FILE_TILE_CACHE_CAPACITY,\n threshold=0.75)\n else:\n self.rgb_tile_cache = None\n self._feature_collection_cache = dict()\n\n @property\n def config(self) -> Config:\n return self._config\n\n @config.setter\n def config(self, config: Config):\n if self._config:\n old_dataset_descriptors = self._config.get('Datasets')\n new_dataset_descriptors = config.get('Datasets')\n if not new_dataset_descriptors:\n for ds, _, _ in self.dataset_cache.values():\n ds.close()\n self.dataset_cache.clear()\n if new_dataset_descriptors and old_dataset_descriptors:\n ds_names = list(self.dataset_cache.keys())\n for ds_name in ds_names:\n dataset_descriptor = self.find_dataset_descriptor(new_dataset_descriptors, ds_name)\n if dataset_descriptor is None:\n ds, _, _ = self.dataset_cache[ds_name]\n ds.close()\n del self.dataset_cache[ds_name]\n self._config = config\n\n def get_service_url(self, base_url, *path: str):\n return base_url + '/' + self._name + API_PREFIX + '/' + '/'.join(path)\n\n def get_dataset_and_variable(self, ds_name: str, var_name: str):\n dataset = self.get_dataset(ds_name)\n if var_name in dataset:\n return dataset, dataset[var_name]\n raise ServiceResourceNotFoundError(f'Variable \"{var_name}\" not found in dataset \"{ds_name}\"')\n\n def get_dataset_descriptors(self):\n dataset_descriptors = self.config.get('Datasets')\n if not dataset_descriptors:\n 
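# fail early if the configuration declares no datasets at all\n            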
raise ServiceConfigError(f\"No datasets configured\")\n return dataset_descriptors\n\n def get_dataset_descriptor(self, ds_name: str) -> Dict[str, str]:\n dataset_descriptors = self.get_dataset_descriptors()\n if not dataset_descriptors:\n raise ServiceConfigError(f\"No datasets configured\")\n dataset_descriptor = self.find_dataset_descriptor(dataset_descriptors, ds_name)\n if dataset_descriptor is None:\n raise ServiceResourceNotFoundError(f'Dataset \"{ds_name}\" not found')\n return dataset_descriptor\n\n def get_color_mapping(self, ds_name: str, var_name: str):\n dataset_descriptor = self.get_dataset_descriptor(ds_name)\n style_name = dataset_descriptor.get('Style', 'default')\n styles = self.config.get('Styles')\n if styles:\n style = None\n for s in styles:\n if style_name == s['Identifier']:\n style = s\n # TODO: check color_mappings is not None\n if style:\n color_mappings = style.get('ColorMappings')\n if color_mappings:\n # TODO: check color_mappings is not None\n color_mapping = color_mappings.get(var_name)\n if color_mapping:\n cmap_cbar = color_mapping.get('ColorBar', DEFAULT_CMAP_CBAR)\n cmap_vmin, cmap_vmax = color_mapping.get('ValueRange', (DEFAULT_CMAP_VMIN, DEFAULT_CMAP_VMAX))\n return cmap_cbar, cmap_vmin, cmap_vmax\n _LOG.warning(f'color mapping for variable {var_name!r} of dataset {ds_name!r} undefined: using defaults')\n return DEFAULT_CMAP_CBAR, DEFAULT_CMAP_VMIN, DEFAULT_CMAP_VMAX\n\n def get_dataset(self, ds_name: str) -> xr.Dataset:\n if ds_name in self.dataset_cache:\n ds, _, _ = self.dataset_cache[ds_name]\n else:\n dataset_descriptor = self.get_dataset_descriptor(ds_name)\n\n path = dataset_descriptor.get('Path')\n if not path:\n raise ServiceConfigError(f\"Missing 'path' entry in dataset descriptor {ds_name}\")\n\n t1 = time.clock()\n\n fs_type = dataset_descriptor.get('FileSystem', 'local')\n if fs_type == 'obs':\n data_format = dataset_descriptor.get('Format', 'zarr')\n if data_format != 'zarr':\n raise ServiceConfigError(f\"Invalid format={data_format!r} in dataset descriptor {ds_name!r}\")\n client_kwargs = {}\n if 'Endpoint' in dataset_descriptor:\n client_kwargs['endpoint_url'] = dataset_descriptor['Endpoint']\n if 'Region' in dataset_descriptor:\n client_kwargs['region_name'] = dataset_descriptor['Region']\n s3 = s3fs.S3FileSystem(anon=True, client_kwargs=client_kwargs)\n store = s3fs.S3Map(root=path, s3=s3, check=False)\n cached_store = zarr.LRUStoreCache(store, max_size=2 ** 28)\n with log_time(f\"opened remote dataset {path}\"):\n ds = xr.open_zarr(cached_store)\n elif fs_type == 'local':\n if not os.path.isabs(path):\n path = os.path.join(self.base_dir, path)\n data_format = dataset_descriptor.get('Format', 'nc')\n if data_format == 'nc':\n with log_time(f\"opened local NetCDF dataset {path}\"):\n ds = xr.open_dataset(path)\n elif data_format == 'zarr':\n with log_time(f\"opened local zarr dataset {path}\"):\n ds = xr.open_zarr(path)\n else:\n raise ServiceConfigError(f\"Invalid format={data_format!r} in dataset descriptor {ds_name!r}\")\n elif fs_type == 'computed':\n if not os.path.isabs(path):\n path = os.path.join(self.base_dir, path)\n with open(path) as fp:\n python_code = fp.read()\n\n local_env = dict()\n global_env = None\n try:\n exec(python_code, global_env, local_env)\n except Exception as e:\n raise ServiceError(f\"Failed to compute dataset {ds_name!r} from {path!r}: {e}\") from e\n\n callable_name = dataset_descriptor.get('Function', COMPUTE_DATASET)\n callable_args = dataset_descriptor.get('Args', [])\n\n callable_obj = 
local_env.get(callable_name)\n if callable_obj is None:\n raise ServiceConfigError(f\"Invalid dataset descriptor {ds_name!r}: \"\n f\"no callable named {callable_name!r} found in {path!r}\")\n elif not callable(callable_obj):\n raise ServiceConfigError(f\"Invalid dataset descriptor {ds_name!r}: \"\n f\"object {callable_name!r} in {path!r} is not callable\")\n\n args = list()\n for arg_value in callable_args:\n if isinstance(arg_value, str) and len(arg_value) > 2 \\\n and arg_value.startswith('@') and arg_value.endswith('@'):\n ref_ds_name = arg_value[1:-1]\n if not self.get_dataset_descriptor(ref_ds_name):\n raise ServiceConfigError(f\"Invalid dataset descriptor {ds_name!r}: \"\n f\"argument {arg_value!r} of callable {callable_name!r} \"\n f\"must reference another dataset\")\n args.append(self.get_dataset(ref_ds_name))\n else:\n args.append(arg_value)\n\n try:\n with log_time(f\"created computed dataset {ds_name}\"):\n ds = callable_obj(*args)\n except Exception as e:\n raise ServiceError(f\"Failed to compute dataset {ds_name!r} \"\n f\"from function {callable_name!r} in {path!r}: {e}\") from e\n if not isinstance(ds, xr.Dataset):\n raise ServiceError(f\"Failed to compute dataset {ds_name!r} \"\n f\"from function {callable_name!r} in {path!r}: \"\n f\"expected an xarray.Dataset but got a {type(ds)}\")\n else:\n raise ServiceConfigError(f\"Invalid fs={fs_type!r} in dataset descriptor {ds_name!r}\")\n\n tile_grid_cache = dict()\n self.dataset_cache[ds_name] = ds, dataset_descriptor, tile_grid_cache\n\n t2 = time.clock()\n\n if TRACE_PERF:\n print(f'PERF: opening {ds_name!r} took {t2-t1} seconds')\n\n return ds\n\n def get_legend_label(self, ds_name: str, var_name: str):\n dataset = self.get_dataset(ds_name)\n if var_name in dataset:\n ds = self.get_dataset(ds_name)\n units = ds[var_name].units\n return units\n raise ServiceResourceNotFoundError(f'Variable \"{var_name}\" not found in dataset \"{ds_name}\"')\n\n def get_feature_collections(self) -> List[Dict]:\n features_configs = self._config.get(\"Features\", [])\n feature_collections = []\n for features_config in features_configs:\n feature_collections.append(dict(id=features_config.get(\"Identifier\"),\n title=features_config.get(\"Title\")))\n return feature_collections\n\n def get_feature_collection(self, collection_name: str = ALL_FEATURES) -> Dict:\n if ALL_FEATURES not in self._feature_collection_cache:\n features_configs = self._config.get(\"Features\", [])\n all_features = []\n feature_index = 0\n for features_config in features_configs:\n curr_collection_name = features_config.get(\"Identifier\")\n if not curr_collection_name:\n raise ServiceError(\"Missing 'Identifier' entry in 'Features'\")\n if curr_collection_name == ALL_FEATURES:\n raise ServiceError(\"Invalid 'Identifier' entry in 'Features'\")\n curr_collection_wc = features_config.get(\"Path\")\n if not curr_collection_wc:\n raise ServiceError(\"Missing 'Path' entry in 'Features'\")\n if not os.path.isabs(curr_collection_wc):\n curr_collection_wc = os.path.join(self.base_dir, curr_collection_wc)\n\n features = []\n collection_files = glob.glob(curr_collection_wc)\n for collection_file in collection_files:\n with fiona.open(collection_file) as feature_collection:\n for feature in feature_collection:\n self._remove_feature_id(feature)\n feature[\"id\"] = str(feature_index)\n feature_index += 1\n features.append(feature)\n self._feature_collection_cache[curr_collection_name] = dict(type=\"FeatureCollection\",\n features=features)\n all_features.extend(features)\n\n 
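# cache the union of every configured collection under the reserved ALL_FEATURES key\n            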
self._feature_collection_cache[ALL_FEATURES] = dict(type=\"FeatureCollection\",\n                                                            features=all_features)\n\n        if collection_name not in self._feature_collection_cache:\n            raise ServiceResourceNotFoundError(f'Feature collection \"{collection_name}\" not found')\n        return self._feature_collection_cache[collection_name]\n\n    @classmethod\n    def _remove_feature_id(cls, feature: Dict):\n        cls._remove_id(feature)\n        if \"properties\" in feature:\n            cls._remove_id(feature[\"properties\"])\n\n    @classmethod\n    def _remove_id(cls, properties: Dict):\n        if \"id\" in properties:\n            del properties[\"id\"]\n        if \"ID\" in properties:\n            del properties[\"ID\"]\n\n    def get_dataset_and_coord_variable(self, ds_name: str, dim_name: str):\n        ds = self.get_dataset(ds_name)\n        if dim_name not in ds.coords:\n            raise ServiceResourceNotFoundError(f'Dimension {dim_name!r} has no coordinates in dataset {ds_name!r}')\n        return ds, ds.coords[dim_name]\n\n    @classmethod\n    def get_var_indexers(cls,\n                         ds_name: str,\n                         var_name: str,\n                         var: xr.DataArray,\n                         dim_names: List[str],\n                         params: RequestParams) -> Dict[str, Any]:\n        var_indexers = dict()\n        for dim_name in dim_names:\n            if dim_name not in var.coords:\n                raise ServiceBadRequestError(\n                    f'dimension {dim_name!r} of variable {var_name!r} of dataset {ds_name!r} has no coordinates')\n            coord_var = var.coords[dim_name]\n            dim_value_str = params.get_query_argument(dim_name, None)\n            try:\n                if dim_value_str is None:\n                    var_indexers[dim_name] = coord_var.values[0]\n                elif dim_value_str == 'current':\n                    var_indexers[dim_name] = coord_var.values[-1]\n                elif np.issubdtype(coord_var.dtype, np.floating):\n                    var_indexers[dim_name] = float(dim_value_str)\n                elif np.issubdtype(coord_var.dtype, np.integer):\n                    var_indexers[dim_name] = int(dim_value_str)\n                elif np.issubdtype(coord_var.dtype, np.datetime64):\n                    var_indexers[dim_name] = pd.to_datetime(dim_value_str)\n                else:\n                    raise ValueError(f'unable to convert dimension value {dim_value_str!r} to {coord_var.dtype!r}')\n            except ValueError as e:\n                raise ServiceBadRequestError(\n                    f'{dim_value_str!r} is not a valid value for dimension {dim_name!r} '\n                    f'of variable {var_name!r} of dataset {ds_name!r}') from e\n        return var_indexers\n\n    @classmethod\n    def find_dataset_descriptor(cls,\n                                dataset_descriptors: List[Dict[str, Any]],\n                                ds_name: str) -> Optional[Dict[str, Any]]:\n        # TODO: optimize by dict/key lookup\n        return next((dsd for dsd in dataset_descriptors if dsd['Identifier'] == ds_name), None)\n","sub_path":"xcube_server/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":17222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299352876","text":"\"\"\"\nGiven two rectangles, determine if they overlap. If they overlap then return the overlapped area, otherwise return -1. 
Two rectangles overlap exactly when their x-ranges and y-ranges both overlap. The rectangles are defined as a Dictionary, for example:\nr1 = {\n\n    # x and y coordinates of the bottom-left corner of the rectangle\n    'x': 2 , 'y': 4,\n\n    # Width and Height of rectangle\n    'w':5,'h':12}\n\n\"\"\"\ndef distance(x1, y1, x2, y2):\n    return min(x2, y2) - max(x1, y1)\n\ndef overlapping_area(rec1, rec2):\n    # x-distance (overlap width)\n    width = distance(rec1[\"x\"], rec2[\"x\"], rec1[\"x\"]+rec1[\"w\"], rec2[\"x\"]+rec2[\"w\"])\n\n    # y-distance (overlap height)\n    height = distance(rec1[\"y\"], rec2[\"y\"], rec1[\"y\"]+rec1[\"h\"], rec2[\"y\"]+rec2[\"h\"])\n\n    if height <= 0 or width <= 0:\n        return -1\n\n    return height * width\n\nr1 = {'x': 2 , 'y': 4,'w':5,'h':6}\nr2 = {'x': 1 , 'y': 5,'w':7,'h':4}\n\nprint(overlapping_area(r1, r2)) #20","sub_path":"Problems/Area of Overlapping Rectangles/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"356375145","text":"from __future__ import unicode_literals\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom collections import namedtuple\nfrom xml.etree import ElementTree as etree\nimport requests\n\n__all__ = [\n    'create_zip',\n    'get_addon_info',\n    'create_addon_branch',\n    'create_pull_request'\n]\n\nAddonInfo = namedtuple('AddonInfo', ['name', 'version', 'description', 'news'])\n\nREPO_URL_MASK = 'https://{gh_token}@github.com/{repo_slug}.git'\nPR_ENDPOINT = 'https://api.github.com/repos/romanvm/{}/pulls'\n\n\ndef clean_pyc(directory):\n    \"\"\"Clean .pyc files recursively in a directory\n    \n    :param directory: root directory to clean\n    :type directory: str\n    \"\"\"\n    cwd = os.getcwd()\n    os.chdir(directory)\n    paths = os.listdir(directory)\n    for path in paths:\n        abs_path = os.path.abspath(path)\n        if os.path.isdir(abs_path):\n            clean_pyc(abs_path)\n        elif path[-4:] == '.pyc':\n            os.remove(abs_path)\n    os.chdir(cwd)\n\n\ndef create_zip(zip_name, work_dir, addon_id):\n    \"\"\"Create a .zip for an addon\n    \n    :param zip_name: .zip file name\n    :type zip_name: str\n    :param work_dir: working directory\n    :type work_dir: str\n    :param addon_id: addon ID\n    :type addon_id: str\n    \"\"\"\n    print('Creating ZIP file...')\n    clean_pyc(os.path.join(work_dir, addon_id))\n    shutil.make_archive(zip_name, 'zip', root_dir=work_dir, base_dir=addon_id)\n    print('ZIP created successfully.')\n\n\ndef get_addon_info(xml_path):\n    tree = etree.parse(xml_path)\n    addon_tag = tree.getroot()\n    descr_tag = addon_tag.find('.//description[@lang=\"en_GB\"]')\n    news_tag = addon_tag.find('.//news')\n    if news_tag:\n        news = news_tag.text\n    else:\n        news = ''\n    return AddonInfo(\n        addon_tag.attrib.get('name'),\n        addon_tag.attrib.get('version'),\n        descr_tag.text,\n        news\n    )\n\n\ndef shell(*args, check=True):\n    devnull = open(os.devnull, 'w')\n    if sys.version_info >= (3, 5):\n        subprocess.run(args, check=check, stdout=devnull, stderr=devnull)\n    else:\n        if check:\n            subprocess.check_call(args, stdout=devnull, stderr=devnull)\n        else:\n            subprocess.call(args, stdout=devnull, stderr=devnull)\n\n\ndef create_addon_branch(work_dir, repo, branch, addon_id, version):\n    print('Creating addon branch...')\n    gh_username = os.environ['GH_USERNAME']\n    gh_token = os.environ['GH_TOKEN']\n    full_name = os.environ['FULL_NAME']\n    email = os.environ['EMAIL']\n    repo_fork = REPO_URL_MASK.format(\n        gh_token=gh_token,\n        repo_slug='{}/{}'.format(gh_username, repo)\n    )\n    shell('git', 'clone', repo_fork)\n    os.chdir(repo)\n    shell('git', 'config', 'user.name', '\"{}\"'.format(full_name))\n    shell('git', 'config', 
'user.email', email)\n shell('git', 'remote', 'add', 'upstream',\n 'https://github.com/xbmc/{}.git'.format(repo))\n shell('git', 'fetch', 'upstream')\n shell('git', 'checkout', '-b', branch, '--track', 'origin/' + branch)\n shell('git', 'merge', 'upstream/' + branch)\n shell('git', 'branch', '-D', addon_id, check=False)\n shell('git', 'checkout', '-b', addon_id)\n clean_pyc(os.path.join(work_dir, addon_id))\n shutil.rmtree(os.path.join(work_dir, repo, addon_id), ignore_errors=True)\n shutil.copytree(\n os.path.join(work_dir, addon_id), os.path.join(work_dir, repo, addon_id)\n )\n shell('git', 'add', '--all', '.')\n shell(\n 'git', 'commit',\n '-m', '\"[{}] {}\"'.format(addon_id, version)\n )\n shell('git', 'push', '-f', '-q', 'origin', addon_id)\n print('Addon branch created successfully.')\n\n\ndef create_pull_request(repo, branch, addon_id, addon_info):\n gh_username = os.environ['GH_USERNAME']\n gh_token = os.environ['GH_TOKEN']\n print('Checking pull request...')\n resp = requests.get(\n PR_ENDPOINT.format(repo),\n params={\n 'head': '{}:{}'.format(gh_username, addon_id),\n 'base': branch,\n },\n headers={'Accept': 'application/vnd.github.v3+json'},\n auth=(gh_username, gh_token)\n )\n print(resp.json())\n if resp.status_code == 200 and not resp.json():\n print('Submitting pull request...')\n payload = {\n 'title': '[{}] {}'.format(addon_id, addon_info.version),\n 'head': '{}:{}'.format(gh_username, addon_id),\n 'base': branch,\n 'body': '{}\\n\\n{}'.format(addon_info.description, addon_info.news),\n 'maintainer_can_modify': True,\n }\n resp = requests.post(\n PR_ENDPOINT.format(repo),\n json=payload,\n headers={'Accept': 'application/vnd.github.v3+json'},\n auth=(gh_username, gh_token)\n )\n if resp.status_code != 201:\n raise RuntimeError(\n 'GitHub API error: {}\\n{}'.format(resp.status_code, resp.text)\n )\n print('Pull request submitted successfully:')\n print(resp.text)\n elif resp.status_code == 200 and resp.json():\n print(\n 'Pull request in {} for {}:{} already exists.'.format(\n branch, gh_username, addon_id)\n )\n else:\n raise RuntimeError(\n 'Unexpected GitHub error: {}'.format(resp.status_code)\n )\n","sub_path":"deploy_addon/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167927822","text":"#15649 N과 M (1)\n# 이 문제는 자연수 N,M이 주어졌을때 1부터 N까지 자연수 중에서 \n# \"중복 없이\" M개를 고르는 수열문제\n# 나는 permutations 라이브러리를 사용해서 풀었다.\n\nfrom itertools import permutations\n\nN, M = map(int,input().split()) # 데이터 type이 int형인 N,M을 입력받는다.\n\ndata = [] # 1~N까지 수를 담아둘 리스트 선언\n\nfor i in range(1,N+1): # 1~N까지 자연수를 담는다.\n data.append(i)\nprint(data)\n\n\nP = permutations(data,M) # data리스트에서 1개를 뽑아 나열(순열은 순서가 상관이 있다.)\n # permutations을 시키면 tuple값으로 담김\nprint(P)\n#여기서 핵심은 tuple값으로 담긴걸 str형태로 뽑아서 출력해야함\n\n\nfor i in P:\n print(' '.join(map(str,i))) # i를 뽑아낼 때 tuple => str로 map메서드를 사용\n # join메서드를 사용하는 이유는 리스트의 요소를 공백기준으로 연결해 문자열로 만들기 위함\n\n\n\n#15\n\n\n\n\n\n\n\n\n\n\n","sub_path":"CodingTest_Python/HomeWork_YJ.py","file_name":"HomeWork_YJ.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184007652","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'plots'\n\nurlpatterns = [\n path('', views.home, name='Home'),\n path('resultsplot/', views.resultsplot, name='resultsplot'),\n path('edit_profile', views.edit_profile, name='edit_profile'),\n path('profile', views.profile, name='profile'),\n path('registrar/', views.signup, name='registrar'),\n path('logout/', views.logoutview, name='logout'),\n path('user_login/', views.user_login, name='user_login'),\n path('post//', views.post_view, name='post'),\n path('posts/', views.list_of_post, name='list_post'),\n path('edit_post/', views.edit_post, name='edit_post'),\n path('delete_post/', views.delete_post, name='delete_post'),\n path('new_post/', views.new_post, name='new_post'),\n]\n","sub_path":"plots/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146362954","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE.txt in the project root for\n# license information.\n# -------------------------------------------------------------------------\nfrom json import dumps\nfrom flask import (\n Response,\n Blueprint,\n)\n\nencoding_api = Blueprint(\"encoding_api\", __name__)\n\n\n@encoding_api.route(\"/latin-1\", methods=[\"GET\"])\ndef latin_1():\n r = Response(\"Latin 1: ÿ\".encode(\"latin-1\"), status=200)\n r.headers[\"Content-Type\"] = \"text/plain; charset=latin-1\"\n return r\n\n\n@encoding_api.route(\"/latin-1-with-utf-8\", methods=[\"GET\"])\ndef latin_1_charset_utf8():\n r = Response(\"Latin 1: ÿ\".encode(\"latin-1\"), status=200)\n r.headers[\"Content-Type\"] = \"text/plain; charset=utf-8\"\n return r\n\n\n@encoding_api.route(\"/no-charset\", methods=[\"GET\"])\ndef latin_1_no_charset():\n r = Response(\"Hello, world!\", status=200)\n r.headers[\"Content-Type\"] = \"text/plain\"\n return r\n\n\n@encoding_api.route(\"/iso-8859-1\", methods=[\"GET\"])\ndef iso_8859_1():\n r = Response(\"Accented: Österreich\".encode(\"iso-8859-1\"), status=200) # cspell:disable-line\n r.headers[\"Content-Type\"] = \"text/plain\"\n return r\n\n\n@encoding_api.route(\"/emoji\", methods=[\"GET\"])\ndef emoji():\n r = Response(\"👩\", status=200)\n return r\n\n\n@encoding_api.route(\"/emoji-family-skin-tone-modifier\", methods=[\"GET\"])\ndef emoji_family_skin_tone_modifier():\n r = Response(\"👩🏻‍👩🏽‍👧🏾‍👦🏿 SSN: 859-98-0987\", status=200)\n return r\n\n\n@encoding_api.route(\"/korean\", methods=[\"GET\"])\ndef korean():\n r = Response(\"아가\", status=200)\n return r\n\n\n@encoding_api.route(\"/json\", methods=[\"GET\"])\ndef json():\n data = {\"greeting\": \"hello\", \"recipient\": \"world\"}\n content = dumps(data).encode(\"utf-16\")\n r = Response(content, status=200)\n r.headers[\"Content-Type\"] = \"application/json; charset=utf-16\"\n return r\n\n\n@encoding_api.route(\"/invalid-codec-name\", methods=[\"GET\"])\ndef invalid_codec_name():\n r = Response(\"おはようございます。\".encode(\"utf-8\"), status=200)\n r.headers[\"Content-Type\"] = \"text/plain; charset=invalid-codec-name\"\n return r\n\n\n@encoding_api.route(\"/no-charset\", methods=[\"GET\"])\ndef no_charset():\n r = Response(\"Hello, world!\", status=200)\n r.headers[\"Content-Type\"] = \"text/plain\"\n return r\n\n\n@encoding_api.route(\"/gzip\", methods=[\"GET\"])\ndef gzip_content_encoding():\n r = Response(\n 
b\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\n\\xcbH\\xcd\\xc9\\xc9W(\\xcf/\\xcaI\\x01\\x00\\x85\\x11J\\r\\x0b\\x00\\x00\\x00\",\n status=200,\n )\n r.headers[\"Content-Type\"] = \"text/plain\"\n r.headers[\"Content-Encoding\"] = \"gzip\"\n return r\n\n\n@encoding_api.route(\"/deflate\", methods=[\"GET\"])\ndef deflate_content_encoding():\n r = Response(b\"\\xcb\\xc8T(\\xc9H-J\\x05\\x00\", status=200)\n r.headers[\"Content-Type\"] = \"text/plain\"\n r.headers[\"Content-Encoding\"] = \"deflate\"\n return r\n","sub_path":"sdk/core/azure-core/tests/testserver_tests/coretestserver/coretestserver/test_routes/encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"565300080","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn.metrics import *\nfrom timeit import default_timer as timer\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.feature_selection import *\n\nhelmet_data = np.genfromtxt ('helmet.csv', delimiter=\",\")\nface_data = np.genfromtxt ('face.csv', delimiter=\",\")\n\ndata = np.concatenate((helmet_data, face_data), 0)\nnp.random.shuffle(data) #shuffle the tuples\n\n#feature reduction (on HOG part)\ngain, j = mutual_info_classif(data[:, 8:-1], data[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0\nfor i in np.arange(len(gain)):\n\tif gain[i] <= 0.001:\n\t\tdata = np.delete(data, 8+i-j, 1)\n\t\tj += 1\n\nX_train, X_test, y_train, y_test = train_test_split(data[:, 0:-1], data[:, -1], test_size = 0.4, random_state = 0)\n\nstart = timer()\nclf = svm.SVC(kernel='linear', probability=True).fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nend = timer()\n\nprint(\"Confusion Matrix: \\n\")\nprint(confusion_matrix(y_test, y_pred))\n\ntarget_names = ['Helmet', 'No Helmet']\nprint(\"\\n\\nClassification Report: \\n\")\nprint(\"Accuracy: %s\" % round(accuracy_score(y_test, y_pred), 4))\nprint(\"Precision \\t: %s\" % round(precision_score(y_test, y_pred, average = 'macro'), 4))\nprint(\"Recall \\t\\t: %s\" % round(recall_score(y_test, y_pred, average = 'macro'), 4))\nprint(\"F1 \\t\\t: %s\" % round(f1_score(y_test, y_pred, average = 'macro'), 4))\n\n#Percentage of False Negatives\ny = y_test - y_pred\nfn = sum(y[y > 0]) * 100 / len(y_test)\nprint(\"There are %s%% False Negatives\" % round(fn, 4))\n\nprint(\"\\nExecution time: %s ms\" % round((end - start) * 1000, 4))\n\n#ROC curve\ny_prob = clf.predict_proba(X_test)[:,1]\nfpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)\nroc_auc = auc(fpr, tpr)\nplt.title('Support Vector Machines')\nplt.plot(fpr, tpr, 'b', label='AUC = %s'% round(roc_auc, 4))\nprint(\"\\nAUC \\t: %s\" % round(roc_auc, 4))\nplt.legend(loc='lower right')\nplt.plot([0,1],[0,1],'r--')\nplt.xlim([-0.05,1.0])\nplt.ylim([0.0,1.05])\nplt.ylabel('True Positive Rate')\nplt.xlabel('False Positive Rate')\nplt.show()\n","sub_path":"Holdout/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49094602","text":"import base64\n\n\ndef path2url(photo_path: str, chdir=''):\n chdir = chdir.strip('/') + '/' if len(chdir) != 0 else chdir\n return 'http://pic.tumiv.com/' + chdir + '/'.join(photo_path.split('/')[5:])\n\n\ndef assembling(res: dict):\n path = res['photo_path']\n res['avator'] = 
image2base64(path)\n res['photo_path'] = ''\n return res\n\n\ndef image2base64(img_path: str):\n try:\n with open(img_path, 'rb') as f:\n return base64.b64encode(f.read()).decode()\n except:\n return None\n\n\ndef args2dict(args: dict):\n params = {}\n if args.get('nickname'):\n params['nickname'] = args.get('nickname')\n if args.get('email'):\n params['email'] = args.get('email')\n if args.get('blog'):\n params['blog'] = args.get('blog')\n if args.get('github'):\n params['github'] = args.get('github')\n return params\n","sub_path":"flaskr/cqupt/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244355472","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 15 21:41:01 2019\r\n\r\n@author: vr_lab\r\n\"\"\"\r\nimport pdb\r\nimport read_tetgen\r\n\r\n\r\n# arg:\r\n# filename: the output file you want to write to\r\n# points: a list of 3D points or 1D points\r\n# write_flag: whether to rewrite ('w') or append('a')\r\ndef write_to_file(filename, points, write_flag):\r\n if write_flag == 'write':\r\n output_file = open(filename, 'w')\r\n elif write_flag == 'append':\r\n output_file = open(filename, 'a+')\r\n else:\r\n return\r\n for index, point in enumerate(points):\r\n if len(point) == 3:\r\n output_file.write(str(point[0]) + ' ' + str(point[1]) + \r\n ' ' + str(point[2]) + '\\n')\r\n if len(point) == 1:\r\n output_file.write(str(point[0]) + '\\n')\r\n output_file.close()\r\n\r\nif __name__ == \"__main__\":\r\n elefile = ''\r\n nodefile = 'Skin_Layer.node'\r\n surfacefile = 'Skin_Layer.face'\r\n triangles_unity = 'Skin_Layer_unity.face'\r\n nodefile_unity = 'Skin_Layer_unity.node'\r\n tet_gen = read_tetgen.ReadTetGen(elefile, nodefile)\r\n triangles = tet_gen.read_surface(surfacefile)\r\n coords = tet_gen.read_coordinates()\r\n write_to_file(triangles_unity, triangles, 'write')\r\n write_to_file(nodefile_unity, coords, 'write')","sub_path":"ChangeAbaqusInput/convert_tetgen_to_unity.py","file_name":"convert_tetgen_to_unity.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"409051491","text":"from odoo import models\nimport datetime\nfrom datetime import date, datetime, time, timedelta\nfrom pytz import timezone\nimport xlsxwriter\n\n\n\nclass ReportDeliverySlipExcel(models.AbstractModel):\n _name = 'report.surgi_delivery_slip_report_xlsx.delivery_slip_report'\n _inherit = 'report.report_xlsx.abstract'\n\n def generate_xlsx_report(self, workbook, data, partners):\n\n x=0\n for par in partners:\n yy=str('sheet'+str(x))\n worksheet = workbook.add_worksheet(yy[:31])\n x+=1\n header_format = workbook.add_format({\n 'font_size': 16,\n 'border': 1,\n 'align': 'left',\n 'font_color': 'black',\n 'bold': True,\n 'valign': 'vcenter',\n 'border_color': 'black',\n 'fg_color': '#C0C0C0'})\n header_format2 = workbook.add_format({\n 'font_size': 18,\n 'border': 1,\n 'align': 'center',\n 'font_color': 'white',\n 'bold': True,\n 'valign': 'vcenter',\n 'border_color': 'black',\n 'fg_color': '#C0C0C0'})\n header_format3 = workbook.add_format({\n 'font_size': 14,\n 'border': 1,\n 'align': 'left',\n 'font_color': 'black',\n 'bold': True,\n 'valign': 'vcenter',\n })\n\n # worksheet.left_to_right()\n worksheet.set_column('A:A', 40)\n worksheet.set_column('B:B', 40)\n worksheet.set_column('C:C', 40)\n worksheet.set_column('D:D', 30)\n worksheet.set_column('E:E', 30)\n 
worksheet.set_column('G:G', 50)\n worksheet.set_column('F:F', 50)\n worksheet.set_column('H:H', 50)\n worksheet.set_column('I:I', 20)\n worksheet.set_column('J:J', 20)\n worksheet.set_column('K:K', 20)\n worksheet.set_column('L:L', 20)\n worksheet.set_column('M:M', 20)\n\n\n\n domain = []\n worksheet.set_row(0, 40)\n worksheet.set_default_row(25)\n\n\n\n worksheet.merge_range('B1:D1', 'Delivery Slip Report', header_format2)\n\n worksheet.write('C3', par.name, header_format)\n\n worksheet.write('B5', 'Partner', header_format)\n worksheet.write('B6', par.partner_id.name, header_format3)\n worksheet.write('C5', 'Effective Date', header_format)\n\n date_done=''\n if par.date_done:\n date_done = datetime.strptime(str(par.date_done).split(\".\")[0],\n '%Y-%m-%d %H:%M:%S')\n worksheet.write('C6', str(date_done), header_format3)\n\n worksheet.write('A8', 'Product', header_format)\n worksheet.write('B8', 'Lot/Serial Number', header_format)\n worksheet.write('C8', 'Done', header_format)\n number = 1\n row = 8\n col=0\n for line in par.move_line_nosuggest_ids:\n if line.product_id:\n product=\"\"\n if line.product_id.default_code:\n product=\"[\"+line.product_id.default_code+\"] \"+line.product_id.name\n else:\n product= line.product_id.name\n worksheet.write(row, col, str(product), header_format3)\n if line.lot_id:\n worksheet.write(row, col+1, str(line.lot_id.name), header_format3)\n if line.qty_done:\n worksheet.write(row, col+2, str(line.qty_done), header_format3)\n\n number += 1\n row += 1\n","sub_path":"surgi_delivery_slip_report_xlsx/models/delivery_slip_report.py","file_name":"delivery_slip_report.py","file_ext":"py","file_size_in_byte":3768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12494451","text":"import numpy as np\nimport tensorflow as tf\n\n\nclass Policy:\n def __init__(self, actions, n_state=4, lr=0.01, reward_decay=0.95, n_l1=40):\n self.epsilon = 0.9\n self.actions = actions\n self.n_state = n_state\n self.n_actions = len(self.actions)\n self.n_l1 = n_l1\n self.lr = lr\n self.gamma = reward_decay\n\n self.reward_v, self.action_v, self.state_v = [], [], []\n\n self.build_net()\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.cost_his = []\n\n def build_net(self):\n self.states = tf.placeholder(tf.float32, [None, self.n_state], name=\"state\")\n self.act = tf.placeholder(tf.int32, [None, self.n_actions], name=\"actions_num\")\n self.tf_vt = tf.placeholder(tf.float32, [None, 1], name=\"actions_value\")\n with tf.variable_scope(\"l1\"):\n l1 = tf.layers.dense(self.states, self.n_l1, activation=tf.nn.relu,\n kernel_initializer=tf.random_normal_initializer(mean=0., stddev=0.03),\n bias_initializer=tf.constant_initializer(0.01), name=\"l1\")\n with tf.variable_scope(\"out\"):\n out = tf.layers.dense(l1, self.n_actions,\n kernel_initializer=tf.random_normal_initializer(mean=0., stddev=0.03),\n bias_initializer=tf.constant_initializer(0.01), name=\"out\")\n self.act_pro = tf.nn.softmax(out, name=\"act_pro\")\n with tf.name_scope(\"loss\"):\n log_pro = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.act_pro, labels=self.act)\n self.loss = tf.reduce_mean(log_pro * self.tf_vt)\n with tf.name_scope(\"train\"):\n self.train_step = tf.train.AdamOptimizer(self.lr).minimize(self.loss)\n\n def conver_state(self, state):\n row = (int(state / 10) - 2) / 2\n col = (state % 10 - 2) / 2\n return [row, col]\n\n def store_transition(self, state, action, reward, other_state):\n s = 
self.conver_state(state) + self.conver_state(other_state)\n self.state_v.append(s)\n action_index = self.actions.index(action)\n action_v = np.zeros(len(self.actions), dtype=np.float32)\n action_v[action_index] = 1\n self.action_v.append(action_v)\n self.reward_v.append(reward)\n\n def choose_action(self, state, other_state, islern=True):\n if ((np.random.uniform() > self.epsilon) and islern):\n return np.random.choice(self.actions)\n else:\n s = np.hstack((self.conver_state(state), self.conver_state(other_state)))\n s = s.reshape((1, self.n_state))\n action_val = self.sess.run(self.act_pro, feed_dict={self.states: s})\n # print(\"act_pro\")\n # print(action_val)\n # action_index = np.random.choice(range(action_val.shape[1]), p=action_val.ravel())\n action_index = np.argmax(action_val[0])\n action = self.actions[action_index]\n print(\"state=\" + str(state) + \"; other state=\" + str(other_state) + \";action=\" + str(action))\n return action\n\n def clear_storage(self):\n self.reward_v, self.action_v, self.state_v = [], [], []\n\n def lern(self):\n s_v = np.array(self.state_v)\n a_v = np.array(self.action_v)\n # r_v = np.vstack(self.reward_v)\n r_v = self._discount_and_norm_rewards()\n # print(\"s_v\")\n # print(s_v)\n # print(\"a_v\")\n # print(a_v)\n # print(\"r_v\")\n # print(r_v)\n _, cost = self.sess.run([self.train_step, self.loss],\n feed_dict={self.states: s_v, self.act: a_v,\n self.tf_vt: r_v})\n self.clear_storage()\n print(\"loss=\" + str(cost))\n # print(q_target)\n self.cost_his.append(cost)\n\n def _discount_and_norm_rewards(self):\n # discount episode rewards\n discounted_ep_rs = np.zeros_like(self.reward_v)\n running_add = 0\n for t in reversed(range(0, len(self.reward_v))):\n running_add = running_add * self.gamma + self.reward_v[t]\n discounted_ep_rs[t] = running_add\n\n # normalize episode rewards\n discounted_ep_rs = discounted_ep_rs.astype(np.float32)\n mean = np.mean(discounted_ep_rs)\n # print(\"mean=\" + str(mean))\n discounted_ep_rs -= mean\n std = np.std(discounted_ep_rs)\n # print(\"std=\" + str(std))\n if (std != 0.):\n discounted_ep_rs /= std\n return np.vstack(discounted_ep_rs)\n\n def plot_cost(self):\n import matplotlib.pyplot as plt\n plt.plot(np.arange(len(self.cost_his)), self.cost_his)\n plt.ylabel('Cost')\n plt.xlabel('training steps')\n plt.show()\n","sub_path":"RL_Policy.py","file_name":"RL_Policy.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"165952031","text":"import time \nimport sys\nimport pandas as pd\nimport numpy as np\nimport Utils\nfrom Utils import *\n\n\ndfList = []\ndfTables = []\ngraphs = []\ndfInfos = []\nnotifications = []\n\ndef Process(unit):\n if(unit is not None):\n unit = int(unit)\n if unit == 1: \n String()\n elif unit == 2:\n Numpy()\n return\n\ndef String():\n global dfInfos, dfList, graphs, notifications\n dataframer = DataFramer()\n htmlHelper = HTMLHelper()\n\n original = \"Welcome to String Operations\"\n notifications.append(\"Original String \" + original)\n welcome = original[:7]\n notifications.append(\"original[:7] : \" + welcome)\n strin = original[11:][0:6]\n notifications.append(\"original[11:][0:6] : \" + strin)\n wel2 = original[::2]\n notifications.append(\"original[::2] : \" + wel2)\n \n numbers = \"0123456789\"\n notifications.append(\"Numbers \" + numbers)\n evens = numbers[::2]\n notifications.append(\"evens numbers[::2] : \" + evens)\n odds = numbers[1::2]\n notifications.append(\"odds numbers[1::2] : \" + 
odds)\n\n likes = \"Sammy likes to swim in the ocean, likes to spin up servers, and likes to smile.\"\n notifications.append(\"likes is \" + likes)\n notifications.append(\"likes.count('likes') is \" + str(likes.count(\"likes\")))\n notifications.append(\"likes.find('likes') is \" + str(likes.find(\"likes\")))\n notifications.append(\"likes.upper() is \" + likes.upper())\n notifications.append(\"likes.lower() is \" + likes.lower())\n return\n\ndef Numpy():\n helper = Helper()\n # Create a python list\n a = [\"0\", 1, \"two\", \"3\", 4]\n\n # Print each element\n print(\"a[0]:\", a[0])\n print(\"a[1]:\", a[1])\n print(\"a[2]:\", a[2])\n print(\"a[3]:\", a[3])\n print(\"a[4]:\", a[4])\n\n helper.PrintNumpy(np.array(a))\n\n # Create numpy array\n c = np.array([20, 1, 2, 3, 4])\n # Assign the first element to 100\n c[0] = 100\n # Assign the 5th element to 0\n c[4] = 0\n helper.PrintNumpy(c)\n\n # Slicing the numpy array\n d = c[1:4]\n # Set the fourth element and fifth element to 300 and 400\n c[3:5] = 300, 400\n helper.PrintNumpy(d)\n # Create the index list\n select = [0, 2, 3]\n # Use List to select elements\n d = c[select]\n # Assign the specified elements to new value\n c[select] = 100000\n helper.PrintNumpy(d)\n\n # Create a numpy array\n a2 = np.array([0, 1, 2, 3, 4])\n helper.PrintNumpy(a2, describe = True)\n \n # Create a numpy array\n a2 = np.array([1, -1, 1, -1, 2, 3, 4, 5])\n helper.PrintNumpy(a2, True, True) \n\n u = np.array([1, 0])\n v = np.array([0, 1])\n # Numpy Array Addition\n z = u + v\n helper.PrintNumpy(z, True, True) \n \n # Plot numpy arrays\n #Plotvec1(u, z, v)\n\n # Create a numpy array\n y = np.array([1, 2])\n # Numpy Array Multiplication\n z2 = 2 * y\n helper.PrintNumpy(z2, True, True) \n \n # Create a numpy array\n u2 = np.array([1, 2])\n v2 = np.array([3, 2])\n # Calculate the production of two numpy arrays\n z3 = u2 * v2\n helper.PrintNumpy(z3, True, True) \n\n # Calculate the dot product\n npdot = np.dot(u2, v2)\n print(\"dot product is \" + str(npdot))\n \n # Create a constant to numpy array\n u4 = np.array([1, 2, 3, -1])\n # Add the constant to array\n u5 = u4 + 1\n helper.PrintNumpy(u5, True, True) \n\n # Create the numpy array in radians\n x2 = np.array([0, np.pi/2 , np.pi])\n helper.PrintNumpy(x2, True, True) \n # Calculate the sin of each elements\n y2 = np.sin(x2)\n helper.PrintNumpy(y2, True, True) \n\n print(\"Makeup a numpy array within [-2, 2] and 5 elements\")\n nlin = np.linspace(-2, 2, num=5)\n helper.PrintNumpy(nlin, True, True) \n\n print(\"Makeup a numpy array within [-2, 2] and 9 elements\")\n nlin2 = np.linspace(-2, 2, num=9)\n helper.PrintNumpy(nlin2, True, True)\n \n print(\"Makeup a numpy array within [0, 2π] and 100 elements\") \n nlin3 = np.linspace(0, 2*np.pi, num=100)\n helper.PrintNumpy(nlin3, True, True)\n \n # Plot the result\n #plt.plot(x, y)\n\n return\n\ndef Plotvec1(u, z, v):\n ax = plt.axes()\n ax.arrow(0, 0, *u, head_width=0.05, color='r', head_length=0.1)\n plt.text(*(u + 0.1), 'u')\n \n ax.arrow(0, 0, *v, head_width=0.05, color='b', head_length=0.1)\n plt.text(*(v + 0.1), 'v')\n ax.arrow(0, 0, *z, head_width=0.05, head_length=0.1)\n plt.text(*(z + 0.1), 'z')\n plt.ylim(-2, 2)\n plt.xlim(-2, 2)\n\ndef Plotvec2(a,b):\n ax = plt.axes()\n ax.arrow(0, 0, *a, head_width=0.05, color ='r', head_length=0.1)\n plt.text(*(a + 0.1), 'a')\n ax.arrow(0, 0, *b, head_width=0.05, color ='b', head_length=0.1)\n plt.text(*(b + 0.1), 'b')\n plt.ylim(-2, 2)\n plt.xlim(-2, 
2)","sub_path":"IBMBrain/PyBasics.py","file_name":"PyBasics.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6595527","text":"from PIL import Image, ImageFont, ImageDraw\nimport numpy as np\n\n\ndef genLetter():\n\tboxsize = 100\n\tfontsize = int(boxsize * 0.8)\n\timg = Image.new('RGB', (boxsize, boxsize), color=(255, 255, 255))\n\n\t# get a font\n\tcharacter = 'P'\n\n\tfont = ImageFont.truetype(\"/System/Library/Fonts/Keyboard.ttf\", fontsize)\n\twidth, height = font.getsize(character)\n\n\tx = int((boxsize - width)/2)\n\ty = int((boxsize - height*1.3)/2) # Need to adjust for font height: https://websemantics.uk/articles/font-size-conversion/\n\n\td = ImageDraw.Draw(img)\n\td.text( (x,y) , character, fill=(0, 0, 0), font=font)\n\n\t# Flood file for masking.\n\tImageDraw.floodfill(img, xy=(0, 0), value=(255, 0, 255), thresh=200) # https://stackoverflow.com/questions/46083880/fill-in-a-hollow-shape-using-python-and-pillow-pil\n\n\t# Fill in holes.\n\tn = np.array(img)\n\tn[(n[:, :, 0:3] != [255, 0, 255]).any(2)] = [0, 0, 0]\n\t# Revert all artifically filled magenta pixels to white\n\tn[(n[:, :, 0:3] == [255,0,255]).all(2)] = [255,255,255]\n\n\treturn(n)\n\n\n\nif __name__ == '__main__':\n\tn = genLetter()\n\n\timg = Image.fromarray(n)\n\timg.save('pil_text.png')\n\timg.show()\n","sub_path":"PycharmProjects/geodesic/FilledPolygon.py","file_name":"FilledPolygon.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534863410","text":"def day_number(name):\n\tdays = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",\n\t\t\t\"Saturday\"]\n\tif name not in days:\n\t\treturn None\n\treturn days.index(name)\n\n\ndef day_add(name, number):\n\tdays = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\",\n\t\t\t\"Saturday\"]\n\tday_num = day_number(name)\n\treturn days[(day_num + number) % 7]\n\n\nprint(day_add(\"Monday\", 4))\nprint(day_add(\"Tuesday\", 0))\nprint(day_add(\"Tuesday\", 14))\nprint(day_add(\"Sunday\", 100))\n","sub_path":"Books/Python/How to Think Like a Computer Scientist - Chris Meyers/exercise_answers/ch04/day_add.py","file_name":"day_add.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37389834","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nfrom zvt.contract.api import df_to_db, get_data\nfrom zvt.contract.recorder import TimeSeriesDataRecorder\nfrom zvt.utils.time_utils import now_pd_timestamp, to_time_str\nfrom zvt.domain import Stock, StockValuation, Etf, StockValuationNew, StockTradeDay\n\ntry:\n from EmQuantAPI import *\nexcept:\n pass\n\nclass JqChinaStockValuationRecorder(TimeSeriesDataRecorder):\n entity_provider = 'emquantapi'\n entity_schema = Stock\n\n # 数据来自jq\n provider = 'emquantapi'\n\n # data_schema = StockValuationNew\n data_schema = StockValuation\n\n def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,\n force_update=False, sleeping_time=5, default_size=2000, real_time=False, fix_duplicate_way='add',\n start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:\n super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,\n default_size, real_time, fix_duplicate_way, start_timestamp, 
end_timestamp, close_hour,\n close_minute)\n # 调用登录函数(激活后使用,不需要用户名密码)\n loginResult = c.start(\"ForceLogin=1\", '')\n if (loginResult.ErrorCode != 0):\n print(\"login in fail\")\n exit()\n\n def record2(self, entity, start, end, size, timestamps):\n if not end:\n end = to_time_str(now_pd_timestamp())\n if (pd.to_datetime(end) - start).days >= 30:\n from datetime import timedelta\n end = to_time_str(start + timedelta(days=30))\n start = to_time_str(start)\n if start == end:\n return None\n # 暂不处理港股\n if 'hk' in entity.id:\n return None\n exchange = 'SH' if 'sh' in entity.id else 'SZ'\n em_code = entity.code + '.' + exchange\n columns_list = {\n 'TOTALSHARE': 'capitalization', # 总股本\n 'LIQSHARE': 'circulating_cap', # 流通股本\n 'MV': 'market_cap', # 总市值\n 'LIQMV': 'circulating_market_cap', # 流通市值\n 'TURN': 'turnover_ratio', # 换手率\n 'PELYR': 'pe', # 静态pe\n 'PETTM': 'pe_ttm', # 动态pe\n 'PBLYR': 'pb', # 市净率PB(最新年报)\n 'PBMRQ': 'pb_mrq', # 市净率PB(MRQ)\n 'PSTTM': 'ps_ttm', # 市销率PS(TTM)\n 'PCFTTM': 'pcf_ttm', # 市现率PCF(最新年报,经营性现金流)\n }\n # df = c.csd(em_code, [i for i in columns_list.keys()], start,end,\"ispandas=1,DelType=2\")\n df = get_data(data_schema=StockValuation, entity_id=entity.id, provider='joinquant', start_timestamp=start,\n end_timestamp=end)\n if df.empty:\n df = get_data(data_schema=StockValuation, entity_id=entity.id, provider='joinquant', limit=1)\n start = df.timestamp[0]\n end = to_time_str(start + timedelta(days=30))\n df = get_data(data_schema=StockValuation, entity_id=entity.id, provider='joinquant', start_timestamp=start,\n end_timestamp=end)\n if df.empty:\n return None\n df.rename(columns={\n \"ps\": \"ps_ttm\",\n \"pcf\": \"pcf_ttm\",\n }, inplace=True)\n trade_day = StockTradeDay.query_data(order=StockTradeDay.timestamp.desc(), start_timestamp=start,\n end_timestamp=end)\n df_capital_all = pd.DataFrame()\n for tradeday in trade_day.timestamp:\n df_capital = c.css(em_code, \"WACC,DIVIDENDYIELDNEW\",\n f\"TradeDate={to_time_str(tradeday)},FrIndex=1,MrIndex=1,ispandas=1\")\n try:\n df_capital['DATES'] = tradeday\n except:\n continue\n df_capital_all = df_capital_all.append(df_capital)\n # 'DIVIDENDYIELDNEW': 'div_yield', #股息率\n\n try:\n if df.empty:\n return None\n except:\n self.logger.info(f'choice数据源的个股估值尚未准备完成,获取失败。'\n f'股票代码:{em_code}-开始时间:{start}-结束时间:{end}')\n return None\n df['CODES'] = df_capital_all.index[0]\n df['DATES'] = df['timestamp']\n df_capital_all['DATES'] = pd.to_datetime(df_capital_all['DATES'])\n\n df_capital_all.rename(columns={\"DIVIDENDYIELDNEW\": \"div_yield\", \"WACC\": \"wacc\"}, inplace=True)\n df = pd.merge(df, df_capital_all, on=['CODES', 'DATES'], how='outer')\n df.dropna(subset=['id'],inplace=True)\n df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)\n\n return None\n\n def record(self, entity, start, end, size, timestamps):\n if not end:\n end = to_time_str(now_pd_timestamp())\n if (pd.to_datetime(end) - start).days >=800:\n from datetime import timedelta\n end = to_time_str(start+timedelta(days=800))\n start = to_time_str(start)\n if start == end:\n return None\n # 暂不处理港股\n if 'hk' in entity.id:\n return None\n exchange = 'SH' if 'sh' in entity.id else 'SZ'\n em_code = entity.code+'.'+exchange\n columns_list = {\n 'TOTALSHARE': 'capitalization', # 总股本\n 'LIQSHARE': 'circulating_cap', # 流通股本\n 'MV': 'market_cap', #总市值\n 'LIQMV': 'circulating_market_cap', #流通市值\n 'TURN': 'turnover_ratio', #换手率\n 'PELYR': 'pe', # 静态pe\n 'PETTM': 'pe_ttm', # 动态pe\n 'PBLYR': 'pb', # 市净率PB(最新年报)\n # 'PBMRQ': 'pb_mrq', 
# 市净率PB(MRQ)\n # 'PSTTM': 'ps_ttm', #市销率PS(TTM)\n 'PCFTTM': 'pcf_ttm', #市现率PCF(最新年报,经营性现金流)\n # 'DIVIDENDYIELD': 'div_yield', #股息率\n }\n\n df = c.csd(em_code, [i for i in columns_list.keys()], start,end,\"ispandas=1,DelType=2\")\n try:\n if df.empty:\n return None\n except:\n self.logger.info(f'choice数据源的个股估值尚未准备完成,获取失败。'\n f'股票代码:{em_code}-开始时间:{start}-结束时间:{end}')\n return None\n df.rename(columns=columns_list,inplace=True)\n df['entity_id'] = entity.id\n df['timestamp'] = pd.to_datetime(df['DATES'])\n df['code'] = entity.code\n df['name'] = entity.name\n df['turnover_ratio'] = df['turnover_ratio'] / 100\n df['id'] = df['timestamp'].apply(lambda x: \"{}_{}\".format(entity.id, to_time_str(x)))\n\n df_to_db(df=df, data_schema=self.data_schema, provider=self.provider, force_update=self.force_update)\n\n return None\n\n\n__all__ = ['JqChinaStockValuationRecorder']\n\nif __name__ == '__main__':\n # 上证50\n df = Etf.get_stocks(code='510050')\n stocks = df.stock_id.tolist()\n print(stocks)\n print(len(stocks))\n\n JqChinaStockValuationRecorder(entity_ids=stocks, force_update=True).run()\n","sub_path":"zvt/recorders/emquantapi/fundamental/stock_valuation_recorder.py","file_name":"stock_valuation_recorder.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143611026","text":"from TAS_Data_reading import choose_dataset\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# -------------------------- Spline definition --------------------------\r\ndef S(x, xj, a, b, c, d):\r\n return a + b * (x-xj) + c * (x-xj)**2 + d * (x-xj)**3\r\n\r\ndef system_matrix(grid):\r\n N = len(grid)-1\r\n h = grid[1:] - grid[:-1] # width of interval i - vector size N\r\n A = np.zeros((N+1,N+1))\r\n ### Equations describing conditions at interior nodes\r\n for i in range(1, N):\r\n A[i,i] = 2*(h[i-1]+h[i])\r\n A[i,i-1] = h[i-1]\r\n A[i,i+1] = h[i]\r\n return A\r\n\r\ndef rhs(grid, fi): ### RHS - interior points\r\n N = len(grid)-1\r\n h = grid[1:] - grid[:-1] # width of interval i - vector size N\r\n rhs = np.zeros(N+1)\r\n for i in range(1, N):\r\n rhs[i] = 3.*(fi[i+1] - fi[i])/h[i] - 3.*(fi[i] - fi[i-1])/h[i-1]\r\n return rhs\r\n\r\ndef spline_natural(xi, fi, xx):\r\n \"\"\"\r\n One-shot function for spline interpolation (with natural BCs).\r\n \r\n Args:\r\n xi (array, n+1): Sample locations\r\n fi (array, n+1): Sample values\r\n xx (array, M): Reconstuction locations\r\n Return:\r\n ff (array, M): Reconstructed values at xx\r\n \"\"\"\r\n h = xi[1:] - xi[:-1] # Interval width\r\n N = len(h)\r\n ### Setup system\r\n A = system_matrix(xi) # Left-hand side \r\n frhs = rhs(xi, fi) # Right-hand side\r\n A[0,0] = A[N,N] = 1 # BC for LHS (natural)\r\n frhs[0] = 0 # BC for RHS (natural)\r\n frhs[-1] = 0 \r\n ### Solve system for coefficients\r\n c = np.linalg.solve(A, frhs)\r\n a = fi[:] # N+1\r\n b = (a[1:] - a[:-1]) / h[:] - h[:]/3. * (2*c[:-1] + c[1:]) # N\r\n d = (c[1:] - c[:-1]) / (3. 
* h[:]) # N\r\n ### Reconstuct spline at locations xx \r\n ii = np.digitize(xx, xi) # Find to which interval each xx belongs\r\n ii = np.fmin(np.fmax(ii-1,0),N-1) \r\n ff = np.zeros(xx.shape)\r\n for j, i in enumerate(ii): # Compute spline for each x\r\n ff[j] = S(xx[j], xi[i], a[i], b[i], c[i], d[i])\r\n return ff\r\n\r\n\r\n# -------------------------- Spline implementation and plotting --------------------------\r\ndef spline(filename, element_number, xx, t_start, t_end, plotting = False):\r\n\r\n data = choose_dataset(filename)[element_number] \r\n xi, fi = np.array([data[i][0] for i in range(1, len(data))]), np.array([data[i][1] for i in range(1, len(data))])\r\n\r\n spline = spline_natural(xi, fi, xx)\r\n\r\n if plotting:\r\n plt.title('Element ' + data[0]); plt.xlabel(\"Time [s]\"); plt.ylabel(\"Temperature [°C]\")\r\n plt.plot(xi, fi, label='Experimental data', marker='o'); plt.plot(xx, spline , label='Spline reconstruction')\r\n plt.legend(loc='best'); plt.xlim(t_start, t_end)\r\n plt.show()\r\n\r\n return spline\r\n\r\n\r\n# The following command just makes sure that whetever is inside of this is run only when this file is run. \r\n# For example, if I was to import this file (Interpolation.py) into another file, whetever is inside of this loop would not be run. \r\nif __name__ == '__main__': \r\n\r\n # Possible filenames: 'AB2_data/AB2/Baffle_surfaces.csv', 'AB2_data/AB2/Mirror_segments.csv', 'AB2_data/AB2/Secondary_mirror.csv'\r\n # Might have to change directory based on where you saved file on own your computer\r\n filename, element_number = 'AB2_data/AB2/Baffle_surfaces.csv', 2\r\n t_start, t_end, t_steps = 0, 15000, 10001 # Set boundary for graph and spline evaluation\r\n xx = np.linspace(t_start, t_end, t_steps) # Set datapoints to evaluate splne at\r\n \r\n spline = spline(filename, element_number, xx, t_start, t_end, plotting = False)\r\n","sub_path":"Groundtrack/Interpolation.py","file_name":"Interpolation.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332254252","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nアンダースコアで始まる名前を持つ要素以外の要素を収集し、\nそのdocstringか、型を表示する。\n\"\"\"\n\nclass API(object):\n def _print_values(self, obj):\n def _print_value(key):\n if key.startswith('_'):\n return ''\n value = getattr(obj, key)\n if not hasattr(value, 'im_func'):\n doc = type(value).__name__\n else:\n if value.__doc__ is None:\n doc = 'no docstring'\n else:\n doc = value.__doc__\n return '%s : %s' % (key, doc)\n res = [_print_value(el) for el in dir(obj)]\n return ' '.join([el for el in res if el != ''])\n\n def __get__(self, instance, klass):\n if instance is not None:\n return self._print_values(instance)\n else:\n return self._print_values(klass)\n\nclass MyClass(object):\n __doc__ = API()\n def __init__(self):\n self.a = 2\n self._b = 98\n def meth(self):\n \"\"\"my method\"\"\"\n return 1\n def hey(self):\n return 2\n\nif __name__ == '__main__':\n MyClass.__doc__\n instance = MyClass()\n print(instance.__doc__)\n","sub_path":"expert/introDescripter.py","file_name":"introDescripter.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351680789","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nfrom pyowm.exceptions import parse_response_error, api_call_error\nfrom pyowm.commons.http_client import HttpClient\n\n\nclass TestHTTPClient(unittest.TestCase):\n\n instance = HttpClient()\n\n def 
test_get_json_against_httpbin_ok(self):\n        # https://httpbin.org/ip\n        status, data = self.instance.get_json('http://httpbin.org/ip')\n        self.assertEqual(200, status)\n        self.assertIsInstance(data, dict)\n\n    def test_get_json_against_httpbin_status_code_ko(self):\n        # https://httpbin.org/status/400\n        expected_status = 400\n\n        self.assertRaises(api_call_error.APICallError, HttpClient.get_json,\n                          self.instance, 'https://httpbin.org/status/' +\n                          str(expected_status))\n\n    def test_get_json_against_httpbin_parse_error(self):\n        # https://httpbin.org/xml\n        try:\n            status, data = self.instance.get_json('http://httpbin.org/xml')\n            self.fail()\n        except parse_response_error.ParseResponseError:\n            pass\n\n    def test_put_against_httpbin(self):\n        # https://httpbin.org/put\n        formdata = dict(a=1, b=2, c=3)\n        status, data = self.instance.put('http://httpbin.org/put', data=formdata)\n        self.assertEqual(200, status)\n        self.assertIsInstance(data, dict)\n        self.assertEqual(formdata, data['json'])  # assertEquals is a deprecated alias of assertEqual\n\n    def test_delete_against_httpbin(self):\n        # https://httpbin.org/delete\n        formdata = dict(a=1, b=2, c=3)\n        status, data = self.instance.delete('http://httpbin.org/delete', data=formdata)\n        self.assertEqual(200, status)\n        self.assertIsInstance(data, dict)\n        self.assertEqual(formdata, data['json'])\n","sub_path":"tests/integration/commons/test_http_client.py","file_name":"test_http_client.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349448164","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass Encoder(nn.Module):\n\n\tdef __init__(self):\n\n\t\tsuper(Encoder, self).__init__()\n\t\tself.conv1 = nn.Conv2d(1, 1024, 3, padding=1, stride=2)\n\t\tself.conv2 = nn.Conv2d(1024, 512, 3, padding=1)\n\t\tself.conv3 = nn.Conv2d(512, 256, 3, padding=1)\n\t\tself.conv4 = nn.Conv2d(256, 512, 3, padding=1)\n\t\tself.conv5 = nn.Conv2d(512, 1024, 3, padding=1) \n\t\tself.conv6 = nn.Conv2d(1024, 1024, 3, padding=1, stride=2)\n\t\tself.conv7 = nn.Conv2d(1024, 2048, 3, padding=1, stride=2)\n\t\tself.bn1 = nn.BatchNorm2d(1024)\n\t\tself.bn2 = nn.BatchNorm2d(512)\n\t\tself.bn3 = nn.BatchNorm2d(256)\n\t\tself.bn4 = nn.BatchNorm2d(512)\n\t\tself.bn5 = nn.BatchNorm2d(1024)\n\t\tself.bn6 = nn.BatchNorm2d(1024)\n\t\tself.bn7 = nn.BatchNorm2d(2048)\n\n\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m,nn.Conv2d) or isinstance(m, nn.Linear):\n\t\t\t\tprint('Initializing', m)\n\t\t\t\tnn.init.xavier_normal_(m.weight)\n\t\n\tdef forward(self, x):\n\n\t\t#print('ENCODER')\n\n\t\tout = self.bn1(F.leaky_relu(self.conv1(x)))\n\n\t\t#print('Conv1:', out.shape)\n\n\t\tout = self.bn2(F.leaky_relu(self.conv2(out)))\n\n\t\t#print('Conv2: ', out.shape)\n\n\t\tout = self.bn3(F.leaky_relu(self.conv3(out)))\n\n\t\t#print('Conv3: ', out.shape)\n\n\t\tout = self.bn4(F.leaky_relu(self.conv4(out)))\n\n\t\t#print('Conv4: ', out.shape)\n\t\tout = self.bn5(F.leaky_relu(self.conv5(out)))\n\n\t\tout = self.bn6(F.leaky_relu(self.conv6(out)))\n\t\t\n\t\tout = self.bn7(F.leaky_relu(self.conv7(out)))\n\t\t\n\t\treturn out\n\n\nclass Decoder(nn.Module):\n\n\tdef __init__(self, out_channels=1):\n\n\t\tsuper(Decoder, self).__init__()\n\t\tself.upsample1 = nn.Upsample(scale_factor=4)\n\t\tself.upsample2 = nn.Upsample(scale_factor=2)\n\n\t\tself.conv1 = nn.Conv2d(2048, 512, 3, padding=1)\n\t\tself.conv2 = nn.Conv2d(512, 512, 3, padding=1)\n\t\tself.conv3 = nn.Conv2d(512, 1024, 3, padding=1)\n\t\tself.conv4 = nn.Conv2d(1024, 
out_channels, 3, padding=1)\n\t\tself.bn1 = nn.BatchNorm2d(512)\n\t\tself.bn2 = nn.BatchNorm2d(512)\n\t\tself.bn3 = nn.BatchNorm2d(1024)\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m,nn.Conv2d) or isinstance(m, nn.Linear):\n\t\t\t\tprint('Initializing', m)\n\t\t\t\tnn.init.xavier_normal_(m.weight)\n\n\tdef forward(self, x):\n\n\t\t#print('DECODER')\n\n\t\tout = self.bn1(F.relu(self.conv1(x)))\n\t\t#print('Conv1 : ', out.shape)\n\n\t\tout = self.upsample1(out)\n\n\t\tout = self.bn2(F.relu(self.conv2(out)))\n\t\t#print('Conv2: ', out.shape)\n\n\t\tout = self.upsample2(out)\n\n\t\tout = self.bn3(F.relu(self.conv3(out)))\n\t\t#print('Conv3: ', out.shape)\n\n\t\tout = F.relu(self.conv4(out))\n\n\n\t\t#print('Conv4: ', out.shape)\n\n\t\treturn out\n\n\nclass ColorDecoder(nn.Module):\n\n\tdef __init__(self, out_channels=1):\n\n\t\tsuper(ColorDecoder, self).__init__()\n\t\tself.upsample1 = nn.Upsample(scale_factor=4)\n\t\tself.upsample2 = nn.Upsample(scale_factor=2)\n\n\t\tself.conv1 = nn.Conv2d(2048, 256, 3, padding=1)\n\t\tself.conv2 = nn.Conv2d(256, 512, 3, padding=1)\n\t\tself.conv3 = nn.Conv2d(512, 1024, 3, padding=1)\n\t\tself.conv4 = nn.Conv2d(1024, 512, 3, padding=1)\n\t\tself.conv5 = nn.Conv2d(512, 256, 3, padding=1)\n\t\tself.conv6 = nn.Conv2d(256, 128, 3, padding=1)\n\t\tself.conv7 = nn.Conv2d(128, out_channels, 3, padding=1)\n\n\t\tself.bn1 = nn.BatchNorm2d(256)\n\t\tself.bn2 = nn.BatchNorm2d(512)\n\t\tself.bn3 = nn.BatchNorm2d(1024)\n\t\tself.bn4 = nn.BatchNorm2d(512)\n\t\tself.bn5 = nn.BatchNorm2d(256)\n\t\tself.bn6 = nn.BatchNorm2d(128)\n\n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m,nn.Conv2d) or isinstance(m, nn.Linear):\n\t\t\t\tprint('Initializing', m)\n\t\t\t\tnn.init.xavier_normal_(m.weight)\n\n\tdef forward(self, x):\n\n\t\t#print('DECODER')\n\t\tout = self.bn1(F.leaky_relu(self.conv1(x)))\n\t\t#print('Conv1 : ', out.shape)\n\n\t\tout = self.upsample1(out)\n\n\t\tout = self.bn2(F.leaky_relu(self.conv2(out)))\n\t\t#print('Conv2: ', out.shape)\n\n\t\tout = self.upsample2(out)\n\n\t\tout = self.bn3(F.leaky_relu(self.conv3(out)))\n\t\t#print('Conv3: ', out.shape)\n\n\t\tout = self.bn4(F.leaky_relu(self.conv4(out)))\n\n\t\tout = self.bn5(F.relu(self.conv5(out)))\n\n\t\tout = self.bn6(F.relu(self.conv6(out)))\n\n\t\tout = F.relu(self.conv7(out))\n\t\t#print('Conv4: ', out.shape)\n\n\t\treturn out\n\n\nclass Generator(nn.Module):\n\n\tdef __init__(self, train=True):\n\n\t\tsuper(Generator, self).__init__()\n\t\tself.encode = Encoder()\n\n\t\tself.decode_color = ColorDecoder(2)\n\t\t# self.decode_color = self.decode_color.cuda(\"cuda:1\")\n\t\tself.train_stat = train\n\t\tif self.train_stat:\n\t\t\tself.decode1 = Decoder()\n\n\n\tdef forward(self, x):\n\n\t\t# print('Generator')\n\n\t\tout = self.encode(x)\n\t\t# out = out.cuda(\"cuda:1\")\n\t\tout_ab = self.decode_color(out)\n\t\t# out_ab = out_ab.cuda(\"cuda:0\")\n\t\tif self.train_stat:\n\t\t\tout_l = self.decode1(out)\n\t\t\n\t\t\treturn out_l, out_ab\n\n\t\treturn out_ab\n\nclass Discriminator(nn.Module):\n\n\tdef __init__(self, dim):\n\n\t\tsuper(Discriminator, self).__init__()\n\t\t\n\t\tself.conv1 = nn.Conv2d(2, 1024, 3, padding=1,stride=2)\n\t\tself.conv2 = nn.Conv2d(1024, 512, 3, padding=1)\n\t\tself.conv3 = nn.Conv2d(512, 256, 3, padding=1, stride=2)\n\t\tself.conv4 = nn.Conv2d(256, 128, 3, padding=1, stride=2)\n\t\t\n\t\tself.dropout1 = nn.Dropout(p=0.3)\n\t\tself.dropout2 = nn.Dropout(p=0.2) \n\n\t\tself.linear1 = nn.Linear(128 * int(dim/8) * int(dim/8), 100)\n\t\tself.linear2 = nn.Linear(100, 
50)\n\t\tself.linear3 = nn.Linear(50, 1)\n\n\t\tself.bn1 = nn.BatchNorm2d(1024)\n\t\tself.bn2 = nn.BatchNorm2d(512)\n\t\tself.bn3 = nn.BatchNorm2d(256)\n\t\tself.bn4 = nn.BatchNorm2d(128) \n\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m,nn.Conv2d) or isinstance(m, nn.Linear):\n\t\t\t\tprint('Initializing', m)\n\t\t\t\tnn.init.xavier_normal_(m.weight)\n\n\tdef forward(self, x):\n\n\t\t# print('Discriminator')\n\n\t\tout = self.bn1(F.leaky_relu(self.conv1(x)))\n\t\t# print(out.shape)\n\t\tout = self.bn2(F.leaky_relu(self.conv2(out)))\n\t\t# print(out.shape)\n\t\tout = self.bn3(F.leaky_relu(self.conv3(out)))\n\t\t# print(out.shape)\n\t\tout = self.bn4(F.leaky_relu(self.conv4(out)))\n\t\t# print('conv4', out.shape)\n\t\t# print(x.shape[0])\n\t\tout = out.view(x.shape[0], -1)\n\t\t# print('reshaped ', out.shape)\n\t\tout = F.leaky_relu(self.linear1(out))\n\n\t\tout = self.dropout1(out)\n\t\tout = F.leaky_relu(self.linear2(out))\n\t\tout = self.dropout2(out)\n\t\tout = torch.sigmoid(self.linear3(out))  # F.sigmoid is deprecated; use torch.sigmoid\n\t\treturn out\n\n\ndef count_parameters(model):\n    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\ndef test_net():\n\tCUDA = torch.cuda.is_available()\n\tgenerator = Generator()\n\tdiscriminator = Discriminator(128)\n\n\tprint('Generator Params', count_parameters(generator))\n\tprint('Discriminator Params', count_parameters(discriminator))\n\ttensor = torch.randn(5, 1, 128, 128)\n\t\n\tif CUDA:\n\t\tgenerator = generator.cuda()\n\t\tdiscriminator = discriminator.cuda()\n\t\ttensor = tensor.cuda()\n\t\t# val = input()\n\t \n\tout_l, out_ab = generator(tensor)\n\t\n\tprint(out_l.shape)\n\tprint(out_ab.shape)\n\n\tdisc_out = discriminator(out_ab)\n\n\tprint(disc_out.shape)\n\nif __name__ == '__main__':\n\ttest_net()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337595091","text":"'''\nDefinition and example of super stack below the code\n'''\nclass SuperStack:\n    def __init__(self):\n        self.stack = []\n\n    def push(self, element):\n        data = [element, 0]  # [value, pending increment shared with the elements below]\n        self.stack.append(data)\n        return self.top()\n\n    def pop(self):\n        last = self.stack.pop()\n        if len(self.stack) > 0:\n            # fold the popped entry's pending increment into the new top (lazy propagation)\n            self.stack[-1][1] += last[1]\n        return self.top()\n\n    def inc(self, x, d):\n        self.stack[x - 1][1] += d\n        return self.top()\n\n    def top(self):\n        if len(self.stack) > 0:\n            print(\"%s\" %sum(self.stack[-1]))\n            return sum(self.stack[-1])\n        else:\n            print(\"Empty!\")\n            return None\n\nss = SuperStack()\nss.push(4)\nss.pop()\nss.push(3)\nss.push(5)\nss.push(2)\nss.inc(3, 1)\nss.pop()\nss.push(1)\nss.inc(2, 2)\nss.push(4)\nss.pop()\nss.pop()\n\n\n\n'''\nYou are given an empty stack. Your task is to perform following three operations:\npush a: Push an integer a, to the top of the stack\npop: Pop the top element from the stack. It is guaranteed that stack is not empty, when performing the pop operation.\ninc x d: Add d to bottom x elements of the stack.\n \nAfter each operation, print the top element of the stack, if after an operation, the stack becomes empty, then print EMPTY.\n \nInput Format:\nThe first line of the input is n, total number of operations performed on the stack. 
Each of the next n lines is one of the three operations listed above.\n \nConstraints:\n0 ≤ n ≤ 2 x 105\n-109 ≤ a, d ≤ 109\n1 ≤ x ≤ size of the stack at the time of the operation\nThere is no pop operation when the stack is empty.\n \nOutput Format:\nFor each operation, output the top element of the stack. If after an operation, the stack becomes empty, print EMPTY.\n \nSample Input\n12\npush 4\npop\npush 3\npush 5\npush 2\ninc 3 1\npop\npush 1\ninc 2 2\npush 4\npop\npop\n \nSample Output\n4\nEMPTY\n3\n5\n2\n3\n6\n1\n1\n4\n1\n8\n \nExplanation\nAssume S is the stack, initially S = []. Also, the leftmost element in the S is the bottom most element and the right most element is the top element of the stack.\n \npush 4: We push 4 on the top of the stack, so the stack is now S = [4]. The top element is 4, so we print 4 after this operation.\npop: We pop the top element from the stack, so the stack is now S = []. The stack is empty, so we print EMPTY after this operation.\npush 3: We push 3 on the top of the stack, so the stack is now S = [3]. The top element is 3, so we print 3 after this operation.\npush 5: We push 5 on the top of the stack, so the stack is now S = [3, 5]. The top element is 5, so we print 5 after this operation.\npush 2: We push 2 on the top of the stack, so the stack is now S = [3, 5, 2]. The top element is 2, so we print 2 after this operation.\ninc 3 1: We add 1 to bottom 3 elements of the stack, so the stack is now S = [4, 6, 3]. The top element is 3, so we print 3 after this operation.\npop: We pop the top element from the stack, so the stack is now S = [4, 6]. The top element is 6, so we print 6 after this operation.\npush 1: We push 1 on the top of the stack, so the stack is now S = [4, 6, 1]. The top element is 1, so we print 1 after this operation.\ninc 2 2: We add 2 to bottom 2 elements of the stack, so the stack is now S = [6, 8, 1]. The top element is 1, so we print 1 after this operation.\npush 4: We push 4 on the top of the stack, so the stack is now S = [6, 8, 1, 4]. The top element is 4, so we print 4 after this operation.\npop: We pop the top element from the stack, so the stack is now S = [6, 8, 1]. The top element is 1, so we print 1 after this operation.\npop: We pop the top element from the stack, so the stack is now S = [6, 8]. The top element is 8, so we print 8 after this operation.\n'''","sub_path":"IK/LL, Queues, Stack/super_stack.py","file_name":"super_stack.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"224322192","text":"##################################\n### berechnung aller seiten am ###\n### rechtwikligen dreieck ###\n### dreiecksberechnung ver. 
02 ###\n### 10.10.2012 ###\n##################################\nimport math\nprint(\"\\nGegenkathete berechnen!\")\nprint(\"- -- -- -- -- -- -- -\")\nankat = input(\"Länge der Ankathete eingeben: \")\nneig = input(\"Winkelmass in °: \")\nankat2 = float(ankat.replace(\",\", \".\"))\nneig2 = float(neig.replace(\",\", \".\"))\ntang = math.tan(neig2*math.pi/180)\ngekat = ankat2*tang\nsinu = math.sin(neig2*math.pi/180)\nhypo = gekat/sinu\nprint(\"- -- -- -- -- -- -- -\")\nprint(\"die Gegenkathete ist: \", gekat, \"lang\")\nprint(\"- -- -- -- -- -- -- -\")\nprint(\"die Hypotenuse ist : \", hypo, \"lang\")\n\n#################################\n# ankat = Ankathete #\n# neig = Winkelmass #\n# gekat = Gegenkathete #\n# hypo = Hypotenuse #\n# sinu = sinus des Winkels #\n# tang = tangens des Winkels #\n# cotan = cotangens des Winkels #\n# cosi = cosinus des Winkels #\n#################################\n","sub_path":"dreiecksberechnung/dreieck_berechnung_02.py","file_name":"dreieck_berechnung_02.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338043723","text":"# -*- coding: utf-8 -*-\n\"\"\"\nHidden Markov Model\n\"\"\"\nimport string\nimport random\n\nfrom Bio import Alphabet\nfrom Bio.Seq import MutableSeq\nfrom Bio.Seq import Seq\n\nfrom Bio.HMM import MarkovModel\nfrom Bio.HMM import Trainer\nfrom Bio.HMM import Utilities\n\n#mendefine object emisi dan state\nclass emisi(Alphabet.Alphabet):\n letters = ['A', 'C', 'T', 'G']\n\n\nclass state(Alphabet.Alphabet):\n letters = ['1', '2', '3']\n \n\n#fungsi untuk mempermudah pengacakan\ndef guess_letter():\n return random.choice('ACTG')\ndef guess_state():\n return random.choice('123')\n\n#fungsi untuk membangkitkan state dan emisi sebanyak jumlah\ndef pembangkitStEm(jumlah):\n seq_em = MutableSeq('', emisi())\n seq_state = MutableSeq('', state())\n curr_state= guess_state()\n \n for i in range(jumlah):\n seq_state.append(curr_state)\n \n rand = guess_state()\n seq_em.append(guess_letter())\n \n curr_state = (guess_state())\n \n return seq_em.toseq(),seq_state.toseq()\n\n#fungsi untuk mengakhiri iterasi\ndef stop_criteria(log_likelihood_change, num_iterations):\n if log_likelihood_change < 0.01:\n return 1\n elif num_iterations >= 10:\n return 1\n else:\n return 0\n\n#membangun markov model \nmm = MarkovModel.MarkovModelBuilder(state(), emisi()) \n\n#mengizinkan semua transisi dari state yang ada\nmm.allow_all_transitions()\n\n#probabilitas perpindahan dirandom\nmm.set_random_probabilities()\n\n#algoritma Baum-Wetch dengan library\nbw_mm = mm.get_markov_model()\n\n#dibuat data training\nems, sts = pembangkitStEm(5000)\n\n#training data\nseq_training = Trainer.TrainingSequence(ems, Seq(\"\", state()))\ntrainer = Trainer.BaumWelchTrainer(bw_mm)\nmm_train = trainer.train([seq_training],stop_criteria)\n\n#percobaan dilakukan dengan membuat sequence random sepanjang 30\nem_coba, st_coba = pembangkitStEm(30)\n\n#algoritma Viterbi dengan library\nstate_akhir, prob = mm_train.viterbi(em_coba, state())\n\nprint('state dari ', em_coba, ' adalah :', state_akhir)","sub_path":"HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617369682","text":"\"\"\"Actions for a Epic Link in the DB.\"\"\"\nfrom .models import EpicLinks as EpicLinksModel\nfrom devcenter.server_utils import row2dict\n\n\nclass EpicLinks():\n\t\"\"\"Actions for a Epic Link in 
the DB.\"\"\"\n\n\tdef get_epic_links(self):\n\t\t\"\"\"Get all epic links.\"\"\"\n\t\tsession = self.login()\n\t\titems = session.query(EpicLinksModel)\n\n\t\tepic_links = {}\n\t\tfor item in items:\n\t\t\trow = row2dict(item)\n\t\t\tepic_links[ row['epic_link'] ] = row['epic_word']\n\n\t\tself.logout(session)\n\t\treturn epic_links","sub_path":"devcenter/sql/epic_links.py","file_name":"epic_links.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371641258","text":"from django.db import models\n\n\nclass BxSlide(models.Model):\n status_choice = (\n (0, '下线'),\n (1, '上线'),\n )\n status = models.IntegerField(choices=status_choice, default=1)\n img = models.ImageField(upload_to='static/img/lbt/')\n name = models.CharField(max_length=20, unique=True)\n href = models.CharField(max_length=256)\n create_date = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = \"BxSlide\"\n verbose_name_plural = '首页轮播'\n\n def __str__(self):\n return self.name\n\n\nclass Course(models.Model):\n status_choice = (\n (0, '下线'),\n (1, '上线'),\n )\n status = models.IntegerField(verbose_name='状态', choices=status_choice, default=1)\n weight = models.IntegerField(verbose_name='权重', default=0)\n icon = models.ImageField(verbose_name='图标', upload_to='static/img/icon/', null=True, blank=True)\n name = models.CharField(verbose_name='名称', max_length=32, unique=True)\n summary = models.TextField(verbose_name='简介', default=\"summary\")\n\n class Meta:\n db_table = 'Course'\n verbose_name_plural = '课程'\n\n def __str__(self):\n return self.name\n\n\nclass StudentInfo(models.Model):\n status_choice = (\n (0, '下线'),\n (1, '上线'),\n )\n status = models.IntegerField(verbose_name='状态', choices=status_choice, default=1)\n weight = models.IntegerField(verbose_name='权重', default=0)\n name = models.CharField(max_length=10, verbose_name='姓名', unique=True)\n salary = models.IntegerField(verbose_name='薪资')\n pic = models.ImageField(verbose_name='头像', upload_to='static/img/student_img/')\n company = models.CharField(max_length=50, verbose_name='就业单位')\n\n class Meta:\n db_table = 'StudentInfo'\n verbose_name_plural = '学生信息'\n\n def __str__(self):\n return self.name\n\n\nclass StudentMoreInfo(models.Model):\n name = models.ForeignKey(StudentInfo, on_delete=models.CASCADE)\n age = models.IntegerField(verbose_name='年龄')\n detail = models.TextField(verbose_name='个人简介')\n\n class Meta:\n db_table = 'StudentMoreInfo'\n verbose_name_plural = '学生详情'\n\n def __str__(self):\n return self.name.name\n\n\nclass News(models.Model):\n status_choice = (\n (0, '下线'),\n (1, '上线'),\n )\n status = models.IntegerField(verbose_name='状态', choices=status_choice, default=1)\n weight = models.IntegerField(verbose_name='权重', default=0)\n title = models.CharField(verbose_name='标题', max_length=32)\n detail = models.TextField(verbose_name='简介', null=True)\n text = models.TextField(verbose_name='公告信息')\n create_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n db_table = 'News'\n verbose_name_plural = '最新公告'\n\n def __str__(self):\n return self.title\n\n\nclass SchoolActivity(models.Model):\n status_choice = (\n (0, '下线'),\n (1, '上线'),\n )\n status = models.IntegerField(verbose_name='状态', choices=status_choice, default=1)\n weight = models.IntegerField(verbose_name='权重', default=0)\n name = models.CharField(verbose_name='活动名称', max_length=32)\n leader = models.CharField(verbose_name='负责人', max_length=20)\n title = 
models.TextField(verbose_name='简介', max_length=30)  # max_length takes an int, not the string '30'\n    detail = models.TextField(verbose_name='活动详情')\n    photo = models.ImageField(verbose_name='活动照片', upload_to='static/img/school_img/')\n\n    class Meta:\n        db_table = 'SchoolActivity'\n        verbose_name_plural = '校园活动'\n\n    def __str__(self):\n        return self.name\n\n\nclass Job(models.Model):\n    name = models.CharField(max_length=20, verbose_name='职位')\n    salary = models.IntegerField(verbose_name='薪资')\n    skill = models.TextField(verbose_name='能力要求')\n\n    class Meta:\n        db_table = 'JobFound'\n        verbose_name_plural = '招聘信息'\n\n    def __str__(self):\n        return self.name\n\n\nclass Direction(models.Model):\n    weight = models.IntegerField(verbose_name='权重', default=0)\n    name = models.CharField(verbose_name='名称', max_length=32)\n    classification = models.ManyToManyField('Classification')\n\n    class Meta:\n        db_table = 'Direction'\n        verbose_name_plural = '视频方向'\n\n    def __str__(self):\n        return self.name\n\n\nclass Classification(models.Model):\n    weight = models.IntegerField(verbose_name='权重', default=0)\n    name = models.CharField(verbose_name='名称', max_length=32)\n\n    class Meta:\n        db_table = 'Classification'\n        verbose_name_plural = '视频分类'\n\n    def __str__(self):\n        return self.name\n\n\nclass Level(models.Model):\n    title = models.CharField(verbose_name='级别', max_length=32)\n\n    class Meta:\n        verbose_name_plural = '级别'\n        db_table = 'Level'\n\n    def __str__(self):\n        return self.title\n\n\nclass Video(models.Model):\n    status_choice = (\n        (1, '下线'),\n        (2, '上线'),\n    )\n    status = models.IntegerField(verbose_name='状态', choices=status_choice, default=1)\n    level = models.ForeignKey('Level', on_delete=models.CASCADE)  # on_delete is required on Django 2.0+, matching StudentMoreInfo above\n    weight = models.IntegerField(verbose_name='权重', default=0)\n    classification = models.ForeignKey('Classification', null=True, blank=True, on_delete=models.CASCADE)\n    title = models.CharField(verbose_name='标题', max_length=32)\n    summary = models.CharField(verbose_name='简介', max_length=32)\n    img = models.ImageField(verbose_name='图片', upload_to='static/img/video/')\n    href = models.CharField(verbose_name='视频地址', max_length=256)\n    create_time = models.DateTimeField(auto_now_add=True)\n\n    class Meta:\n        db_table = 'Video'  # was the bare annotation db_table: 'Video', which Django silently ignores\n        verbose_name_plural = '视频'\n\n    def __str__(self):\n        return self.title\n","sub_path":"app01/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"202060719","text":"# -*- coding: utf-8 -*-\n\"\"\"TcEx Playbook Common module\"\"\"\nimport json\nimport os\nfrom .test_case import TestCase\n\n\nclass TestCasePlaybookCommon(TestCase):\n    \"\"\"Playbook TestCase Class\"\"\"\n\n    _output_variables = None\n    redis_client = None\n    redis_staging_data = {\n        '#App:1234:empty!String': '',\n        '#App:1234:null!String': None,\n        '#App:1234:non-ascii!String': 'ドメイン.テスト',\n    }\n\n    @property\n    def default_args(self):\n        \"\"\"Return App default args.\"\"\"\n        args = super(TestCasePlaybookCommon, self).default_args\n        args.update(\n            {\n                'tc_playbook_db_context': self.context,\n                'tc_playbook_db_path': os.getenv('TC_PLAYBOOK_DB_PATH', 'localhost'),\n                'tc_playbook_db_port': os.getenv('TC_PLAYBOOK_DB_PORT', '6379'),\n                'tc_playbook_db_type': os.getenv('TC_PLAYBOOK_DB_TYPE', 'Redis'),\n                'tc_playbook_out_variables': '',\n            }\n        )\n        return args\n\n    @property\n    def output_variables(self):\n        \"\"\"Return playbook output variables\"\"\"\n        if self._output_variables is None:\n            self._output_variables = []\n            # Currently there is no support for projects with multiple install.json files.\n            for p in self.install_json.get('playbook', 
{}).get('outputVariables') or []:\n # \"#App:9876:app.data.count!String\"\n self._output_variables.append(\n '#App:{}:{}!{}'.format(9876, p.get('name'), p.get('type'))\n )\n return self._output_variables\n\n def populate_output_variables(self, profile_name):\n \"\"\"Generate validation rules from App outputs.\"\"\"\n profile_filename = os.path.join(self.profiles_dir, '{}.json'.format(profile_name))\n with open(profile_filename, 'r+') as fh:\n profile_data = json.load(fh)\n\n redis_data = self.redis_client.hgetall(self.context)\n outputs = {}\n for variable in self.output_variables:\n data = redis_data.get(variable.encode('utf-8'))\n\n # validate redis variables\n if data is None:\n # log warning missing output data\n self.log.error(\n '[{}] Missing redis output for variable {}'.format(profile_name, variable)\n )\n else:\n data = json.loads(data.decode('utf-8'))\n\n # validate validation variables\n validation_output = (profile_data.get('outputs') or {}).get(variable)\n if validation_output is None and profile_data.get('outputs') is not None:\n self.log.error(\n '[{}] Missing validations rule: {}'.format(profile_name, variable)\n )\n outputs[variable] = {'expected_output': data, 'op': 'eq'}\n\n if profile_data.get('outputs') is None:\n # update the profile\n profile_data['outputs'] = outputs\n\n fh.seek(0)\n fh.write(json.dumps(profile_data, indent=2, sort_keys=True))\n fh.truncate()\n\n def run(self, args):\n \"\"\"Implement in Child Class\"\"\"\n raise NotImplementedError('Child class must implement this method.')\n\n def stage_data(self, staged_data):\n \"\"\"Stage the data in the profile.\"\"\"\n for key, value in list(staged_data.get('redis', {}).items()):\n self.stager.redis.stage(key, value)\n","sub_path":"tcex/testing/test_case_playbook_common.py","file_name":"test_case_playbook_common.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73019464","text":"import datetime, random\nfrom datetime import date, timedelta\nfrom dateutil.relativedelta import *\nimport requests, json\n\nclass Nivel2:\n #def setear_fechas(self):\n def leer_gdd(self):\n try:\n response = requests.get( 'http://127.0.0.1:8080/periodos/api', headers={'Accept':'application/json'})\n except:\n data = \"Favor de Iniciar el Servicio\"\n return print(data)\n if response.status_code == 200:\n data = response.json()\n fecha_inicio = datetime.datetime.strptime(data['fechaCreacion'], '%Y-%m-%d')\n fecha_termino = datetime.datetime.strptime(data['fechaFin'], '%Y-%m-%d')\n fecha_inicio_real = fecha_inicio\n fecha_termino_real = fecha_termino\n meses = relativedelta(months=+1)\n listado_fechas_recibidas = []\n listado_fechas_faltantes = []\n for fecha in data['fechas']:\n fecha_inicio_string = str(fecha)\n listado_fechas_recibidas.append(fecha_inicio_string)\n while fecha_inicio.date() <= fecha_termino.date():\n fecha_inicio_string = str(fecha_inicio.date())\n if fecha_inicio_string not in listado_fechas_recibidas:\n listado_fechas_faltantes.append(fecha_inicio_string)\n fecha_inicio += meses\n print((\"fecha creación: {}\").format(str(fecha_inicio_real.date())))\n print((\"fecha fin: {}\").format(str(fecha_termino_real.date())))\n print(\"fechas recibidas:\")\n for fecha_recibida in listado_fechas_recibidas:\n print(fecha_recibida+\",\", end=\" \")\n print(\"\\n\")\n print(\"fechas faltantes:\")\n for fecha_faltante in listado_fechas_faltantes:\n print(fecha_faltante+\",\", end=\" \")\n print(\"\\n\")\n\n\nnivel2 = 
Nivel2()\nnivel2.leer_gdd()\n","sub_path":"solucion/nivel2.py","file_name":"nivel2.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185089269","text":"import sys\n\ndist_to_points = dict()\ncurr = (0, 0)\npoints = [(0,0)]\nmin_steps = 9223372036854775807\ndelta = {\"L\": (-1, 0), \"R\": (1, 0), \"U\": (0, 1), \"D\": (0, -1)}\nsteps2 = 0\n\ndef UpdatePoints(move):\n    global curr\n    global points\n    move_vector = delta[move[0]]\n    dist = int(move[1:])\n    points.extend([(curr[0]+(move_vector[0]*i), curr[1]+(move_vector[1]*i)) for i in range(1, dist+1)])\n    curr = points[-1]\n\n# Identify intersections and find smallest number of steps needed to reach an intersection\ndef IdentifyIntersections(move):\n    global curr\n    global dist_to_points\n    global steps2\n    global min_steps\n\n    move_vector = delta[move[0]]\n    dist = int(move[1:])\n    for i in range(1, dist+1):\n        xy = (curr[0]+(move_vector[0]*i), curr[1]+(move_vector[1]*i))\n        if xy in dist_to_points:\n            min_steps = min(min_steps, dist_to_points[xy] + steps2 + i)\n        curr = xy\n    steps2 = steps2 + dist\n\nf = open(sys.argv[1], \"r\")\n\n# wire 1: map() is lazy in Python 3, so iterate explicitly or UpdatePoints never runs\nfor move in f.readline().split(\",\"):\n    UpdatePoints(move)\n\nfor idx, point in enumerate(points):\n    if point not in dist_to_points:\n        dist_to_points[point] = idx\n\ndel dist_to_points[(0,0)]\ncurr = (0, 0)\n\n# wire 2: same fix -- consume the moves with a plain loop\nfor move in f.readline().split(\",\"):\n    IdentifyIntersections(move)\n\nprint(min_steps)\n","sub_path":"2019/3b.py","file_name":"3b.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"282860514","text":"import gymnasium as gym\nimport numpy as np\n\nfrom minigrid.benchmark import benchmark\nfrom minigrid.manual_control import key_handler, reset\nfrom minigrid.utils.window import Window\n\n\ndef test_benchmark():\n    \"Test that the benchmark function works for a specific environment\"\n    env_id = \"MiniGrid-Empty-16x16-v0\"\n    benchmark(env_id, num_resets=10, num_frames=100)\n\n\ndef test_window():\n    \"Testing the class functions of window.Window. 
This should locally open a window !\"\n title = \"testing window\"\n window = Window(title)\n\n img = np.random.rand(100, 100, 3)\n window.show_img(img)\n\n caption = \"testing caption\"\n window.set_caption(caption)\n\n window.show(block=False)\n\n window.close()\n\n\ndef test_manual_control():\n class FakeRandomKeyboardEvent:\n active_actions = [\"left\", \"right\", \"up\", \" \", \"pageup\", \"pagedown\"]\n reset_action = \"backspace\"\n close_action = \"escape\"\n\n def __init__(self, active_actions=True, reset_action=False) -> None:\n if active_actions:\n self.key = np.random.choice(self.active_actions)\n elif reset_action:\n self.key = self.reset_action\n else:\n self.key = self.close_action\n\n env_id = \"MiniGrid-Empty-16x16-v0\"\n env = gym.make(env_id)\n window = Window(env_id)\n\n reset(env, window)\n\n for i in range(3): # 3 resets\n for j in range(20): # Do 20 steps\n key_handler(env, window, FakeRandomKeyboardEvent())\n\n key_handler(\n env,\n window,\n FakeRandomKeyboardEvent(active_actions=False, reset_action=True),\n )\n\n # Close the environment\n key_handler(\n env, window, FakeRandomKeyboardEvent(active_actions=False, reset_action=False)\n )\n","sub_path":"tests/test_scripts.py","file_name":"test_scripts.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152404936","text":"\"\"\"\n 989. Add to Array-Form of Integer\n For a non-negative integer X, the array-form of X is an array \n of its digits in left to right order. For example, if X = 1231, then the array form is [1,2,3,1].\n Given the array-form A of a non-negative integer X, return the array-form of the integer X+K.\n\n Example 1:\n Input: A = [1,2,0,0], K = 34\n Output: [1,2,3,4]\n Explanation: 1200 + 34 = 1234\n Example 2:\n Input: A = [2,7,4], K = 181\n Output: [4,5,5]\n Explanation: 274 + 181 = 455\n Example 3:\n Input: A = [2,1,5], K = 806\n Output: [1,0,2,1]\n Explanation: 215 + 806 = 1021\n Example 4:\n Input: A = [9,9,9,9,9,9,9,9,9,9], K = 1\n Output: [1,0,0,0,0,0,0,0,0,0,0]\n Explanation: 9999999999 + 1 = 10000000000\n Note:\n\n 1 <= A.length <= 10000\n 0 <= A[i] <= 9\n 0 <= K <= 10000\n If A.length > 1, then A[0] != 0\n\"\"\"\nclass Solution:\n def addToArrayForm(self, A: List[int], K: int) -> List[int]:\n carry=0\n i=len(A)-1\n while (carry or K) and i>=0:\n to_be=K%10+carry+A[i]\n A[i]=to_be%10\n carry=to_be//10\n i-=1\n K//=10\n if K==0 and carry==0:\n # if there is no carry or nothing to add, we can return A\n return A\n #print('K={} , carry={} , A={}'.format(K, carry, A))\n K+=carry # if carry if left or K is left, it needs to be added to A\n new=[]\n while K:\n new.append(K%10)\n K//=10\n new.reverse()\n new.extend(A)\n return new\n","sub_path":"LeetCode_exercises/ex0989_add_to_array_form_of_integer.py","file_name":"ex0989_add_to_array_form_of_integer.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191468232","text":"import perceptron_learning_algorithm as pla\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom random import random\n\nfig = plt.figure(num = \"Perceptron Learning Algrotihm\")\n\nax = plt.axes(xlim=[0, 1000], ylim=[0, 1000])\n\nax.plot([0, 1000], [pla.c, pla.m*1000 + pla.c], color = \"green\", lw=3, zorder = 1)\n\nfor point in pla.dataset:\n #k = black\n facecolor = \"k\" if point[3] == 1 else \"none\" \n ax.scatter(point[1], point[2], facecolor = 
facecolor, edgecolor='k', zorder = 2)\n\n\n[line] = ax.plot([],[],lw=3, color = \"red\", zorder=1)\n\ndef animate(i):\n if(i>=len(pla.points)):\n plt.pause(1)\n plt.close('all')\n else:\n line.set_data(pla.points[i][0], pla.points[i][1])\n\nanim = FuncAnimation(\n fig, \n animate, \n frames = pla.epochs+1, \n interval = 250)\n\nplt.show()","sub_path":"Perceptron/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405786135","text":"#!/usr/bin/python3\n\nfrom Bio import SeqIO\nimport re\nimport sys\nimport argparse\nfrom collections import namedtuple\n\n\nparser = argparse.ArgumentParser('extracts sequences from FASTA based on GFF '\n 'annotations')\nparser.add_argument('-f', '--fasta', help='FASTA genome file', required=True)\nparser.add_argument('-g', '--gff', help='GFF annotation file',\n type=argparse.FileType('r'), default=sys.stdin)\nparser.add_argument('-p', '--padding', help='extracted sequences are padded'\n 'by N nucleotides', type=int, default=0, metavar='N')\nparser.add_argument('-l', '--max-length', help='limit sequenes to N '\n 'nucleotides', type=int, default=0, metavar='N')\nparser.add_argument('-t', '--feature', help='filter by feature in the 3rd '\n 'column', nargs='*', default=[])\nparser.add_argument('-i', '--ident', help='identity field in last column',\n default='ID')\nparser.add_argument('-r', '--region', nargs=2, type=int, metavar='START,END',\n help='extract sequence from START to END or region '\n '(higher number first for anti-sense strand)')\n\n\nSeq = namedtuple('Seq', 'chro, start, end, strand, ident, seq')\n\n\nID_PATTERN = re.compile('^(\\S+)\\s+\\S+\\s+(\\S+)\\s+' # chro, _, type\n '(\\d+)\\s+(\\d+)\\s+\\S+\\s+([\\+-])' # from, to, _, strand\n '\\s+\\S+\\s+ID=([^;]+);?(.+)$') # _, ID, rest\nGFF_PATTERN = re.compile('^(\\S+)\\t+[^\\t]+\\s+(\\S+)\\s+(\\d+)\\s+'\n '(\\d+)\\s+\\S+\\s+([\\+-]).*$')\nGENOME_PATTERN = re.compile('gi\\|\\d+\\|ref\\|(NC_\\d+\\.\\d+)\\|\\s*.*')\n\n\ndef parse_gff_line(line):\n if line.startswith('#'):\n return None\n m = GFF_PATTERN.match(line)\n if m:\n chro = m.group(1)\n feature = m.group(2)\n start = int(m.group(3))\n end = int(m.group(4))\n strand = m.group(5)\n ident = '%s|%d-%d' % (chro, start, end)\n m = re.search(ID_PATTERN, line)\n if m:\n ident = m.group(6)\n if end < start:\n start, end = (end, start)\n return (chro, feature, start, end, strand, ident)\n\n\ndef parse_gff(fasta_name, gff=sys.stdin, padding=0, maxlen=0, typefilter=[]):\n fasta_seqs = SeqIO.to_dict(SeqIO.parse(fasta_name, 'fasta'))\n\n # sanitizing chromosome names\n tmp = dict()\n changed = 0\n for f in fasta_seqs:\n m = GENOME_PATTERN.match(f)\n if m:\n tmp[m.group(1)] = fasta_seqs[f]\n sys.stderr.write('found GI for refseq %s\\n' % f)\n changed = 1\n if changed == 1:\n fasta_seqs = tmp\n\n if gff == sys.stdin:\n sys.stderr.write(\"GFF input:\")\n for line in gff:\n sys.stderr.write('\\r \\r')\n gff_line = parse_gff_line(line)\n if gff_line is None:\n continue\n (chro, feature, start, end, strand, ident) = gff_line\n start -= padding\n end += padding\n if start < 1:\n start = 1\n if end > len(str(fasta_seqs[chro].seq)):\n end = len(str(fasta_seqs[chro].seq))\n if not typefilter or feature in typefilter:\n seq = fasta_seqs[chro].seq[start-1:end]\n if strand is '-':\n seq = seq.reverse_complement()\n if maxlen == 0 or abs(start-end) < maxlen:\n yield Seq(chro, start, end, strand, ident, seq)\n\n\ndef print_fasta(seq):\n 
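# one FASTA record per feature: the header packs chromosome, coordinates, strand and ID\n    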
%s|%d-%d">
print(\">%s|%d-%d|%s|%s\\n%s\\n\" % (seq.chro, seq.start, seq.end, seq.strand,\n seq.ident, seq.seq))\n\n\ndef main():\n args = parser.parse_args()\n global ID_PATTERN\n ID_PATTERN = re.compile('^(\\S+)\\s+\\S+\\s+(\\S+)\\s+' # chro, _, type\n '(\\d+)\\s+(\\d+)\\s+\\S+\\s+([\\+-])' # from, to, _, strand\n '\\s+\\S+\\s+.*%s=([^;]+);?(.+)$' % # _, ID, rest\n args.ident)\n\n # only a region is specified\n if args.region:\n r = SeqIO.read(args.fasta, 'fasta')\n (start, end, strand) = (args.region[0], args.region[1], '+')\n if start > end:\n seq = str(r.seq[end-1:start].reverse_complement())\n strand = '-'\n else:\n seq = str(r.seq[start-1:end])\n print_fasta(Seq(r.id, start, end, strand, 'region', seq))\n exit()\n\n for g in parse_gff(args.fasta, args.gff, args.padding, args.max_length,\n args.feature):\n print_fasta(g)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gff2fasta.py","file_name":"gff2fasta.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379517199","text":"import os\nimport random\nimport time\nimport unittest\n\nfrom elasticdl.python.common.k8s_tensorboard_client import TensorBoardClient\n\n\n@unittest.skipIf(\n os.environ.get(\"K8S_TESTS\", \"True\") == \"False\",\n \"No Kubernetes cluster available\",\n)\nclass K8sTensorBoardClientTest(unittest.TestCase):\n def test_create_tensorboard_service(self):\n tb_client = TensorBoardClient(\n image_name=None,\n namespace=\"default\",\n job_name=\"test-job-%d-%d\"\n % (int(time.time()), random.randint(1, 101)),\n event_callback=None,\n )\n tb_client._k8s_client.create_tensorboard_service(\n port=80, service_type=\"LoadBalancer\"\n )\n time.sleep(1)\n service = tb_client._get_tensorboard_service()\n self.assertTrue(\"load_balancer\" in service[\"status\"])\n self.assertEqual(service[\"spec\"][\"ports\"][0][\"port\"], 80)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"elasticdl/python/tests/k8s_tensorboard_client_test.py","file_name":"k8s_tensorboard_client_test.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367424158","text":"import re, pprint, os, numpy\r\n\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.stem import SnowballStemmer\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\nfrom sklearn.metrics.cluster import *\r\nfrom sklearn.cluster import AgglomerativeClustering\r\nfrom nltk.cluster import GAAClusterer\r\nfrom sklearn.metrics.cluster import adjusted_rand_score\r\n\r\n\r\ndef read_file(file):\r\n myfile = open(file, \"r\")\r\n data = \"\"\r\n lines = myfile.readlines()\r\n for line in lines:\r\n data = data + line\r\n myfile.close()\r\n return data\r\n\r\n\r\ndef remove_stop_words(terms):\r\n filtered_terms = [term for term in terms if term not in stopwords.words('english')]\r\n return filtered_terms\r\n\r\n\r\ndef lemmatize(terms):\r\n lem_terms = list()\r\n wn_lem = WordNetLemmatizer()\r\n for term in terms:\r\n lem_terms.append(wn_lem.lemmatize(term, pos='v'))\r\n return lem_terms\r\n\r\n\r\ndef snow_stemming(terms):\r\n st_terms = list()\r\n stemmer = SnowballStemmer(\"english\")\r\n for term in terms:\r\n st_terms.append(stemmer.stem(term))\r\n return st_terms\r\n\r\n\r\ndef lanc_stemming(terms):\r\n st_terms = list()\r\n stemmer = LancasterStemmer()\r\n for term in terms:\r\n 
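%s|%d-%d">
# Lancaster is the most aggressive of the three stemmers used in this script.\r\n 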
st_terms.append(stemmer.stem(term))\r\n return st_terms\r\n\r\n\r\ndef porter_stemming(terms):\r\n st_terms = list()\r\n stemmer = PorterStemmer()\r\n for term in terms:\r\n st_terms.append(stemmer.stem(term))\r\n return st_terms\r\n\r\ndef pos_tagger(terms):\r\n tag_terms = nltk.pos_tag(terms)\r\n tag_terms = [term for term in tag_terms if term[1] in [\"CD\",\"JJ\",\"JJR\",\"JJS\",\"MD\",\"NN\",\"NNP\",\"NNPS\",\"NNS\",\r\n \"PDT\",\"PRP\",\"PRP$\",\"RB\",\"RBR\",\"RBS\",\"VB\",\"VBD\",\"VBG\",\"VBN\",\r\n \"VBP\",\"VBZ\"]]\r\n return tag_terms\r\n\r\n\r\ndef cluster_texts(texts, clustersNumber, distance):\r\n # Load the list of texts into a TextCollection object.\r\n collection = nltk.TextCollection(texts)\r\n print(\"Created a collection of\", len(collection), \"terms.\")\r\n\r\n # get a list of unique terms\r\n unique_terms = list(set(collection))\r\n print(\"Unique terms found: \", len(unique_terms))\r\n\r\n # Removing \"Thomas Baker\" because it's a constant in every document\r\n print(\"Removing 'Thomas Baker'\")\r\n unique_terms = [term for term in unique_terms if term not in [\"thomas\",\"baker\"]]\r\n\r\n # Remove stopwords\r\n print(\"Removing stopwords\")\r\n filtered_terms = remove_stop_words(unique_terms)\r\n print(\"Removed \" + str(len(unique_terms) - len(filtered_terms)) + \" terms\")\r\n unique_terms = filtered_terms\r\n\r\n # Remove other \"useless\" words\r\n print(\"POS-tagging\")\r\n pos_terms = pos_tagger(unique_terms)\r\n pos_terms = [term[0] for term in pos_terms]\r\n print(\"Removed \" + str(len(unique_terms) - len(pos_terms)) + \" terms\")\r\n unique_terms = pos_terms\r\n\r\n # Lemmatization\r\n print(\"Lemmatization\")\r\n lem_terms = lemmatize(unique_terms)\r\n collection = lemmatize(collection)\r\n # Removing possible duplicated lemmatized words\r\n print(\"Removed \" + str(len(unique_terms) - len(list(set(lem_terms)))) + \" terms\")\r\n unique_terms = list(set(lem_terms))\r\n\r\n # # # Stemming\r\n # print(\"Stemming\")\r\n # # st_terms = snow_stemming(unique_terms)\r\n # # st_terms = lanc_stemming(unique_terms)\r\n # st_terms = porter_stemming(unique_terms)\r\n # # collection = snow_stemming(collection)\r\n # # collection = lanc_stemming(collection)\r\n # collection = porter_stemming(collection)\r\n # print(\"Removed \" + str(len(unique_terms) - len(list(set(st_terms)))) + \" terms\")\r\n # # Removing possible duplicated stemmed words\r\n # unique_terms = list(set(st_terms))\r\n\r\n print(\"Final unique terms: \" + str(len(unique_terms)))\r\n\r\n collection = nltk.TextCollection(collection)\r\n\r\n ### And here we actually call the function and create our array of vectors.\r\n vectors = [numpy.array(TFIDF(lemmatize(f), unique_terms, collection)) for f in texts]\r\n print(\"Vectors created.\")\r\n\r\n # # initialize the clusterer\r\n # clusterer = GAAClusterer(clustersNumber)\r\n # clusters = clusterer.cluster(vectors, True)\r\n\r\n clusterer = AgglomerativeClustering(n_clusters=clustersNumber,\r\n linkage=\"average\", affinity=distance)\r\n clusters = clusterer.fit_predict(vectors)\r\n\r\n return clusters\r\n\r\n\r\n# Function to create a TFIDF vector for one document. 
For each of\r\n# our unique words, we have a feature which is the tf-idf for that word\r\n# in the current document\r\ndef TFIDF(document, unique_terms, collection):\r\n word_tf = []\r\n for word in unique_terms:\r\n word_tf.append(collection.tf_idf(word, document))\r\n return word_tf\r\n\r\ndef TF(document, unique_terms, collection):\r\n word_tf = []\r\n for word in unique_terms:\r\n word_tf.append(collection.tf(word, document))\r\n return word_tf\r\n\r\nif __name__ == \"__main__\":\r\n folder = \"Thomas_Baker\"\r\n # Empty list to hold text documents.\r\n texts = []\r\n\r\n listing = sorted(os.listdir(folder))\r\n for file in listing:\r\n if file.endswith(\".txt\"):\r\n # print(\"Reading \"+file)\r\n url = folder + \"/\" + file\r\n f = open(url, encoding=\"latin-1\")\r\n # Forcing lower case\r\n raw = f.read().lower()\r\n f.close()\r\n tokens = nltk.word_tokenize(raw)\r\n text = nltk.Text(tokens)\r\n texts.append(text)\r\n\r\n print(\"Prepared \", len(texts), \" documents...\")\r\n print(\"They can be accessed using texts[0] - texts[\" + str(len(texts) - 1) + \"]\")\r\n\r\n distanceFunction = \"cosine\"\r\n # distanceFunction = \"euclidean\"\r\n test = cluster_texts(texts, 4, distanceFunction)\r\n print(\"test:\\t\\t\", list(test))\r\n # Gold Standard\r\n reference = [0, 1, 2, 0, 0, 0, 3, 0, 0, 0, 2, 0, 3, 3, 0, 1, 2, 0, 1]\r\n print(\"reference:\\t\", reference)\r\n\r\n # Evaluation\r\n print(\"rand_score: \", adjusted_rand_score(reference, test))\r\n","sub_path":"jcanom/RecInfo/practica_desambiguacion/PNDBasicClustering.py","file_name":"PNDBasicClustering.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"603744826","text":"# Load the word dictionary and the model\nimport pandas as pd\nword_dict = pd.read_csv('word_dict.csv', encoding=\"utf8\")\nword_dict = word_dict.drop(['0'], axis=1)\nword_dict.columns = ['0', 'id']\n\nimport pickle\ndef load_obj(name):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)\n\nint_catagory = load_obj('int_catagory')\ncatagory_dict = load_obj('catagory_dict')\n\n# Load the trained model\nfrom keras.models import load_model\nmodel = load_model('model.hdf5')\n\n\nfrom keras.preprocessing import sequence\nimport re\nmaxlen = 25\n\n# Prediction function: returns the single most likely category\ndef predict_(title):\n words = re.findall('[\\x80-\\xff]{3}|[\\w\\W]', title)\n # w2v = [word_dict[word_dict['0'] == x]['id'].values[0] for x in words]\n w2v = []\n # w2v = [word_dict[word_dict['0'] == x]['id'].value[0] for x in words if word_dict[word_dict['0'] == x]['id'].value]\n for x in words:\n if word_dict[word_dict['0'] == x]['id'].values:\n w2v.append(word_dict[word_dict['0'] == x]['id'].values[0])\n else:\n pass\n xn = sequence.pad_sequences([w2v], maxlen=maxlen)\n predicted = model.predict_classes(xn, verbose=0)[0]\n return int_catagory[predicted]\n\n# Return the three most likely categories\ndef predict_3(title):\n words = re.findall('[\\x80-\\xff]{3}|[\\w\\W]', title)\n # w2v = [word_dict[word_dict['0'] == x]['id'].value[0] for x in words if word_dict[word_dict['0'] == x]['id'].value]\n w2v = []\n for x in words:\n if word_dict[word_dict['0'] == x]['id'].values:\n w2v.append(word_dict[word_dict['0'] == x]['id'].values[0])\n else:\n pass\n xn = sequence.pad_sequences([w2v], maxlen=maxlen)\n predicted = model.predict(xn, verbose=0)[0]\n predicted_sort = predicted.argsort()\n li = [(int_catagory[p], predicted[p] * 100) for p in predicted_sort[-3:]]\n return li[::-1]\n\nmsg = [\"第三届粤港澳温州人大会倡议温商回归实业\", \"香港警方两破大麻种植场 检获大麻市值超3000万港元\", \"外媒:巴西咖啡倍受欢迎 特种咖啡年销售量增长\"]\ndef l3(msg):\r\n 
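# Prints the top-3 (category, probability %) pairs for one headline,\r\n # e.g. (hypothetical output) [('finance', 62.1), ('tech', 20.4), ('society', 9.8)].\r\n 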
print(predict_3(msg))\r\ndef l(msg):\r\n print(predict_(msg))\r\n\r\nfor i in msg:\r\n l3(i)\r\n l(i)\r\n\r\n","sub_path":"machine_learning/news_category/news_category_classify_load_model.py","file_name":"news_category_classify_load_model.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374043711","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 16 09:09:08 2018\n\n@author: xinger\n\"\"\"\n#------------------import---------------------\nimport os\nimport numpy as np\nimport pandas as pd\nimport bcolz\nfrom time import time\nfrom datetime import datetime\nimport sys\nsys.path.append(r'C:\\Users\\xinger\\Desktop\\git\\data_service\\data_Service')\nfrom origin import *\nfrom utils import *\n\n\n#-------------------var--------------------\n'''\nVariable definitions\n'''\nh5p = r'E:/Factor/data.hd5'\nbzp = 'E:\\bcolz_data'\n\n#------------------data---------------------\n'''\nFetch data:\nwhich data formats should be covered\n'''\n\njs = JaqsData()\ndata = js.get_daily_data(prop)\n\nww = WindData()\ndata = ww.get_daily_data(prop)\n\n\ndt = pd.concat(data)\njs_data = dataformat(pd.concat(data,axis=1))\n\n#------------------test---------------------\n'''\nScratch area for messy test code\n'''\n\nroot = r'C:\\Users\\xinger\\Desktop\\bcolz_ttt'\ntb = bcolz.ctable.fromdataframe(data,rootdir=root)\ntb.flush()\n\n\nctable = bcolz.open(path)\ndata = ctable.todataframe()\n'''\ntype(data)\nOut[195]: bcolz.ctable.ctable\n'''\n\n\nctable.fromdataframe()\nctable.flush()\n\n\ndata.fetchwhere('date>20170101')\n\ntable2 = bcolz.open(r'C:\\Users\\xinger\\.rqalpha\\bundle\\st_stock_days.bcolz','r')\n\n\n\n\n\ndata.columns = data.columns.get_level_values('symbol') + '|' + data.columns.get_level_values('fields')\ntb = bcolz.ctable.fromdataframe(data,rootdir=file)\ntb.flush()\n\n\n\n#----------------------------------------------\n'''\nUtility scripts\n'''\n\ndef load_bundle(start_date,end_date,fields):\n item = table.where('(trade_date>20160101)&(trade_date<20180101)',outcols = fields)\n return pd.DataFrame(list(item))\n\n\n\n\n\n\n\n","sub_path":"other/bcolz_test.py","file_name":"bcolz_test.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231463370","text":"import pytz\nfrom datetime import timedelta\nfrom ..notification_create import *\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\n\nDEBUG = False\n\ndef manage(request):\n\t\n\tif (DEBUG):\n\t\tprint(\"[DEBUG] scheduler start\")\n\n\ttz_hkt = pytz.timezone(\"Asia/Hong_Kong\")\n\t\n\t# filter booked timeslots among just-finished timeslots\n\tfinished_booked_timeslots = Timeslot.objects.filter(\n\t\tis_booked = True,\n\t\tis_finished = False,\n\t\tendTime__lte = datetime.now(tz=tz_hkt)\n\t)\n\tfor timeslot in finished_booked_timeslots:\n\t\t# calculate the fee to go to the tutor\n\t\tfee = timeslot.fee\n\n\t\t# session finished: the tutor gets paid\n\t\twallet = Wallet.objects.get(user=timeslot.tutor.user)\n\t\twallet.balance += fee\n\t\twallet.save()\n\n\t\t# tutor receipt, student review notification\n\t\tcreateReviewNotification(timeslot)\n\t\tcreateTransactionNotification(timeslot, fee, 'end')\n\t\tcreateTransactionRecord(timeslot, fee, 'end', None)\n\n\t\t# MyTutor receives its commission fee\n\t\tmytutors = MyTutors.objects.all()[0]\n\t\tmytutors.balance += fee * 0.05\n\t\tmytutors.save()\n\n\t# update is_finished state of timeslots\n\tfinished_timeslots = Timeslot.objects.filter(\n\t\tis_finished = 
False,\n\t\tendTime__lte = datetime.now(tz=tz_hkt)\n\t)\n\tfinished_timeslots.update(is_finished=True)\n\t\n\t# update cancellable status\n\tcancellable_timeslots = Timeslot.objects.filter(\n\t\tis_booked = True,\n\t\tstartTime__gte = datetime.now(tz=tz_hkt) + timedelta(days=1)\n\t)\n\n\tnon_cancellable_timeslots = Timeslot.objects.exclude(\n\t\tis_booked = True,\n\t\tstartTime__gte = datetime.now(tz=tz_hkt) + timedelta(days=1)\n\t)\n\tnon_cancellable_timeslots.update(cancellable = False)\n\tcancellable_timeslots.update(cancellable = True)\n\n\t# update bookable status\n\tbookable_timeslots = Timeslot.objects.filter(\n\t\tis_booked = False,\n\t\tstartTime__gte = datetime.now(tz=tz_hkt) + timedelta(days=1),\n\t)\n\t\n\tnon_bookable_timeslots = Timeslot.objects.exclude(\n\t\tis_booked = False,\n\t\tstartTime__gte = datetime.now(tz=tz_hkt) + timedelta(days=1),\n\t)\n\tbookable_timeslots.update(bookable=True)\n\tnon_bookable_timeslots.update(bookable=False)\n\n\n\t# update within_week status\n\twithin_week_timeslots = Timeslot.objects.filter(\n\t\tbookable = True,\n\t\tstartTime__lte = datetime.now(tz=tz_hkt) + timedelta(weeks=1),\n\t\tstartTime__gte = datetime.now(tz=tz_hkt)\n\t)\n\tnot_within_week_timeslots = Timeslot.objects.exclude(\n\t\tbookable = True,\n\t\tstartTime__lte = datetime.now(tz=tz_hkt) + timedelta(weeks=1),\n\t\tstartTime__gte = datetime.now(tz=tz_hkt)\n\t)\n\twithin_week_timeslots.update(within_week=True)\n\tnot_within_week_timeslots.update(within_week=False)\n\t\n\tif (DEBUG):\n\t\tprint(\"[DEBUG] finished = \" + str(len(Timeslot.objects.filter(is_finished=True))))\n\t\tprint(\"[DEBUG] bookable = \" + str(len(bookable_timeslots)))\n\t\tprint(\"[DEBUG] not bookable = \" + str(len(non_bookable_timeslots)))\n\t\tprint(\"[DEBUG] cancellable = \" + str(len(cancellable_timeslots)))\n\t\tprint(\"[DEBUG] not cancellable = \" + str(len(non_cancellable_timeslots)))\n\t\tprint(\"[DEBUG] now = \" + str(datetime.now(tz=tz_hkt)))\n\t\tprint(\"[DEBUG] scheduler end processing\")\n\n\treturn HttpResponse(\"success\")","sub_path":"TP/tutoria/views/manage_view.py","file_name":"manage_view.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"89737172","text":"# Randomly fills a grid of size 10 x 10 with 0s and 1s and computes:\n# - the size of the largest homogenous region starting from the top left corner,\n# so the largest region consisting of connected cells all filled with 1s or\n# all filled with 0s, depending on the value stored in the top left corner;\n# - the size of the largest area with a checkers pattern.\n#\n# Written by *** and Eric Martin for COMP9021\n\nimport sys\nfrom random import seed, randint\n\ndim = 10\ngrid = [[None] * dim for _ in range(dim)]\n\ndef display_grid():\n for i in range(dim):\n print(' ', ' '.join(str(grid[i][j]) for j in range(dim)))\n\n# Possibly define other functions\n\ntry:\n arg_for_seed, density = input('Enter two nonnegative integers: ').split()\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\ntry:\n arg_for_seed, density = int(arg_for_seed), int(density)\n if arg_for_seed < 0 or density < 0:\n raise ValueError\nexcept ValueError:\n print('Incorrect input, giving up.')\n sys.exit()\nseed(arg_for_seed)\n# We fill the grid with randomly generated 0s and 1s,\n# with for every cell, a probability of 1/(density + 1) to generate a 0.\nfor i in range(dim):\n for j in range(dim):\n grid[i][j] = int(randint(0, density) != 0)\nprint('Here 
is the grid that has been generated:')\ndisplay_grid()\n\nsize_of_largest_homogenous_region_from_top_left_corner = 0\n# Replace this comment with your code\nimport copy\n\ngrid_cp = copy.deepcopy(grid)\n\n#area1\ndef replace_1_by_star(i,j):\n if grid[i][j] == 1:\n grid[i][j] = '*'\n if i:\n replace_1_by_star(i - 1, j)\n if i < dim - 1:\n replace_1_by_star(i + 1, j)\n if j:\n replace_1_by_star(i, j - 1)\n if j < dim - 1:\n replace_1_by_star(i, j + 1)\n \ndef replace_0_by_star(i,j):\n if grid[i][j] == 0:\n grid[i][j] = '*'\n if i:\n## print('11111')\n replace_0_by_star(i - 1, j)\n if i < dim - 1:\n## print('22222')\n replace_0_by_star(i + 1, j)\n if j:\n## print('33333')\n replace_0_by_star(i, j - 1)\n if j < dim - 1:\n## print('44444')\n replace_0_by_star(i, j + 1)\n\ndef count_star(dim,grid,count):\n for m in range(dim):\n for n in range(dim):\n if grid[m][n] == '*':\n count = count + 1\n return count\n\ncount1 = 0 \nif grid[0][0] == 1:\n replace_1_by_star(0,0)\n## print()\n## display_grid()\n## print('grid',grid)\n count1 = count_star(dim,grid,count1)\n## print('count1_1',count1)\nelse:\n replace_0_by_star(0,0)\n## print()\n## display_grid()\n## print('grid',grid)\n count1 = count_star(dim,grid,count1)\n## print('count_1_0',count1)\nsize_of_largest_homogenous_region_from_top_left_corner = count1\n\nprint()\nprint('The size_of the largest homogenous region from the top left corner is '\n f'{size_of_largest_homogenous_region_from_top_left_corner}.'\n )\n\nmax_size_of_region_with_checkers_structure = 0\n# Replace this comment with your code\ndef count_chess_area_1(i,j):\n if grid[i][j] == 1:\n grid[i][j] = '*'\n if i:\n count_chess_area_0(i - 1, j)\n if i < dim - 1:\n count_chess_area_0(i + 1, j)\n if j:\n count_chess_area_0(i, j - 1)\n if j < dim - 1:\n count_chess_area_0(i, j + 1)\n\ndef count_chess_area_0(i,j):\n if grid[i][j] == 0:\n grid[i][j] = '*'\n if i:\n count_chess_area_1(i - 1, j)\n if i < dim - 1:\n count_chess_area_1(i + 1, j)\n if j:\n count_chess_area_1(i, j - 1)\n if j < dim - 1:\n count_chess_area_1(i, j + 1)\n\ncount2 = 0\ncount_2 = 0\nfor i in range(dim):\n for j in range(dim):\n## print('\\n(i,j)',(i,j))\n grid = copy.deepcopy(grid_cp)\n## print()\n## display_grid()\n if grid[i][j] == 1:\n count_chess_area_1(i,j)\n## print()\n## display_grid()\n if grid[i][j] == 0:\n count_chess_area_0(i,j)\n## print()\n## display_grid()\n## print('count_star(dim,grid,count2)',count_star(dim,grid,count2))\n if count_star(dim,grid,count_2) > count2:\n count2 = count_star(dim,grid,count_2)\n## m = i\n## n = j\n\n## print('count2',count2)\n##grid = copy.deepcopy(grid_cp) \n##if grid[i][j] == 1:\n## count_chess_area_1(i,j)\n##if grid[i][j] == 0:\n## count_chess_area_0(i,j)\n##print()\n##display_grid()\n##print('count2',count2)\nmax_size_of_region_with_checkers_structure = count2\n\nprint('The size of the largest area with a checkers structure is '\n f'{max_size_of_region_with_checkers_structure}.'\n )\n\n\n\n\n \n \n\n","sub_path":"Quiz5/quiz_5.py","file_name":"quiz_5.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610226474","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\ndef main():\n listCurrencies = []\n #list of datas\n data = {}\n data['digital_currencies'] = []\n sep = 0\n\n headers = {'User-Agent': 'Mozilla/5.0'}\n y = [\"gold-price\",\"silver-price\",\"platinum-price\",\"palladium-price\"]\n url = \"https://www.coindesk.com/coindesk20\"\n r = requests.get(url, 
headers=headers)\n\n soup = BeautifulSoup(r.text, 'html.parser')\n s4 = soup.select(\"#__next > div:nth-child(2) > main > section > div.data-module > div.cex-table-wrapper > div > section > section.tbody\")\n for span in s4[0].findAll(\"section\"):\n if(str(span.find(\"span\")) != \"None\"):\n result = span\n rows = result.find_all('span')\n for x in rows:\n listCurrencies.append(x.get_text())\n sep += 1\n if(sep>10):\n data['digital_currencies'].append({\n 'asset': listCurrencies[2],\n 'price': listCurrencies[3],\n 'market cap': listCurrencies[4],\n 'total exchange volume': listCurrencies[5],\n 'returns 24h': listCurrencies[6],\n 'total supply': listCurrencies[7],\n 'category': listCurrencies[8],\n 'value proposition': listCurrencies[9]\n })\n listCurrencies = []\n sep = 0\n \n\n with open('../digital_currencies.json', 'w') as outfile:\n json.dump(data, outfile)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"telegram_bot/getCost.py","file_name":"getCost.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596342897","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 28 13:18:34 2016\r\n\r\n@author: okada\r\n\"\"\"\r\n\r\ndef load_text(path):\r\n f = open(path)\r\n text = f.read().replace(\"\\r\\n\", \"\\n\").replace(\"\\r\", \"\\n\").split(\"\\n\")\r\n f.close()\r\n \r\n return text\r\n\r\ndef load_html(path):\r\n import re\r\n \r\n text = load_text(path)\r\n \r\n for i in range(len(text)):\r\n if re.search(\"^ *.+ *$\", text[i]):\r\n text[i] = \"\"\r\n elif re.search(\"^Generated on [0-9]{4}-[0-9]{1,2}-[0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2} for .+ paplot-[0-9]+.[0-9]+.[0-9]+.$\", text[i]):\r\n text[i] = \"\"\r\n elif re.search(\"^ *.* .*- .* *$\", text[i]):\r\n text[i] = \"\"\r\n \r\n return text\r\n\r\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649399604","text":"#!/usr/bin/python\n# -*- coding: utf8 -*-\n# import gensim, glob, os\nimport math\nimport operator\nimport re\nimport string\nimport time\nfrom collections import Counter\n\nimport numpy as np\nfrom pyemd import emd\nfrom sklearn.metrics import euclidean_distances\n\n# wv = gensim.models.KeyedVectors.load_word2vec_format(\"../nlp/word2vec/vector.txt\",\n# binary=False)\n# shape = wv.syn0.shape\n# if not os.path.exists(\"data/embed_vn.dat\"):\n# print(\"Caching word embeddings in memmapped format...\")\n#\n# fp = np.memmap(\"data/embed_vn.dat\", dtype=np.double, mode='w+', shape=shape)\n# fp[:] = wv.syn0[:]\n# with open(\"data/embed_vn.vocab\", \"w\", encoding='utf-8') as f:\n# for _, w in sorted((voc.index, word) for word, voc in wv.vocab.items()):\n# print(w, file=f)\n# del fp, wv\n\nshape = (17900, 600)\nsize = 600\npath = \"wmd/\"\n# path = \"\"\nW = np.memmap(path + \"data/embed_vn.dat\", dtype=np.double, mode=\"r\",\n shape=shape)\n\nwith open(path + \"data/embed_vn.vocab\", encoding='utf-8') as f:\n vocab_list = map(str.strip, f.readlines())\nvocab_dict = {w: k for k, w in enumerate(vocab_list)}\n\n\ndef get_xd(document):\n # Matrix of document\n ds = document.split()\n list_doc = [word for word in ds if word in vocab_dict]\n vect = Counter(list_doc)\n\n input_vector_matrix = W[[vocab_dict[w] for w in vect.keys()]]\n # Calculate word frequency\n v = list(vect.values())\n v = np.ravel(v)\n frequency = np.divide(v, v.sum())\n # Calculate 
di*xi\n input_vector = []\n for i in range(0, len(input_vector_matrix)):\n input_vector.append(\n np.multiply(input_vector_matrix[i], frequency[i]))\n\n X = np.sum(input_vector, axis=0)\n return X\n\n\n# if not os.path.exists(\"data/Xd.dat\"):\n# print(\"Caculate Xd.dat...\")\n#\n# list_docs = []\n# with open(\"../nlp/data-filter/result/data_filter_sw.txt\", \"r\") as filename:\n# for line in filename:\n# line = line.strip()\n# if line != '':\n# list_docs.append(line)\n# # list_docs = sorted(list_docs)\n# # print(list_docs[10])\n# X_dict = []\n# for i in range(0, len(list_docs)):\n# X_dict.append(get_xd(list_docs[i]))\n# fp = np.memmap(\"data/Xd.dat\", dtype=np.double, mode='w+',\n# shape=(len(list_docs), size))\n# fp[:] = X_dict[:]\n#\n# with open(\"data/list_doc.vocab\", \"w\", encoding='utf-8') as f:\n# for doc in list_docs:\n# print(doc, file=f)\n# del fp\n\nwith open(path + \"data/list_doc.vocab\", encoding='utf-8') as f:\n list_docs = f.read().splitlines()\ndoc_dict = {doc: k for k, doc in enumerate(list_docs)}\n\nx_matrix = np.memmap(path + \"data/Xd.dat\", dtype=np.double, mode=\"r\",\n shape=(len(list_docs), size))\n\n# Get stop-words\nSW = set()\nfor line in open(path + 'vn_stopword.txt'):\n line = line.strip()\n if line != '':\n SW.add(line)\nstop_words = list(SW)\n\n\ndef WMD(docs_1, docs_2):\n ds1 = docs_1.split()\n ds2 = docs_2.split()\n list_doc_1 = [word for word in ds1 if word in vocab_dict]\n list_doc_2 = [word for word in ds2 if word in vocab_dict]\n\n vect_1 = Counter(list_doc_1)\n vect_2 = Counter(list_doc_2)\n vect = Counter(list_doc_1 + list_doc_2)\n vect = sorted(vect.keys())\n v_1 = []\n v_2 = []\n for key in vect:\n if vect_1[key] is not None:\n v_1.append(vect_1[key])\n else:\n v_1.append(0)\n if vect_2[key] is not None:\n v_2.append(vect_2[key])\n else:\n v_2.append(0)\n\n v_1 = np.ravel(v_1)\n v_1 = np.divide(v_1, v_1.sum())\n v_2 = np.ravel(v_2)\n v_2 = np.divide(v_2, v_2.sum())\n\n W_ = W[[vocab_dict[w] for w in vect]]\n D_ = euclidean_distances(W_)\n D_ = D_.astype(np.double)\n D_ /= D_.max()\n\n return emd(v_1, v_2, D_)\n\n\ndef WCD(document):\n x1 = get_xd(document)\n\n temple = np.matrix(x_matrix) * (np.matrix(x1).transpose())\n results = {}\n for doc in list_docs:\n results[doc_dict[doc]] = math.sqrt(\n math.fabs(np.linalg.norm(x1) ** 2\n - 2 * temple[doc_dict[doc]]\n + np.linalg.norm(\n x_matrix[doc_dict[doc]]) ** 2))\n\n results = sorted(results.items(), key=operator.itemgetter(1))\n return results\n\n\ndef __rwmd(docs_1, docs_2):\n ds1 = docs_1.split()\n ds2 = docs_2.split()\n list_doc_1 = [word for word in ds1 if word in vocab_dict]\n list_doc_2 = [word for word in ds2 if word in vocab_dict]\n vect_1 = Counter(list_doc_1)\n vect_2 = Counter(list_doc_2)\n\n matrix_1 = W[[vocab_dict[w] for w in vect_1.keys()]]\n matrix_2 = W[[vocab_dict[w] for w in vect_2.keys()]]\n\n # Calculate word frequency\n v1 = list(vect_1.values())\n v1 = np.ravel(v1)\n v1 = np.divide(v1, v1.sum())\n\n v2 = list(vect_2.values())\n v2 = np.ravel(v2)\n v2 = np.divide(v2, v2.sum())\n\n d1_ = euclidean_distances(matrix_1, matrix_2)\n d1_min = np.amin(d1_, axis=1)\n\n d2_ = euclidean_distances(matrix_2, matrix_1)\n d2_min = np.amin(d2_, axis=1)\n\n return max(np.dot(d1_min, v1), np.dot(d2_min, v2))\n\n\ndef knn(k, input_doc):\n result = re.sub('\\W+', ' ', input_doc.lower()).strip()\n repl = ['muốn đọc sách của nhà văn', 'muốn tìm sách của nhà văn',\n 'thích đọc sách của nhà văn', 'muốn sách của nhà văn',\n 'thích sách của nhà văn', 'muốn đọc sách của', 'muốn tìm sách của',\n 'thích 
đọc sách của', 'muốn sách của', 'thích sách của', 'có thể',\n 'được không', 'được không ạ', 'sách của', 'vài']\n for element in repl:\n result = result.replace(element, \"\")\n result = \" \".join([word for word in result.split() if\n word in vocab_dict and word not in stop_words])\n\n if len(result) == 0:\n return []\n\n wcd = WCD(result)\n\n return wcd[:k]\n\n # wmd_k_doc = {}\n # count = 1\n #\n # min_rwmd = __rwmd(list_docs[wcd[0][0]], input_doc)\n # for i in range(0, len(wcd)):\n # if count <= k:\n # wmd_k_doc[wcd[i][0]] = WMD(list_docs[wcd[i][0]], input_doc)\n # rwmd_temp = __rwmd(list_docs[wcd[i][0]], input_doc)\n # min_rwmd = rwmd_temp if min_rwmd > rwmd_temp else min_rwmd\n # else:\n # _rwmd = __rwmd(list_docs[wcd[i][0]], input_doc)\n # if _rwmd < min_rwmd:\n # wmd_k_doc[wcd[i][0]] = WMD(list_docs[wcd[i][0]], input_doc)\n # count += 1\n #\n # wmd_k_doc = sorted(wmd_k_doc.items(), key=operator.itemgetter(1))\n #\n # return wmd_k_doc[:k]\n\n\ndef main():\n d1 = \"giết con chim nhại\"\n\n d2 = \"gỏi salad và các món khai vị tái bản cẩm tuyết sách tiếng việt sách kinh tế gỏi salad và các món khai vị mục lục 1 salad rau củ xốt mayonnais 2 salad rau câu măng tây 3 salad nga 4 salad hải sản 5 salad heo quay 6 salad cà chua cá thu 7 salad tôm hấp tỏi 8 salad tôm cà ri 9 nghêu trộn măng tây với xốt mù tạt 10 salad chả chiên 50 bò bốp thấu 51 heo bốp thấu 52 bao tử bóp rau răm 53 bò nhúng giấm 54 tai mũi heo ngâm giấm 55 bò ngâm giấm 56 dồi thịt 57 giò thủ 58 chả lụa 59 jam bon 60 pa tê tư vấn gia chánh\"\n\n d3 = \"harry potter và đứa trẻ bị nguyền rủa phần một và hai j k rowling jack thorne john tiffany sách tiếng việt sách văn học văn học nước ngoài harry potter và đứa trẻ bị nguyền rủa phần một và hai kịch bản harry potter và đứa trẻ bị nguyền rủa được viết dựa trên câu chuyện của j k rowling jack thorne và john tiffany từ những nhân vật quen thuộc trong bộ harry potter kịch bản nói về cuộc phiêu lưu của những hậu duệ sự can thiệp vào dòng thời gian đã gây ra những thay đổi không ngờ cho tương lai tưởng chừng đã yên ổn sau khi vắng bóng chúa tể voldermort\"\n t1 = time.time()\n print(knn(20, d1))\n # print(__rwmd(d1, d2))\n print(WCD(d1)[:20])\n print(time.time() - t1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"wmd/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"235914409","text":"import csv\r\nimport json\r\nimport random\r\nfrom langconv import *\r\n\r\nrelpath = \"../Datasets/RelationSet.csv\"\r\ncsvpath = \"../Datasets/TrainSetUnique.csv\"\r\noutpath = \"../Datasets/TrainSetUniqueOMIT.csv\"\r\n\r\nwith open(relpath, \"r\", encoding=\"utf8\") as relInput:\r\n with open(csvpath, \"r\", encoding=\"utf8\") as csvInput:\r\n with open(outpath, \"w\", encoding=\"utf8\") as output:\r\n reader = csv.reader(csvInput)\r\n relStorage = []\r\n cntStorage = {}\r\n tempStorage = []\r\n outStorage = []\r\n for row in relInput:\r\n relStorage.append(row.replace('\\n', ''))\r\n\r\n print(relStorage)\r\n for row in reader:\r\n # print(row)\r\n key = row[2]\r\n if key in relStorage:\r\n tempStorage.append((row[0], row[1], row[2], row[3]))\r\n if key not in cntStorage:\r\n cntStorage[key] = 1\r\n else:\r\n cntStorage[key] += 1\r\n\r\n for row in tempStorage:\r\n times = int(1000 / cntStorage[row[2]])\r\n if times == 0:\r\n times = 1\r\n # for i in range(times):\r\n outStorage.append(\"\\\"\" + row[0] + \"\\\",\\\"\" + row[1] + \"\\\",\\\"\" + row[2] + 
\"\\\",\\\"\" + row[3] + \"\\\"\\n\")\r\n \r\n # random.shuffle(outStorage)\r\n for row in outStorage:\r\n output.write(row)\r\n\r\n print(cntStorage)","sub_path":"Spider/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"71392650","text":"import os\nimport logging\nimport time, datetime\nimport shutil\n\nfrom ..common.db import DBClient\nfrom ..common.utils import *\nfrom csst_dfs_commons.models import Result\nfrom csst_dfs_commons.models.mci import Level1Record\nfrom csst_dfs_commons.models.common import from_dict_list\n\nlog = logging.getLogger('csst')\n\nclass Level1DataApi(object):\n def __init__(self, sub_system = \"mci\"):\n self.sub_system = sub_system\n self.root_dir = os.getenv(\"CSST_LOCAL_FILE_ROOT\", \"/opt/temp/csst\")\n self.db = DBClient()\n\n def find(self, **kwargs):\n ''' retrieve level1 records from database\n\n parameter kwargs:\n level0_id: [str]\n data_type: [str]\n create_time : (start, end),\n qc1_status : [int],\n prc_status : [int],\n filename: [str]\n limit: limits returns the number of records,default 0:no-limit\n\n return: csst_dfs_common.models.Result\n '''\n try:\n level0_id = get_parameter(kwargs, \"level0_id\")\n data_type = get_parameter(kwargs, \"data_type\")\n create_time_start = get_parameter(kwargs, \"create_time\", [None, None])[0]\n create_time_end = get_parameter(kwargs, \"create_time\", [None, None])[1]\n qc1_status = get_parameter(kwargs, \"qc1_status\")\n prc_status = get_parameter(kwargs, \"prc_status\")\n filename = get_parameter(kwargs, \"filename\")\n limit = get_parameter(kwargs, \"limit\", 0)\n\n sql_count = \"select count(*) as c from mci_level1_data where 1=1\"\n sql_data = f\"select * from mci_level1_data where 1=1\"\n\n sql_condition = \"\" \n if level0_id:\n sql_condition = f\"{sql_condition} and level0_id='{level0_id}'\"\n if data_type:\n sql_condition = f\"{sql_condition} and data_type='{data_type}'\"\n if create_time_start:\n sql_condition = f\"{sql_condition} and create_time >='{create_time_start}'\"\n if create_time_end:\n sql_condition = f\"{sql_condition} and create_time <='{create_time_end}'\"\n if qc1_status:\n sql_condition = f\"{sql_condition} and qc1_status={qc1_status}\"\n if prc_status:\n sql_condition = f\"{sql_condition} and prc_status={prc_status}\" \n if filename:\n sql_condition = f\" and filename='{filename}'\" \n\n sql_count = f\"{sql_count} {sql_condition}\"\n sql_data = f\"{sql_data} {sql_condition}\"\n\n if limit > 0:\n sql_data = f\"{sql_data} limit {limit}\" \n\n totalCount = self.db.select_one(sql_count)\n _, recs = self.db.select_many(sql_data)\n return Result.ok_data(data=from_dict_list(Level1Record, recs)).append(\"totalCount\", totalCount['c'])\n\n except Exception as e:\n return Result.error(message=str(e))\n \n\n def get(self, **kwargs):\n '''\n parameter kwargs:\n id = [int] \n\n return dict or None\n '''\n try:\n fits_id = get_parameter(kwargs, \"id\", -1)\n r = self.db.select_one(\n \"select * from mci_level1_data where id=?\", (fits_id,))\n\n if r:\n return Result.ok_data(data=Level1Record().from_dict(r))\n else:\n return Result.error(message=f\"id:{fits_id} not found\") \n except Exception as e:\n log.error(e)\n return Result.error(message=str(e)) \n\n def update_proc_status(self, **kwargs):\n ''' update the status of reduction\n\n parameter kwargs:\n id : [int],\n status : [int]\n\n return csst_dfs_common.models.Result\n '''\n fits_id = get_parameter(kwargs, \"id\")\n 
status = get_parameter(kwargs, \"status\")\n try:\n existed = self.db.exists(\n \"select * from mci_level1_data where id=?\",\n (fits_id,)\n )\n if not existed:\n log.warning('%s not found' %(fits_id, ))\n return Result.error(message ='%s not found' %(fits_id, ))\n self.db.execute(\n 'update mci_level1_data set prc_status=?, prc_time=? where id=?',\n (status, format_time_ms(time.time()), fits_id)\n ) \n self.db.end() \n return Result.ok_data()\n \n except Exception as e:\n log.error(e)\n return Result.error(message=str(e))\n\n def update_qc1_status(self, **kwargs):\n ''' update the status of QC1\n \n parameter kwargs:\n id : [int],\n status : [int]\n ''' \n fits_id = get_parameter(kwargs, \"id\")\n status = get_parameter(kwargs, \"status\")\n try:\n existed = self.db.exists(\n \"select * from mci_level1_data where id=?\",\n (fits_id,)\n )\n if not existed:\n log.warning('%s not found' %(fits_id, ))\n return Result.error(message ='%s not found' %(fits_id, ))\n self.db.execute(\n 'update mci_level1_data set qc1_status=?, qc1_time=? where id=?',\n (status, format_time_ms(time.time()), fits_id)\n ) \n self.db.end() \n return Result.ok_data()\n \n except Exception as e:\n log.error(e)\n return Result.error(message=str(e))\n\n def write(self, **kwargs):\n ''' insert a level1 record into database\n \n parameter kwargs:\n level0_id : [str]\n data_type : [str]\n cor_sci_id : [int]\n prc_params : [str]\n filename : [str]\n file_path : [str] \n prc_status : [int]\n prc_time : [str]\n pipeline_id : [str]\n refs : [dict]\n\n return csst_dfs_common.models.Result\n ''' \n try:\n rec = Level1Record(\n id = 0,\n level0_id = get_parameter(kwargs, \"level0_id\"),\n data_type = get_parameter(kwargs, \"data_type\"),\n cor_sci_id = get_parameter(kwargs, \"cor_sci_id\"),\n prc_params = get_parameter(kwargs, \"prc_params\"),\n filename = get_parameter(kwargs, \"filename\"),\n file_path = get_parameter(kwargs, \"file_path\"),\n prc_status = get_parameter(kwargs, \"prc_status\", -1),\n prc_time = get_parameter(kwargs, \"prc_time\", format_datetime(datetime.now())),\n pipeline_id = get_parameter(kwargs, \"pipeline_id\"),\n refs = get_parameter(kwargs, \"refs\", {})\n )\n existed = self.db.exists(\n \"select * from mci_level1_data where filename=?\",\n (rec.filename,)\n )\n if existed:\n log.error(f'{rec.filename} has already been existed')\n return Result.error(message=f'{rec.filename} has already been existed') \n\n now_str = format_time_ms(time.time())\n self.db.execute(\n 'INSERT INTO mci_level1_data (level0_id,data_type,cor_sci_id,prc_params,filename,file_path,qc1_status,prc_status,prc_time, create_time,pipeline_id) \\\n VALUES(?,?,?,?,?,?,?,?,?,?,?)',\n (rec.level0_id, rec.data_type, rec.cor_sci_id, rec.prc_params, rec.filename, rec.file_path, -1, rec.prc_status,rec.prc_time, now_str, rec.pipeline_id,)\n )\n self.db.end()\n rec.id = self.db.last_row_id()\n\n if rec.refs.items():\n sql_refs = \"insert into mci_level1_ref (level1_id,ref_type,cal_id) values \"\n values = [\"(%s,'%s',%s)\"%(rec.id,k,v) for k,v in rec.refs.items()]\n _ = self.db.execute(sql_refs + \",\".join(values)) \n self.db.end()\n\n rec.create_time = now_str\n return Result.ok_data(data=rec)\n except Exception as e:\n log.error(e)\n return Result.error(message=str(e)) ","sub_path":"csst_dfs_api_local/mci/level1.py","file_name":"level1.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"78206768","text":"class Relu:\n def __init__(self):\n self.mask 
= None\n\n def forward(self, x):\n self.mask = (0 >= x)\n out = x.copy()\n out[self.mask] = 0\n\n return out\n\n def backward(self, dout):\n dout[self.mask] = 0\n dx = dout\n\n return dx\n\nimport numpy as np\nx = np.array([[1.0, -0.5],[-2.0, 3.0]])\nmask = (x <= 0)\nprint(mask)","sub_path":"1-Chap.5/backpg_relu.py","file_name":"backpg_relu.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584817703","text":"# @Author: Jenkins Alec \n# @Date: 2017-07-09T16:46:45-07:00\n# @Project: LTSPM analysis\n# @Last modified by: alec\n# @Last modified time: 2017-08-30T12:52:28-07:00\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nimport plotting.format_plots_tkagg as fp\nfrom PIL import Image\nimport calc_NV_field as cNVf\nimport load_scan as lscan\nimport linecut\n\npi = np.pi\n\ndef plot_stray_field(scannum, helicities = [0, 90, 180]):\n\n path = '/Users/alec/UCSB/cofeb_analysis_data/ta/'\n scan_params_path = path+str(scannum)+'/'+'scan_parameters.json'\n field_path = path+str(scannum)+'/stray_field_sim/'\n datapath = path+'1760/'\n\n with open(scan_params_path, 'r') as fread:\n scan_params = json.load(fread)\n\n phi = scan_params['phi']\n theta = scan_params['theta']\n xres = scan_params['xres']\n xcenter = scan_params['xcenter']\n ycenter = scan_params['ycenter']\n scanSize = (1e6)*scan_params['scanSize'] # convert to microns\n\n errnames = [\"lower\", \"mean\", \"upper\"]\n\n ffdata = lscan.load_ff('/Users/alec/UCSB/scan_data/'+str(scannum)+'-esrdata/fitdata.txt',xres,xres,maxfgrad=20)\n\n scd = np.empty((len(helicities)), dtype=object)\n vcd = np.empty_like(scd)\n meff = np.empty_like(scd)\n bx = np.empty((len(helicities), len(errnames)), dtype=object)\n by = np.empty_like(bx)\n bz = np.empty_like(bx)\n bNV = np.empty_like(bx)\n fieldCutNV = np.empty_like(bx)\n\n for j in range(len(helicities)):\n scd[j] = np.loadtxt(field_path+'scd_'+str(helicities[j])+str(scannum)+'.txt', delimiter=',')\n vcd[j] = np.loadtxt(field_path+'vcd_'+str(helicities[j])+str(scannum)+'.txt', delimiter=',')\n meff[j] = np.loadtxt(field_path+'meff_'+str(helicities[j])+str(scannum)+'.txt', delimiter=',')\n for i in range(len(errnames)):\n bx[j,i] = (1e4)*np.loadtxt(field_path+'h'+str(helicities[j])+'_x_'+errnames[i]+'_'+str(scannum)+'.txt', delimiter=',')\n by[j,i] = (1e4)*np.loadtxt(field_path+'h'+str(helicities[j])+'_y_'+errnames[i]+'_'+str(scannum)+'.txt', delimiter=',')\n bz[j,i] = (1e4)*np.loadtxt(field_path+'h'+str(helicities[j])+'_z_'+errnames[i]+'_'+str(scannum)+'.txt', delimiter=',')\n bNV[j,i] = cNVf.calc_NV_field(bx[j,i], by[j,i], bz[j,i], theta, phi)\n\n slen = len(bx[0,0])\n\n cutSize = 2.2\n phinum = 8\n philist = np.linspace(0, pi, phinum, endpoint=False)\n\n ffdataCut = np.empty((phinum), dtype=object)\n ffdataCutErr = np.empty((phinum), dtype=object)\n fieldCutNV = np.empty((phinum, len(helicities), len(errnames)), dtype=object)\n\n for k in range(phinum):\n ffdataCut[k] = linecut.linecut(ffdata[0], scanSize, cutSize, philist[k], xcenter, ycenter)\n ffdataCutErr[k] = linecut.linecut(ffdata[1], scanSize, cutSize, philist[k], xcenter, ycenter)\n for j in range(len(helicities)):\n for i in range(len(errnames)):\n fieldCutNV[k,j,i] = linecut.linecut(bNV[j,i], scanSize, cutSize, philist[k])\n\n\n #---------------- PLOTS ------------------------------------------\n #-----------------------------------------------------------------\n\n savepath = '/Users/alec/UCSB/papers/tacofeb/figures/'\n\n 
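# Close any figures left over from a previous run before drawing new ones.\n 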
plt.close('all')\n\n if(len(helicities)==4):\n fig, ax = plt.subplots(figsize=(4,4))\n im = plt.imshow(bNV[3,1], interpolation='nearest', cmap='bone')\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n cbar = fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)\n plt.subplots_adjust(left=0.0, bottom=0.0, right=0.9, top=1.0, wspace=0, hspace=0)\n plt.savefig(savepath+'BNV_bestFitHelicity_'+str(scannum)+'.pdf', format='pdf')\n\n fig, ax = plt.subplots(figsize=(4,4))\n im = plt.imshow(ffdata[0], interpolation='nearest', cmap='bone')\n ax.get_yaxis().set_visible(False)\n ax.get_xaxis().set_visible(False)\n cbar = fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)\n plt.subplots_adjust(left=0.0, bottom=0.0, right=0.9, top=1.0, wspace=0, hspace=0)\n plt.savefig(savepath+'BNV_data_'+str(scannum)+'.pdf', format='pdf')\n\n fig, axes = plt.subplots(ncols=2, nrows=int(phinum/2), sharex=True, sharey=True, figsize=(6,7.5))\n for j in range(0,2):\n for i in range(0,int(phinum/2)):\n axes[i,j].errorbar(ffdataCut[int(i+(phinum/2)*j)][0], ffdataCut[int(i+(phinum/2)*j)][1], color='#000000', fmt='.', linewidth=1.0, label='data')\n axes[i,j].plot(fieldCutNV[0,0,1][0], fieldCutNV[int(i+(phinum/2)*j),0,1][1], linewidth=2.0, label=u'right-handed Néel')\n axes[i,j].plot(fieldCutNV[0,1,1][0], fieldCutNV[int(i+(phinum/2)*j),1,1][1], linewidth=2.0, label=\"Bloch\")\n axes[i,j].plot(fieldCutNV[0,2,1][0], fieldCutNV[int(i+(phinum/2)*j),2,1][1], linewidth=2.0, label=u'left-handed Néel')\n if(len(fieldCutNV[0,:,1])==4):\n axes[i,j].plot(fieldCutNV[0,3,1][0], fieldCutNV[int(i+(phinum/2)*j),3,1][1], linewidth=2.0, label=r'$\\psi_h$ = '+str(helicities[3])+u'°')\n axes[i,j].get_yaxis().set_visible(False)\n axes[i,j].get_xaxis().set_visible(False)\n axes[i,j].text(0.04,0.86,r'$\\phi$ = '+'{:d}{}{:d}'.format(int(i+(phinum/2)*j),r'$\\pi$/',phinum),\n horizontalalignment='left', verticalalignment='center',\n transform=axes[i,j].transAxes, fontsize=10)\n axes[int((phinum/2)-1),1].get_yaxis().set_visible(True)\n axes[int((phinum/2)-1),1].get_xaxis().set_visible(True)\n axes[int((phinum/2)-1),1].yaxis.tick_right()\n axes[int((phinum/2)-1),1].yaxis.set_label_position(\"right\")\n axes[int((phinum/2)-1),1].set_xlabel(r'r ($\\mu$m)')\n axes[int((phinum/2)-1),1].set_ylabel(r'B$\\mathrm{_{NV}}$ (G)')\n axes[int((phinum/2)-1),0].legend(bbox_to_anchor=(0.0, -0.74), loc=3, borderaxespad=0., frameon=False, prop={'size':10})\n plt.ylim([0,32])\n plt.subplots_adjust(left=0.0, bottom=0.15, right=0.88, top=1.0, wspace=0, hspace=0)\n plt.savefig(savepath+'BNV_linecuts_'+str(scannum)+'.pdf', format='pdf')\n\n\n # fp.format_plots(plt, small=False, tight=False)\n\n plt.show()\n\nif __name__ == \"__main__\":\n import sys\n if (len(sys.argv) == 2):\n plot_stray_field(int(sys.argv[1]))\n elif (len(sys.argv) == 3):\n plot_stray_field(int(sys.argv[1]), helicities=np.array(eval(sys.argv[2])))\n else:\n print('enter scan number')\n","sub_path":"cofeb_analysis/ta/plot_stray_field.py","file_name":"plot_stray_field.py","file_ext":"py","file_size_in_byte":6356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74668027","text":"\n\nclass exact_line_search:\n\n def __init__(self, func, grad, tolerance):\n \"\"\"\n Creates a class which performs an inexact line search.\n\n In:\n func: the objective function\n grad: its (possibly numerical) gradient\n\n Parameters and their default values:\n rho = 0.1\n sigma = 0.7\n tau = 0.1\n chi = 9.\n\n \"\"\"\n self.func = func\n self.grad = grad\n 
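# Width at which the bisection bracket [a, b] counts as converged, e.g. tolerance=1e-6.\n 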
self.tolerance = tolerance\n\n\n def __call__(self, x_0, s):\n \"\"\"\n Given an initial guess, returns a factor for use in line search which\n is acceptable according to either Goldstein or Wolfe-Powell\n conditions.\n\n In:\n x_0: the starting point\n s: the direction along which to line search\n guess: initial guess for line search factor alpha_0\n wolfe_powell: optional boolean, controls whether to use Goldstein\n or Wolfe-Powell conditions (recommended for non-quadratic\n objective function)\n Out:\n a0: scalar, line search factor alpha_0\n f_a0: scalar, the objective function evaluated for x + a0*s\n\n \"\"\"\n \"\"\"\n Exact line search using the bisection method\n In: x is the current point, dir is the search direction\n Out: returnes the stepsize the minimized the search function\n \"\"\"\n #This is an implementation of the bisection method for exact line search\n\n #This is the derivative of our F(lambda) = f(x +lambda*dir). The search function is minimized when this is equal to 0.\n search_func = lambda step: (self.grad(x_0 + step*s) @ s)\n\n #We create an interval starting at the derivative of the current point\n a = 0\n #a = search_func(0);\n step_size = 0.01\n b = a\n #Search in the search direction until the sign changes, then the minimum is within\n #this interval, SEEMS LIKE WE SOMETIME SEARCH IN A NON DESCENT DIRECTION WHICH MAKES THIS LOOP RUN ENDLESSLY\n while search_func(a)*search_func(b) > 0:\n step_size = step_size*2\n b = step_size\n #b = search_func(step_size)\n\n #Halve the interval until it has reached a certain length\n\n while abs(b-a) > self.tolerance:\n m = (a + b)/2\n f_m = search_func(m)\n\n if f_m <= 0:\n a = m\n elif f_m > 0:\n b = m\n else:\n print(\"Bisection method fails\")\n return\n #Return the midpoint of the interval\n m = (a+b)/2\n return m, search_func(m)\n","sub_path":"classes/exact_line_search.py","file_name":"exact_line_search.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"117231019","text":"import torch\r\nfrom torch import nn\r\n\r\nclass Net(nn.Module):\r\n def __init__(self,input_dim):\r\n super(Net, self).__init__()\r\n self._map_layer=nn.Sequential(\r\n nn.Conv1d(input_dim,16,(3,),(1,))\r\n )\r\n self._encoder_layer=nn.TransformerEncoderLayer(d_model=16,nhead=2,batch_first=True)\r\n self._transformer_encoder=nn.TransformerEncoder(self._encoder_layer,num_layers=6)\r\n\r\n self._output_layer=nn.Sequential(\r\n nn.Linear(16,16),\r\n nn.BatchNorm1d(16),\r\n nn.ReLU(),\r\n nn.Linear(16,1),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self,x):\r\n _x=x.permute(0,2,1)\r\n _y=self._map_layer(_x)\r\n _y=_y.permute(0,2,1)\r\n _y=self._transformer_encoder(_y)\r\n _y=_y[:,-1]\r\n _y=self._output_layer(_y)\r\n\r\n return _y\r\n\r\nif __name__ == '__main__':\r\n x=torch.randn(2,5,7)\r\n net=Net(7)\r\n y=net(x)\r\n print(y.shape)","sub_path":"Day006/python/Stock/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"152229194","text":"# valueIterationAgents.py\n# -----------------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were 
developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nimport mdp, util\n\nfrom learningAgents import ValueEstimationAgent\n\nclass ValueIterationAgent(ValueEstimationAgent):\n \"\"\"\n * Please read learningAgents.py before reading this.*\n\n A ValueIterationAgent takes a Markov decision process\n (see mdp.py) on initialization and runs value iteration\n for a given number of iterations using the supplied\n discount factor.\n \"\"\"\n def __init__(self, mdp, discount = 0.9, iterations = 100):\n \"\"\"\n Your value iteration agent should take an mdp on\n construction, run the indicated number of iterations\n and then act according to the resulting policy.\n\n Some useful mdp methods you will use:\n mdp.getStates()\n mdp.getPossibleActions(state)\n mdp.getTransitionStatesAndProbs(state, action)\n mdp.getReward(state, action, nextState)\n mdp.isTerminal(state)\n \"\"\"\n self.mdp = mdp\n self.discount = discount\n self.iterations = iterations\n self.values = util.Counter() # A Counter is a dict with default 0\n # Write value iteration code here\n \n \"*** YOUR CODE HERE ***\"\n #number of iteration\n for i in range(iterations):\n tempvalues = []\n #Iterate over all the states [0,0], [0,1]... and update each state value \n for state in self.mdp.getStates():\n arr = [] #Array to store values of all possible action on particular action\n for action in self.mdp.getPossibleActions(state):\n value = self.computeQValueFromValues(state,action)\n arr.append(value)\n if self.mdp.isTerminal(state):\n tempvalues.append(0) #assign 0 when pos at terminal state\n else:\n tempvalues.append(max(arr)) ##update to maximum value\n\n #update all the states values \n for ind,state in enumerate(self.mdp.getStates()):\n self.values[state] = tempvalues[ind]\n \n\n def getValue(self, state):\n \"\"\"\n Return the value of the state (computed in __init__).\n \"\"\"\n return self.values[state]\n\n\n def computeQValueFromValues(self, state, action):\n \"\"\"\n Compute the Q-value of action in state from the\n value function stored in self.values.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n #util.raiseNotDefined()\n value = 0\n for t in self.mdp.getTransitionStatesAndProbs(state, action):\n #t[0] is nextstate\n #t[1] is probability\n value += t[1]*(self.mdp.getReward(state, action, t[0]) + self.discount * self.values[t[0]])\n return value\n\n def computeActionFromValues(self, state):\n \"\"\"\n The policy is the best action in the given state\n according to the values currently stored in self.values.\n\n You may break ties any way you see fit. 
Note that if\n there are no legal actions, which is the case at the\n terminal state, you should return None.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n #util.raiseNotDefined()\n if self.mdp.isTerminal(state):\n return 0\n valuearr = []\n actionarr = []\n \n #Return the action whose value is maximum\n for action in self.mdp.getPossibleActions(state):\n valuearr.append(self.computeQValueFromValues(state, action))\n actionarr.append(action)\n return actionarr[valuearr.index(max(valuearr))]\n \n\n def getPolicy(self, state):\n return self.computeActionFromValues(state)\n\n def getAction(self, state):\n \"Returns the policy at the state (no exploration).\"\n return self.computeActionFromValues(state)\n\n def getQValue(self, state, action):\n return self.computeQValueFromValues(state, action)\n","sub_path":"ML_HW4/Q1/valueIterationAgents.py","file_name":"valueIterationAgents.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59980353","text":"import numpy as np\nfrom .IndexPolicy import IndexPolicy\n\n__author__ = \"Petteri Pulkkinen\"\n__version__ = \"0.9\"\n\n\nclass AdaptiveScalingPolicy(IndexPolicy):\n\n def __init__(self, nb_arms, discount):\n super(AdaptiveScalingPolicy, self).__init__(nb_arms)\n self.scaling = 1\n self.discount = discount\n self.d_t = 0\n self.d_pulls = np.zeros(nb_arms)\n self.d_rewards = np.zeros(nb_arms)\n\n def startGame(self):\n super(AdaptiveScalingPolicy, self).startGame()\n self.d_pulls.fill(0)\n self.d_rewards.fill(0)\n self.d_t = 0\n\n def getReward(self, arm, reward):\n super().getReward(arm, reward)\n\n self.d_pulls = self.discount * self.d_pulls\n self.d_pulls[arm] += 1\n self.d_rewards = self.discount * self.d_rewards\n self.d_rewards[arm] += reward\n self.d_t = self.discount * self.d_t + 1\n\n nz_idx = np.nonzero(self.d_pulls)\n self.scaling = np.max(self.d_rewards[nz_idx] / self.d_pulls[nz_idx])\n\n def computeIndex(self, arm):\n raise NotImplementedError(\"\")\n\n\n\n","sub_path":"SMPyBandits/Policies/AdaptiveScalingPolicy.py","file_name":"AdaptiveScalingPolicy.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"188896259","text":"import requests\nimport urllib\nfrom collections import OrderedDict\nimport pickle\nimport re\n\n\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\n\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import RegexpTokenizer\nimport nltk\n\n\nstopset = set(stopwords.words('english'))\n\n\n\ndef removeStopWords(input):\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens=tokenizer.tokenize(input)\n tokens = [w for w in tokens if not w in stopset]\n returnString= ' '.join(tokens)\n returnString = ' '.join(OrderedDict((w,w) for w in returnString.split()).keys())\n return returnString\n\n\ndef strip_non_ascii(string):\n ''' Returns the string without non ASCII characters'''\n stripped = (c for c in string if 0 < ord(c) < 127)\n return ''.join(stripped)\n\n\ndef getConceptsForWord(word):\n \n \n word = word_tokenize(word)\n word = nltk.tag.pos_tag(word)\n #print word\n \n if (word[0][1]=='NNP' or word[0][1]=='NN'):\n response = requests.get(\"https://www.googleapis.com/freebase/v1/search?query=\" + word[0][0] + \"&key=AIzaSyAP06H4HjCk69jQy4J78SKtlhcxup6Ae8k\")\n 
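# The (now retired) Freebase search API returned candidate topics as JSON.\n 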
jsonResponse = response.json()\n #print jsonResponse['result']\n resultString = ''\n \n counter=0\n for row in jsonResponse['result']:\n \n #print row\n try: \n resultString = resultString + row['notable']['id'] + ' '\n counter+=1\n except KeyError: pass\n \n if (counter>=4): #no. of ids\n break\n \n #resultString = ' '.join(OrderedDict((w,w) for w in resultString.lower().split()).keys()) \n #print resultString \n return resultString\n\ndef replaceStringWithConcepts(input):\n \n input = strip_non_ascii(input)\n \n input = removeStopWords(input)\n returnString = ''\n splitString = input.split()\n for splitWord in splitString:\n concept = getConceptsForWord(splitWord)\n if not concept:\n returnString = returnString + ' ' + splitWord\n else:\n returnString = returnString + ' ' + concept\n returnString = ' '.join(OrderedDict((w,w) for w in returnString.split()).keys())\n #print \"pass\" + input\n #print returnString\n return returnString\n\n\n##########################MAIN#############################\n\n####################TRAIN PICKLE##########################\n'''\ninputFile = open(\"techTweets.txt\", 'r+')\n\nclassifiedMessages = np.loadtxt(\"techTweets.txt\", comments='\\\\<>=#', delimiter=\"\\t\", unpack=False, dtype ='string' )\n\n\ni=0 \nmessageArray = pickle.load(open(\"TransformedTweetsArray.p\",\"rb\"))\nfor message in classifiedMessages[:]:\n print message[-1].replace('#','')\n messageArray.append([str( message[0] ) , str( replaceStringWithConcepts(( urllib.unquote(message[-1]).decode('utf8') ).replace('#',' ').replace('\\\\n',' ').replace('\\\\','')) )])\n if (i%200==0):\n pickle.dump(messageArray,open(\"TransformedTweetsArray.p\",\"wb\"))\n i+=1\nprint messageArray\n'''\n\n\n###########################################################################\n\n\n\ndef getPredictions (testingTweetsWithIDs):\n\n #####################LOAD PICKLE AND CROSS VALIDATE###########################################\n \n ## Pickle Dumps Add / Load\n #pickle.dump(messageArray,open(\"TransformedTechTweetsArray.p\",\"wb\"))\n labelledMessageArray = pickle.load(open(\"TransformedTechTweetsArray.p\",\"rb\"))\n tempArray = pickle.load(open(\"TransformedTechTweetsArray2.p\",\"rb\"))\n labelledMessageArray = labelledMessageArray + tempArray\n \n np.random.shuffle(labelledMessageArray)\n #labelledMessageArray = [ [s[0], s[1].replace('/', ' ')] for s in labelledMessageArray]\n labelledMessageArray = np.asarray(labelledMessageArray)\n trainingLabels = np.asarray(labelledMessageArray[:, 0], dtype=int )\n trainingMessages = np.asarray(labelledMessageArray[:, 1] )\n \n \n vectorizer = CountVectorizer(ngram_range=(1, 3))\n frequencies = vectorizer.fit_transform(trainingMessages )\n \n classifier = MultinomialNB()\n targets = trainingLabels\n #print \"\\n\\n\\n\\n\" + str(targets)\n classifier.fit(frequencies, targets)\n \n #################################### CLASSIFIER TRAINED\n \n testingTweetsWithIDs = testingTweetsWithIDs\n #testingTweetsWithIDs = np.loadtxt(\"lala.txt\", comments='\\\\<>=#', delimiter=\"\\t\", unpack=False, dtype ='string' )\n \n \n testingTweetsWithIDs = np.asarray(testingTweetsWithIDs)\n #print testingTweetsWithIDs\n \n \n ###############\n \n results = []\n for row in testingTweetsWithIDs:\n tweet_id = row[0]\n tweet = row[1]\n tweet_stripped_url = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', urllib.unquote_plus(tweet))\n tweet_concept = str( replaceStringWithConcepts(( urllib.unquote(tweet_stripped_url)).replace('#',' ').replace('\\\\n',' ').replace('\\\\','')) 
)\n \n test_result = vectorizer.transform([tweet_concept]) #TWEET\n prediction = classifier.predict(test_result)\n prediction_probability = classifier.predict_proba(test_result)\n# print \"Tweet id: \" + tweet_id + \" \" + \"done!\"\n #print prediction\n #print prediction_probability\n results.append([tweet_id, prediction[0], prediction_probability[0][1]]) \n \n \n return results\n \n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"252493616","text":"import numpy as np\nimport sorting\nimport unittest\n\nclass TestPreStepDawa(unittest.TestCase):\n\n\tdef test_sorting_g_0(self):\n\t\td = range(0,10)\n\t\torder, noisy_data =\tsorting.create_order(d, 0, 'freq', add_noise=False)\n\t\tself.assertListEqual(list(order), range(0,10))\n\n\tdef test_sorting_g_01(self):\n\t\td = range(0,10)\n\t\torder, noisy_data =\tsorting.create_order(d, 0.2, 'freq', add_noise=False)\n\t\tself.assertListEqual(list(order), range(0,10))\n\n\tdef test_final_estimation(self):\n\t\td = [1, 1, 1, 100, 10, 10, 10]\n\t\tbins = [[0, 2], [3, 3], [4, 6]]\n\t\that = sorting.create_hat(d, d, bins, 1, 0.1, 0.25, add_noise=False)\n\t\tself.assertListEqual(d, hat.tolist())\n\n\tdef test_pre_step_dawa(self):\n\t\td = [1, 1, 1, 100, 10, 10, 10]\n\t\that = sorting.pre_step_DAWA(d, pow(2,100), 0.5, 0.25, 'freq', add_noise=False)\n\t\tself.assertListEqual(d, hat.tolist())\n\n\tdef test_hier_sort(self):\n\t\tattrs = [\n\t\t\t['a1', 'b1'],\n\t\t\t['a1', 'b2'],\n\t\t\t['a1', 'b1'],\n\t\t\t['a2', 'b3'],\n\t\t\t['a1', 'b4'],\n\t\t\t['a1', 'b4'],\n\t\t\t['a1', 'b4']\n\t\t]\n\t\tfreqs = np.array([1, 100, 1, 100, 1, 1, 10])\n\t\toutput = np.array([1, 1, 1, 1, 10, 100, 100])\n\n\t\torder, sorted_freqs =\tsorting.create_order(freqs, 0.2, 'hier_sort', attrs=attrs, add_noise=False)\n\t\tnp.testing.assert_array_equal(sorted_freqs, output)\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"dawa/unittests.py","file_name":"unittests.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"85011240","text":"from django.dispatch import Signal\nfrom django.dispatch.dispatcher import NO_RECEIVERS\n\n\nclass EventSignal(Signal):\n def __init__(self, name, module_name, version, providing_args=None):\n super(EventSignal, self).__init__(providing_args)\n self.name = name\n self.module_name = module_name\n self.version = version\n\n def send(self, sender, allow_non_idempotent=True, **named):\n \"\"\"\n Send signal from sender to all connected receivers.\n\n If any receiver raises an error, the error propagates back through send,\n terminating the dispatch loop, so it is quite possible to not have all\n receivers called if a raises an error.\n\n Arguments:\n\n sender\n The sender of the signal Either a specific object or None.\n\n named\n Named arguments which will be passed to receivers.\n\n Returns a list of tuple pairs [(receiver, response), ... 
].\n \"\"\"\n responses = []\n if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:\n return responses\n\n for receiver in self._live_receivers(sender):\n\n if not allow_non_idempotent:\n if not getattr(receiver, 'is_idempotent', False):\n continue\n\n response = receiver(signal=self, sender=sender, **named)\n responses.append((receiver, response))\n\n return responses\n","sub_path":"src/libs/common_domain/event_signal.py","file_name":"event_signal.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228582704","text":"#!/usr/bin/env python3\n# Copyright (C) 2016-2018 by Ali Baharev \n# All rights reserved.\n# BSD license.\n\nfrom os import listdir\nfrom os.path import isdir\n\ndef main():\n ignore = set(('__pycache__', 'images', 'nyuszi_template'))\n dirs = sorted(e for e in listdir('.') if isdir(e) and e not in ignore)\n print(', '.join(dirs))\n links = '\\n'.join(to_link(d) for d in dirs)\n with open('index.html', 'w') as f:\n f.write(PREAMBLE)\n f.write(links)\n f.write(POSTAMBLE)\n\ndef to_link(d):\n return '
<li><a href=\"{0}\">{0}</a></li>'.format(d)\n\n\nPREAMBLE = \\\n'''\n<html>\n  <head>\n    <title>Sandbox projects</title>\n  </head>\n  <body>\n    <ul>\n'''\n\nPOSTAMBLE = \\\n'''\n    </ul>\n  </body>\n</html>\n
    \n\n\n'''\n\nif __name__ == '__main__':\n main()\n","sub_path":"sandbox/create_index.py","file_name":"create_index.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"511493051","text":"import os\nimport pytest\nfrom astropy.io.fits.diff import FITSDiff\n\nfrom jwst.pipeline.collect_pipeline_cfgs import collect_pipeline_cfgs\nfrom jwst.stpipe import Step\n\n@pytest.fixture(scope=\"module\")\ndef run_pipeline(jail, rtdata_module):\n \"\"\"Run the calwebb_spec2 pipeline on a single NIRSpec MOS exposure.\"\"\"\n\n rtdata = rtdata_module\n\n # Get the cfg files\n collect_pipeline_cfgs(\"config\")\n\n # Get the MSA metadata file referenced in the input exposure\n rtdata.get_data(\"nirspec/mos/jw95065006001_0_short_msa.fits\")\n\n # Get the input exposure\n rtdata.get_data(\"nirspec/mos/f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod.fits\")\n\n # Run the calwebb_spec2 pipeline; save results from intermediate steps\n args = [\"config/calwebb_spec2.cfg\", rtdata.input,\n \"--steps.assign_wcs.save_results=true\",\n \"--steps.msa_flagging.save_results=true\",\n \"--steps.extract_2d.save_results=true\",\n \"--steps.srctype.save_results=true\",\n \"--steps.wavecorr.save_results=true\",\n \"--steps.flat_field.save_results=true\",\n \"--steps.pathloss.save_results=true\",\n \"--steps.barshadow.save_results=true\"]\n Step.from_cmdline(args)\n\n return rtdata\n\n\n@pytest.mark.bigdata\n@pytest.mark.parametrize(\"output\",[\n \"assign_wcs\", \"msa_flagging\", \"extract_2d\", \"wavecorr\", \"flat_field\", \"srctype\",\n \"pathloss\", \"barshadow\", \"cal\", \"s2d\", \"x1d\"])\ndef test_nirspec_mos_spec2(run_pipeline, fitsdiff_default_kwargs, output):\n \"\"\"Regression test of the calwebb_spec2 pipeline on a\n NIRSpec MOS exposure.\"\"\"\n\n # Run the pipeline and retrieve outputs\n rtdata = run_pipeline\n rtdata.output = \"f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_\" + output + \".fits\"\n\n # Get the truth files\n rtdata.get_truth(os.path.join(\"truth/test_nirspec_mos_spec2\",\n \"f170lp-g235m_mos_observation-6-c0e0_001_dn_nrs1_mod_\" + output + \".fits\"))\n\n # Compare the results\n diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)\n assert diff.identical, diff.report()\n","sub_path":"jwst/regtest/test_nirspec_mos_spec2.py","file_name":"test_nirspec_mos_spec2.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400172324","text":"import unittest\n\nimport numpy as np\n\nimport chainer\nimport onnx\nimport onnx_chainer\nfrom chainer import testing\nfrom onnx_chainer.testing import test_mxnet\n\nMXNET_OPSET_VERSION = {\n 'Add': (1, 6, 7),\n 'AddConst': (1, 6, 7),\n 'Absolute': (1, 6),\n 'Div': (1, 6, 7),\n 'Mul': (1, 6, 7),\n 'Neg': (1, 6),\n 'PowVarConst': (1, 7),\n 'Sub': (1, 6, 7),\n 'Clip': (6,),\n 'Exp': (1, 6),\n 'MatMul': (1, 6, 7),\n 'Maximum': (1, 6),\n 'Minimum': (1, 6),\n 'Sqrt': (1, 6),\n 'Sum': (1,),\n}\n\n\n@testing.parameterize(\n {'info': 'Neg', 'ops': '-a'},\n {'info': 'Absolute', 'ops': 'abs(a)'},\n {'info': 'Clip', 'ops': 'chainer.functions.clip(a, 0.1, 0.2)'},\n {'info': 'Exp', 'ops': 'chainer.functions.exp(a)'},\n {'info': 'Sqrt', 'ops': 'chainer.functions.sqrt(a)'},\n {'info': 'PowVarConst',\n 'ops': 'chainer.functions.math.basic_math.pow(a, 2)'},\n {'info': 'Sum',\n 'ops': 'chainer.functions.sum(a, axis=1)'},\n {'info': 'Sum',\n 'ops': 
'chainer.functions.sum(a, axis=0, keepdims=True)'},\n    {'info': 'AddConst', 'ops': 'a + 1'},\n)\nclass TestUnaryMathOperators(unittest.TestCase):\n\n    def setUp(self):\n        class Model(chainer.Chain):\n\n            def __init__(self, ops):\n                super(Model, self).__init__()\n                self.ops = ops\n\n            def __call__(self, a):\n                if not isinstance(a, chainer.Variable):\n                    a = chainer.Variable(a)\n                return eval(self.ops)\n\n        self.model = Model(self.ops)\n        self.a = chainer.Variable(np.ones((2, 3), dtype=np.float32))\n        self.fn = self.info + '.onnx'\n\n    def test_compatibility(self):\n        if MXNET_OPSET_VERSION[self.info] is not None:\n            for opset_version in MXNET_OPSET_VERSION[self.info]:\n                test_mxnet.check_compatibility(\n                    self.model, self.a, self.fn, opset_version=opset_version)\n        for opset_version in range(1, onnx.defs.onnx_opset_version() + 1):\n            onnx_chainer.export(self.model, self.a)\n\n\n@testing.parameterize(\n    {'info': 'Add', 'ops': 'a + b'},\n    {'info': 'Sub', 'ops': 'a - b'},\n    {'info': 'Mul', 'ops': 'a * b'},\n    {'info': 'Div', 'ops': 'a / b'},\n    {'info': 'MatMul', 'ops': 'chainer.functions.matmul(a, b, transb=True)'},\n    {'info': 'Maximum', 'ops': 'chainer.functions.maximum(a, b)'},\n    {'info': 'Minimum', 'ops': 'chainer.functions.minimum(a, b)'},\n)\nclass TestBinaryMathOperators(unittest.TestCase):\n\n    def setUp(self):\n        class Model(chainer.Chain):\n\n            def __init__(self, ops):\n                super(Model, self).__init__()\n                self.ops = ops\n\n            def __call__(self, a, b):\n                if not isinstance(a, chainer.Variable):\n                    a = chainer.Variable(a)\n                if not isinstance(b, chainer.Variable):\n                    b = chainer.Variable(b)\n                return eval(self.ops)\n\n        self.model = Model(self.ops)\n        a = chainer.Variable(np.ones((2, 3), dtype=np.float32))\n        b = chainer.Variable(np.ones((2, 3), dtype=np.float32) * 2)\n        self.x = (a, b)\n        self.fn = self.info + '.onnx'\n\n    def test_compatibility(self):\n        if MXNET_OPSET_VERSION[self.info] is not None:\n            for opset_version in MXNET_OPSET_VERSION[self.info]:\n                test_mxnet.check_compatibility(\n                    self.model, self.x, self.fn, opset_version=opset_version)\n        for opset_version in range(1, onnx.defs.onnx_opset_version() + 1):\n            onnx_chainer.export(self.model, self.x)\n","sub_path":"tests/functions_tests/test_maths.py","file_name":"test_maths.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609240044","text":"from django.conf.urls import url\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns=[\n    url('^$', views.home, name='home'),\n    url(r'^new/image$', views.new_image, name='new-image'),\n    url(r'^profile/(\\d+)',views.profile, name='profile'),\n    url(r'^profile',views.own_profile, name='myprofile'),\n    url(r'^edit_profile/(?P\\w{0,50})',views.edit_profile,name='edit_profile'),\n    url(r'^like/(\\d+)$',views.like,name='like'),\n    url(r'^image/(?P\\d+)', views.single_image, name='single_image'),\n    url(r'^search/', views.search, name='search')\n]\nif settings.DEBUG:\n    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)","sub_path":"insta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629554042","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom blog.models import Post, Categories\nfrom django.template import Context,loader\nfrom django.views.decorators.csrf import csrf_exempt\n\n@csrf_exempt\ndef add_post(request):\n\t\n\tif len(request.POST['title']) == 0:\n\t\treturn HttpResponse('Please write at least one character.')\n\telse:\n\t\tentry_title = request.POST['title']\n\t\tentry_category = request.POST['taskOption']\n\n\treturn HttpResponse(entry_title)\n\ndef index(request,page = 1):\n\tpage = int(page)\n\ttitle = 'Koo Young-seo is a genius'\n\tpost = Post.objects.all().order_by('-created')[0:5]\n\ttpl = loader.get_template('blog/list.html')\n\tctx = Context({\n\t\t'page_title':title,\n\t\t'post':post,\n\t\t'post_id':page,\n\t}) # keys wrapped in '' become the variables usable in the HTML template\n\treturn HttpResponse(tpl.render(ctx))\n\t#return HttpResponse(\"Hello, this is really Koo Young-seo. Nice to meet you, everyone [%s]\" % post[0].title) # Title.encode('utf-8')\ndef read(request,read = None):\n\tif(int(read) <= 0):\n\t\tread = 1\n\tread = int(read)\n\ttitle = 'Post reading page'\n\n\n\tpostread = Post.objects.get(id = int(read))\n\n\ttry:\n\t\tnext_post = postread.get_next_by_created()\n\texcept:\n\t\tnext_post = None\n\ttry:\n\t\tprevious_post = postread.get_previous_by_created()\n\texcept:\n\t\tprevious_post = None\n\n\t#return HttpResponse('This is %s, post number %d.' 
% (postread.title , postread.id))\n\ttpl = loader.get_template('blog/read.html')\n\tctx = Context({\n\t\t'post_title':title,\n\t\t'post': postread,\n\t\t'post_next':next_post,\n\t\t'post_previous':previous_post,\n\t}) # keys wrapped in '' become the variables usable in the HTML template\n\n\treturn HttpResponse(tpl.render(ctx))\n\ndef post_list(request):\n\treturn render(request, 'blog/post_list.html', {})\ndef write(request):\n\tpage_title = 'This is the writing page.'\n\ttpl = loader.get_template('blog/write.html')\n\tcategories = Categories.objects.all()\n\tctx = Context({\n\t\t'page_title' : page_title,\n\t\t'categories' : categories,\n\t})\n\treturn HttpResponse(tpl.render(ctx))","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54243353","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 <+YOU OR YOUR COMPANY+>.\n#\n# This is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3, or (at your option)\n# any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software; see the file COPYING.  If not, write to\n# the Free Software Foundation, Inc., 51 Franklin Street,\n# Boston, MA 02110-1301, USA.\n#\n\nimport numpy\nfrom gnuradio import gr\nimport pmt\nimport socket\n\nclass J2497_decoder(gr.sync_block):\n    \"\"\"\n    docstring for block J2497_decoder\n    \"\"\"\n    #def __init__(self, if_peak_threshold, if_peak_offset):\n    def __init__(self, if_peak_threshold, if_peak_offset,do_udp,udp_port):\n        gr.sync_block.__init__(self,\n            name=\"J2497_decoder\",\n            in_sig=[numpy.float32],\n            out_sig=[])\n\n        self.message_port_register_out(pmt.intern(\"out\"))\n        self.start_tag = 0\n        self.end_tag = 0\n        self.if_data = numpy.array([], dtype=numpy.float32)\n        self.do_analysis = False\n        self.message_number = 0\n        self.prev_time = 0\n        self.if_peak_threshold = if_peak_threshold\n        self.if_peak_offset = if_peak_offset\n        self.do_udp = do_udp\n        self.udp_port = udp_port\n        \n        # Create UDP Socket\n        if self.do_udp:\n            self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\n    def work(self, input_items, output_items):\n        in0 = input_items[0]\n        in0_len = len(in0)\n        window_start = self.nitems_read(0)\n\n        # Locate Tags\n        tags = self.get_tags_in_window(0, 0, in0_len, pmt.string_to_symbol(\"burst\"))\n\n        # Tag Exists\n        for tag in tags: #.offset, .key, .value\n            \n            # Record on Start\n            if str(tag.value) == \"#t\":\n                self.start_tag = tag.offset\n                self.if_data = numpy.append(self.if_data, in0[self.start_tag-window_start:])\n\n            # Stop Recording on Stop\n            if str(tag.value) == \"#f\":\n                self.end_tag = tag.offset\n                burst_size = self.end_tag - self.start_tag\n\n                # Perfect Size\n                if burst_size > 4000 and burst_size < 50000: # 1 Sync + 1 MID + 20? 
Characters + 1 Checksum + Gap = 23 characters * 10 bits * 100 us = 23000 + Gap\n self.do_analysis = True\n\n # Multiple Windows\n if len(self.if_data) > 0:\n self.if_data = numpy.append(self.if_data, in0[:self.end_tag-window_start])\n \n # One Window\n else:\n self.if_data = in0[self.start_tag-window_start:self.end_tag-window_start]\n\n # Ignore and Reset\n else:\n self.start_tag = 0\n self.end_tag = 0\n self.if_data = numpy.array([], dtype=numpy.float32)\n\n # Whole Window with no Stop Tag\n if len(tags) == 0 and len(self.if_data) < 50000 and len(self.if_data) > 0:\n self.if_data = numpy.append(self.if_data, in0)\n\n # Do Analysis on all the Data\n if self.do_analysis is True:\n\n # Obtain Bitstream\n get_bits = self.getBitstream()\n\n # Parse Bits\n if len(get_bits) > 8:\n get_message, get_message_hex = self.getFields(get_bits)\n\n # Print to Output Port\n self.message_port_pub(pmt.intern(\"out\"), pmt.to_pmt(get_message))\n\n # Send Message to UDP Port\n if self.do_udp and len(get_message_hex) > 0:\n self.sendUDP(get_message_hex)\n\n # Reset\n self.start_tag = 0\n self.end_tag = 0\n self.if_data = numpy.array([], dtype=numpy.float32)\n self.do_analysis = False\n\n return in0_len\n\n\n def getBitstream(self):\n \"\"\" Prints out the bitstream from the instantaneous frequency.\n \"\"\"\n initial_offset = self.if_peak_offset #106#109 # Message start to first peak\n interval = 100 # 100 us between bits\n reference_sample = -8 # Bottom of the Peak\n\n # Calculate Expected Number of Bits\n num_bits = len(self.if_data)/interval\n\n # Parse the IF Data\n bitstream = \"1\"\n for n in range(0,num_bits-1):\n\n # Phase Change\n if self.if_data[initial_offset+n*interval] - self.if_data[initial_offset+n*interval+reference_sample] > self.if_peak_threshold:\n if bitstream[-1] is \"1\":\n bitstream = bitstream + \"0\"\n else:\n bitstream = bitstream + \"1\"\n\n # No Phase Change\n else:\n bitstream = bitstream + bitstream[-1]\n\n return bitstream\n\n\n def getFields(self, bits):\n \"\"\" Prints out the content of the message fields from the bitstream.\n \"\"\"\n # Update Count\n self.message_number = self.message_number + 1\n\n # Get Time\n start_time = self.start_tag / 1e6 # 1e6 = Sampling Rate\n delta_time = start_time - self.prev_time\n\n # Find the Fields from Start/Stop Bits\n start_bit = False\n bit_counter = 0\n data_bytes = \"\"\n\n for n in range(0, len(bits)):\n\n # Start Bit Found\n if start_bit is True:\n data_bytes = data_bytes + bits[n]\n bit_counter = bit_counter + 1\n\n # Reached End of Byte\n if bit_counter == 8:\n start_bit = False\n bit_counter = 0\n\n # Detect New Start Bit\n else: \n if bits[n] is \"0\" and start_bit is False:\n start_bit = True\n\n # Get Fields from Data Bits\n if len(data_bytes) >= 24:\n mid = data_bytes[0:8]\n data = data_bytes[8:-8]\n checksum = data_bytes[-8:]\n\n # Construct the Output Message\n message = \"\"\n message = message + \"MESSAGE NUMBER: \" + str(self.message_number) + \"\\t\\t\"\n message = message + \"TIME: \" + str(start_time) + ' s' + \"\\t\\t\"\n message = message + \"DELTA: \" + str(delta_time) + ' s' + \"\\n\"\n message = message + \"MID: \" + '0x%0*X' % (2,int(mid[::-1],2)) + \"\\t\\t\"\n message_hex = '%0*X' % (2,int(mid[::-1],2))\n\n # Valid Bitstream\n if len(data) % 8 == 0:\n\n # Order Bytes Correctly from Reversed Bitstream\n wrong_hex_order = ('%0*X' % (2,int(data[::-1],2))).zfill(len(data)/4)\n correct_hex_order = \"\"\n for m in range(0,len(wrong_hex_order),2):\n correct_hex_order = wrong_hex_order[m:m+2] + correct_hex_order\n 
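# worked example of the reordering above: a reversed bitstream that prints\n                # as wrong_hex_order = '3412' has each byte pair prepended ('34', then '12'),\n                # leaving correct_hex_order = '1234', i.e. the bytes in transmission order\n                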
message = message + \"DATA: \" + '0x' + correct_hex_order + \"\\t\\t\"\n message_hex = message_hex + correct_hex_order\n\n # Invalid Bitstream\n else:\n message = message + \"DATA: BITS MISSING\\t\\t\"\n\n message = message + \"CHECKSUM: \" + '0x%0*X' % (2,int(checksum[::-1],2))\n message_hex = message_hex + '%0*X' % (2,int(checksum[::-1],2))\n\n # Not Enough Bits\n else:\n \n # Construct the Output Message\n message = \"\"\n message = message + \"MESSAGE NUMBER: \" + str(self.message_number) + \"\\t\\t\"\n message = message + \"TIME: \" + str(start_time) + ' s' + \"\\t\\t\"\n message = message + \"DELTA: \" + str(delta_time) + ' s' + \"\\t\\t\"\n message = message + \"MID: NOT FOUND\\t\\t\"\n message = message + \"DATA: NOT FOUND\\t\\t\"\n message = message + \"CHECKSUM: NOT FOUND\"\n message_hex = \"\"\n\n # Store Time\n self.prev_time = start_time\n\n return message, message_hex\n\n\n def setIF_PeakThreshold(self,if_peak_threshold):\n self.if_peak_threshold = if_peak_threshold\n\n\n def setIF_PeakOffset(self,if_peak_offset):\n self.if_peak_offset = if_peak_offset\n\n\n def sendUDP(self, message_hex):\n \"\"\" Converts a message to bytes and sends it to a specified UDP port.\n \"\"\"\n # Convert Message\n udp_message = message_hex.decode('hex')\n \n # Send Message\n self.udp_socket.sendto(udp_message,(\"127.0.0.1\", self.udp_port))\n \n","sub_path":"python/J2497_decoder.py","file_name":"J2497_decoder.py","file_ext":"py","file_size_in_byte":8778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113749785","text":"n, k, m = map(int, input().split())\nlanguage_array = input().split()\ncost_array = list(map(int, input().split()))\n\nlanguage_dic = {}\nfor i in range(0, n):\n language_dic[language_array[i]] = i+1\n\n# print(language_dic)\n\ndef get_min(ids, costs):\n tmp_min = int(1e9)\n for tmp_id in ids:\n tmp_min = min(tmp_min, costs[tmp_id-1])\n return tmp_min\n\ncost_dic = {}\nfor i in range(0, k):\n group = list(map(int, input().split()))\n x = group[0]\n del group[:1]\n tmp_min = get_min(group, cost_array)\n for j in range(0, x):\n cost_dic[group[j]] = tmp_min\n\n# print(cost_dic)\n\nans = 0\nfor tmp_str in input().split():\n # print(\"tmp_str==\" + tmp_str + \", id==\" + str(language_dic[tmp_str]))\n tmp_id = language_dic[tmp_str]\n ans += cost_dic[tmp_id]\n\nprint(ans)\n","sub_path":"959B.py","file_name":"959B.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238403740","text":"#pylint: disable=no-init,non-parent-init-called,too-few-public-methods\n# non-parent-init-called is disabled to remove false positives from a bug in pyLint < 1.4\n\nfrom abc import ABCMeta, abstractmethod\nimport stresstesting\nimport mantid.simpleapi as ms\nfrom mantid import mtd\n\n\nclass ISISIndirectDiffractionReduction(stresstesting.MantidStressTest):\n \"\"\"\n Base class for tests that use the ISISIndirectDiffractionReduction algorithm.\n \"\"\"\n\n __metaclass__ = ABCMeta\n _output_workspace = None\n\n @abstractmethod\n def get_reference_file(self):\n \"\"\"\n Gets reference result file for workspace comparison.\n \"\"\"\n raise NotImplementedError()\n\n def runTest(self):\n \"\"\"\n Runs an ISISIndirectDiffractionReduction with the configured parameters.\n \"\"\"\n ms.ISISIndirectDiffractionReduction(InputFiles=self.raw_file,\n OutputWorkspace=self.output_workspace_group,\n Instrument=self.instrument,\n Mode=self.mode,\n 
SpectraRange=self.spectra_range,\n RebinParam=self.rebinning)\n\n self._output_workspace = mtd[self.output_workspace_group].getNames()[0]\n\n def validate(self):\n \"\"\"\n Validates the result workspace with the reference file.\n \"\"\"\n self.disableChecking.append('Instrument')\n return self._output_workspace, self.get_reference_file()\n\n#-------------------------------------------------------------------------------\n\nclass IRISDiffspecDiffractionTest(ISISIndirectDiffractionReduction):\n\n def __init__(self):\n ISISIndirectDiffractionReduction.__init__(self)\n\n self.instrument = 'IRIS'\n self.mode = 'diffspec'\n self.raw_file = 'IRS21360.raw'\n self.spectra_range = [105, 112]\n self.rebinning = '3.0,0.001,4.0'\n self.output_workspace_group = 'IRIS_Diffraction_DiffSpec_Test'\n\n def get_reference_file(self):\n return 'IRISDiffspecDiffractionTest.nxs'\n\n#-------------------------------------------------------------------------------\n\nclass TOSCADiffractionTest(ISISIndirectDiffractionReduction):\n\n def __init__(self):\n ISISIndirectDiffractionReduction.__init__(self)\n\n self.instrument = 'TOSCA'\n self.mode = 'diffspec'\n self.raw_file = 'TSC11453.raw'\n self.spectra_range = [146, 149]\n self.rebinning = '0.5,0.001,2.1'\n self.output_workspace_group = 'TOSCA_Diffraction_DiffSpec_Test'\n\n def get_reference_file(self):\n return 'TOSCADiffractionTest.nxs'\n\n#-------------------------------------------------------------------------------\n\nclass OSIRISDiffspecDiffractionTest(ISISIndirectDiffractionReduction):\n\n def __init__(self):\n ISISIndirectDiffractionReduction.__init__(self)\n\n self.instrument = 'OSIRIS'\n self.mode = 'diffspec'\n self.raw_file = 'osiris00101300.raw'\n self.spectra_range = [3, 962]\n self.rebinning = '2.0,0.001,3.0'\n self.output_workspace_group = 'OSIRIS_Diffraction_DiffSpec_Test'\n\n def get_reference_file(self):\n return 'OsirisDiffspecDiffractionTest.nxs'\n\n#-------------------------------------------------------------------------------\n\nclass OSIRISDiffonlyDiffractionTest(stresstesting.MantidStressTest):\n\n def runTest(self):\n ms.OSIRISDiffractionReduction(OutputWorkspace=\"OsirisDiffractionTest\",\n Sample=\"OSI89813.raw, OSI89814.raw, OSI89815.raw, OSI89816.raw, OSI89817.raw\",\n CalFile=\"osiris_041_RES10.cal\",\n Vanadium=\"OSI89757, OSI89758, OSI89759, OSI89760, OSI89761\")\n\n def validate(self):\n self.disableChecking.append('Instrument')\n return 'OsirisDiffractionTest', 'OsirisDiffractionTest.nxs'\n","sub_path":"Testing/SystemTests/tests/analysis/IndirectDiffractionTests.py","file_name":"IndirectDiffractionTests.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502971522","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n    # Home page login\n    url(r'^$', views.index),\n    url(r'^login$', views.index),\n\n    # Refresh the captcha on the home page\n    url(r'^changeVcode$', views.changVcode),\n\n    # Check whether the login account exists\n    url(r'^vaccount$', views.vaccount),\n\n    # Validate the login\n    url(r'^login.do$', views.logindo),\n\n    #Registration page\n    url(r'^regist/$', views.regist),\n\n    #Handle the registration\n    url(r'^regist.do$', views.registdo),\n\n    #Shop home page\n    url(r'^homepage$', views.homepage),\n\n    #Log out\n    url(r'^exit$', views.exit),\n\n    #Add an item to the cart\n    url(r'^addtocart$', views.addtocart),\n\n    #View the cart\n    url(r'^cart$', views.cart),\n\n    #Increase the quantity of a cart item by one\n    url(r'^addnum$', views.addone),\n\n    #Decrease the quantity of a cart item by one\n    url(r'^reducenum$',views.reduceone),\n\n    #Remove one item from the cart\n    url(r'^deletecart$', views.deletecart),\n\n    #Add an order\n    url(r'^addtoOrder', views.addtoOrder),\n\n    #View orders\n    url(r'^order$', views.order),\n\n    #Search feature\n    url(r'^search$', views.search),\n\n    #Recover a forgotten password\n    url(r'^retrieve$', views.retrieve),\n\n    #Check whether the email address exists\n    url(r'^vemail$', views.vemail),\n\n    #Send a verification code to the email address\n    url(r'^sendcode$', views.sendcode),\n\n    #Change the password\n    url(r'^motifypassword$', views.modifypassword)\n]","sub_path":"bookstore/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646048898","text":"from sklearn.base import BaseEstimator\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.utils.extmath import safe_sparse_dot\nfrom sklearn.utils.validation import check_array, _check_sample_weight, \\\n    FLOAT_DTYPES\n\nfrom ya_glm.loss.LossConfig import get_loss_config\nfrom ya_glm.autoassign import autoassign\nfrom ya_glm.processing import process_X, deprocess_fit\nfrom ya_glm.solver.default import get_default_solver\n\n\nclass Glm(BaseEstimator):\n    \"\"\"\n    Base class for GLMs.\n\n    Parameters\n    ----------\n    loss: str, ya_glm.LossConfig.LossConfig\n        The loss function. If a string is provided the loss function parameters are set to their default values. Otherwise the loss function parameters can be specified by providing a LossConfig object. See ya_glm.LossConfig for available loss functions.\n\n    fit_intercept: bool\n        Whether or not to fit intercept, which is not penalized.\n\n    standardize: bool\n        Whether or not to perform internal standardization before fitting the data. Standardization means mean centering and scaling each column by its standard deviation. For the group lasso penalty an additional scaling is applied that scales each variable by 1 / sqrt(group size). Putting each variable on the same scale makes sense for fitting penalized models. Note the fitted coefficient/intercept is transformed to be on the original scale of the input data.\n\n    solver: str, ya_glm.GlmSolver\n        The solver used to solve the penalized GLM optimization problem. If this is set to 'default' we try to guess the best solver. 
Otherwise a custom solver can be provided by specifying a GlmSolver object.\n\n Attributes\n ----------\n coef_: array-like, shape (n_features, ) or (n_features, n_responses)\n The fitted coefficient vector or matrix (for multiple responses).\n\n intercept_: None, float or array-like, shape (n_features, )\n The fitted intercept.\n\n classes_: array-like, shape (n_classes, )\n A list of class labels known to the classifier.\n\n opt_data_: dict\n Data output by the optimization algorithm.\n \"\"\"\n @autoassign\n def __init__(self, loss='lin_reg',\n fit_intercept=True, standardize=False,\n solver='default'):\n pass\n\n def _get_loss_config(self):\n \"\"\"\n Returns the loss function config.\n\n Output\n ------\n loss: ya_glm.LossConfig.LossConfig\n The loss function config object.\n \"\"\"\n return get_loss_config(loss=self.loss)\n\n @property\n def _estimator_type(self):\n \"\"\"\n Type of the estimator.\n\n Output\n ------\n _estimator_type: str\n Either 'regressor' or 'classifier'.\n \"\"\"\n return self._get_loss_config()._estimator_type\n\n def _get_solver(self):\n \"\"\"\n Returns the solver config.\n\n Output\n ------\n solver: ya_glm.GlmSolver\n The solver config object.\n \"\"\"\n\n if type(self.solver) == str and self.solver == 'default':\n # try to guess the best solver for our purposes\n # e.g. FISTA does not work for quantile regression loss\n return get_default_solver(loss=self._get_loss_config(),\n penalty=self._get_penalty_config())\n\n else:\n return self.solver\n\n def fit(self, X, y, sample_weight=None):\n \"\"\"\n Fits the penalized GLM.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The training covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The training response data.\n\n sample_weight: None or array-like, shape (n_samples,)\n Individual weights for each sample.\n\n Output\n ------\n self\n Fitted estimator.\n \"\"\"\n\n # basic formattin check\n X, y, sample_weight = self._validate_data(X=X, y=y,\n sample_weight=sample_weight)\n\n # run prefitting procedures including preprocessing the X, y data\n X_pro, y_pro, pre_pro_out, penalty_data =\\\n self.prefit(X=X, y=y, sample_weight=sample_weight)\n\n # get the loss, penalty and solver config\n loss = self._get_loss_config()\n penalty = self._get_penalty_config()\n solver = self._get_solver()\n\n # possibly add information to the penalty\n # e.g. the initial coefficient for concave penalities\n if penalty_data is not None and len(penalty_data) > 0:\n penalty.set_data(penalty_data)\n\n # solve the optimzation problem!!!\n coef, intercept, out_data = \\\n solver.solve(X=X_pro, y=y_pro,\n loss=loss,\n penalty=penalty,\n fit_intercept=self.fit_intercept,\n sample_weight=sample_weight)\n\n # set the fit coefficient e.g. undo preprocessing scaling\n self._set_fit(fit_out={'coef': coef,\n 'intercept': intercept,\n 'opt_data': out_data},\n pre_pro_out=pre_pro_out)\n\n return self\n\n def prefit(self, X, y, sample_weight=None):\n \"\"\"\n Preprocesses data and possibly performs other prefitting routines e.g. 
fitting an initial estimator.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The response data.\n\n sample_weight: None or array-like, shape (n_samples,)\n Individual weights for each sample.\n\n Output\n ------\n X_pro: array-like, shape (n_samples, n_features)\n The processed covariate data.\n\n y_pro: array-like, shape (n_samples, )\n The processed response data.\n\n pro_pro_out: dict\n Data from preprocessing e.g. X_center, X_scale.\n\n penalty_data: None, dict\n Additional data that the penalty needs to know about.\n \"\"\"\n # preproceess X, y\n X_pro, y_pro, pre_pro_out = \\\n self.preprocess(X=X, y=y, sample_weight=sample_weight, copy=True)\n\n # by default we dont do any thing here\n penalty_data = None\n\n return X_pro, y_pro, pre_pro_out, penalty_data\n\n def _validate_data(self, X, y, sample_weight=None, accept_sparse=True):\n \"\"\"\n Validates the X/y data. This should not change the raw input data, but may reformat the data (e.g. convert pandas to numpy).\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The response data.\n \"\"\"\n\n X = check_array(X, accept_sparse=accept_sparse,\n dtype=FLOAT_DTYPES)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X,\n dtype=X.dtype)\n\n # make sure y is numpy and of same dtype as X\n # TODO: do we actually want this for log_reg/multinomial?\n y = check_array(y, ensure_2d=False)\n\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.reshape(-1)\n\n # make sure X, y have same number of samples\n if y.shape[0] != X.shape[0]:\n raise ValueError(\"X and y must have the same number of rows!\")\n\n return X, y, sample_weight\n\n def preprocess(self, X, y, sample_weight=None, copy=True, check_input=True):\n \"\"\"\n Preprocesses the data for fitting. This method may transform the data e.g. centering and scaling X. If sample weights are provided then these are used for computing weighted means / standard deviations for standardization. For the group lasso penalty an additional scaling is applied that scales each variable by 1 / sqrt(group size).\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The covariate data.\n\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The response data.\n\n sample_weight: None or array-like, shape (n_samples,)\n Individual weights for each sample.\n\n copy: bool\n Whether or not to copy the X/y arrays or modify them in place.\n\n Output\n ------\n X_pro, y_pro, pre_pro_out\n\n X_pro: array-like, shape (n_samples, n_features)\n The possibly transformed covariate data.\n\n y_pro: array-like, shape (n_samples, )\n The possibly transformed response data.\n\n pro_pro_out: dict\n Data from preprocessing e.g. 
X_center, X_scale.\n \"\"\"\n groups = self.groups if hasattr(self, 'groups') else None\n\n X, out = process_X(X,\n standardize=self.standardize,\n groups=groups,\n sample_weight=sample_weight,\n copy=copy,\n check_input=check_input,\n accept_sparse=True,\n allow_const_cols=not self.fit_intercept)\n\n # subclass should implement this\n y, y_out = self._process_y(X=X, y=y,\n sample_weight=sample_weight,\n copy=copy)\n out.update(y_out)\n\n return X, y, out\n\n def _set_fit(self, fit_out, pre_pro_out):\n \"\"\"\n Sets the fit from the ouptut of the optimization algorithm.\n For example, this undoes any centering and scaling we have performed on the data so the fitted coefficient matches the raw input data.\n\n Parameters\n ----------\n fit_out: dict\n Contains the output of solve e.g.\n fit_out['coef'], fit_out['intercept'], fit_out['opt_data']\n\n pre_pro_out: None, dict\n Output of preprocess\n \"\"\"\n coef = fit_out['coef']\n intercept = fit_out.pop('intercept', None)\n\n self.coef_, self.intercept_ = \\\n deprocess_fit(coef=coef,\n intercept=intercept,\n pre_pro_out=pre_pro_out,\n fit_intercept=self.fit_intercept)\n\n if 'opt_data' in fit_out:\n self.opt_data_ = fit_out['opt_data']\n\n # for classification models\n if 'classes' in pre_pro_out:\n self.classes_ = pre_pro_out['classes']\n\n def decision_function(self, X):\n \"\"\"\n The GLM decision function i.e. z = X.T @ coef + interept\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The covariate data.\n\n Output\n ------\n z: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The decision function values.\n \"\"\"\n check_is_fitted(self)\n X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])\n\n # TODO: for multi-response our coef_ is the transpose of sklearn's\n # convention. I think our choice of (n_features, n_responses)\n # Do we want to be stick with this choice?\n z = safe_sparse_dot(X, self.coef_, # .T\n dense_output=True)\n\n if hasattr(self, 'intercept_') and self.intercept_ is not None:\n z += self.intercept_\n\n return z\n\n def _more_tags(self):\n return {'requires_y': True}\n\n ################################\n # sub-classes should implement #\n ################################\n\n # this is set by the LossMixin\n def _process_y(self, X, y, sample_weight=None, copy=True):\n \"\"\"\n Processing for the y data e.g. 
transform class labels to indicator variables for multinomial.\n\n Parameters\n ---------\n y: array-like, shape (n_samples, ) or (n_samples, n_responses)\n The response data.\n\n sample_weight: None or array-like, shape (n_samples,)\n Individual weights for each sample\n\n copy: bool\n Whether or not to copy the X/y arrays or modify them in place.\n\n Output\n ------\n y: array-like\n The possibly transformed response data.\n \"\"\"\n # subclass should overwrite\n raise NotImplementedError\n\n def _get_penalty_config(self):\n \"\"\"\n Gets the penalty config.\n\n Output\n ------\n penalty: ya_glm.PenaltyConfig.PenaltyConfig\n A penalty config object.\n \"\"\"\n # subclass should implement!\n raise NotImplementedError\n\n def get_pen_val_max(self, X, y, sample_weight=None):\n \"\"\"\n Returns the largest reasonable penalty parameter for a given dataset.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The training covariate data.\n\n y: array-like, shape (n_samples, )\n The training response data.\n\n sample_weight: None or array-like, shape (n_samples,)\n Individual weights for each sample.\n\n Output\n ------\n pen_val_max: float\n Largest reasonable tuning parameter value.\n \"\"\"\n # subclasses should implement!\n raise NotImplementedError\n","sub_path":"ya_glm/base/Glm.py","file_name":"Glm.py","file_ext":"py","file_size_in_byte":13421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334437629","text":"import os\nimport shutil\npicpath = './Kenya_Refugee_Camps/250/'\nxmlpath = './xml_file/250/20180513/'\nannopath = './250_dataset/20180513/'\ntifpath = './250_dataset/20180513/'\nfor f in os.listdir(xmlpath):\n if not f.startswith('.'):\n if 'none' in f:\n continue\n tifname,_ = f.split('.')\n tifname += '.tif'\n num,rest = f.split('_')\n sub,typ = rest.split('.')\n newnum = int(num)*16+int(sub)\n zerolen = 6 - len(str(newnum))\n padded = ['0' for i in range(zerolen)]\n padded = ''.join(padded)\n newnum = padded+str(newnum)\n final = newnum+'.'+typ\n finaltif = newnum + '.' 
+ 'tif'\n        # print (f + ' is changed to ' + final)\n        fulpath = picpath + tifname\n        # print ('tif name: ' + tifname + ' exist: ' +\\\n        #        str(os.path.isfile(fulpath)))\n        shutil.copy2(xmlpath+f, annopath+final)\n        shutil.copy2(fulpath, tifpath+finaltif)\n\n\n","sub_path":"dataset/preprocessing_script/matchpic.py","file_name":"matchpic.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213948459","text":"#_*_ coding:utf-8 _*_\n\nimport scrapy\nfrom scrapy.http import Request\nfrom qsbk.items import QsbkItem\n\nclass QsbkSpider(scrapy.Spider):\n    name = \"qsbk\"\n    allowed_domains = [\"qiushibaike.com\"]\n    start_urls = [\n\"http://www.qiushibaike.com/hot\"\n    ]\n    url_header = \"http://www.qiushibaike.com\"\n    page = 1\n\n    author_db = []\n    content_db = []\n    support_db = []\n    comment_db = []\n    \n#Forge the request headers\n#    headers = {\n#        \"User-Agent\":\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0\"\n#    }\n    \n#    url_head = \"http://www.qiushibaike.com/hot/page/\"\n#    url_tail = \"/?s=4875630\"\n\n#Build the Request and fetch the response using the forged headers\n#The headers could also be defined in setting.py\n#    def start_requests(self):\n#        for url in self.start_urls:\n#            yield self.make_requests_from_url(url)\n#    def make_requests_from_url(self,url):\n#        return(Request(url,headers = self.headers,dont_filter = True))\n\n#Extract the scraped data from the response\n    def parse(self,response):\n#        filename = \"test\"\n#        with open(filename,\"w\") as f:\n#            f.write(response.body)\n        item = QsbkItem()\n        selector = scrapy.selector.Selector(response)\n        infos = selector.xpath('//*[@id=\"content-left\"]/div[@class=\"article block untagged mb15\"]')\n\n        for each_info in infos:\n            #Get the author\n            author = each_info.xpath('div[@class=\"author clearfix\"]/a[2]/@title').extract()\n            # print(\"***\" * 20)\n            self.author_db.extend(author)\n            # print(author[0].encode(\"utf-8\"))\n\n            #Get the content\n            content = each_info.xpath('div[@class=\"content\"]/text()').extract()\n            for i in range(len(content)):\n                self.content_db.append(content[i].strip()) #strip whitespace from the content\n            # self.content_db.extend(content)\n            # print(content[0].encode(\"utf-8\"))\n\n            #Get the number of upvotes\n            support = each_info.xpath('div[@class=\"stats\"]/span[1]/i/text()').extract()\n            self.support_db.extend(support)\n            # print(support[0].encode(\"utf-8\"))\n\n            #Get the number of comments\n            comment = each_info.xpath('div[@class=\"stats\"]/span[2]/a/i/text()').extract()\n            self.comment_db.extend(comment)\n            # print(comment[0].encode(\"utf-8\"))\n\n            item[\"author\"] = author\n            item[\"content\"] = content\n            item[\"support\"] = support\n            item[\"comment\"] = comment\n            yield item\n\n        # with open(\"Author\",\"w\") as f:\n        #     for each_author in self.author_db:\n        #         f.write(each_author.encode(\"utf-8\") + \"\\n\")\n        # with open(\"Content\",\"w\") as f:\n        #     for each_content in self.content_db:\n        #         f.write(each_content.encode(\"utf-8\") + \"\\n\")\n\"\"\"\n        next_page = selector.xpath('//*[@id=\"content-left\"]/ul[@class=\"pagination\"]/li[last()]/a/@href').extract()\n        next_page = next_page[0].encode(\"utf-8\")\n        print(\"**\" * 20)\n        if next_page[:4] == \"/hot\" and self.page < 3:\n            # print(\"--\" * 20)\n            next_url = self.url_header + next_page\n            # print(next_url)\n            self.page += 1\n            yield scrapy.Request(next_url,callback = self.parse)\n        else:\n            print(\"--\" * 20)\n            print(\"This is the last page!\")\n            print(\"Total pages = \" + str(self.page))\n            print(\"--\" * 
20)\n\"\"\"","sub_path":"qsbk/spiders/qsbk_spider.py","file_name":"qsbk_spider.py","file_ext":"py","file_size_in_byte":3474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"364995790","text":"import numpy\nimport requests\nimport json\nimport time\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.metrics.pairwise import cosine_distances\nfrom gateway import ontology_as_classes\n\n# based on instance matching in the IoT server website\n# calling the ontology and specifying variables\n# euclidean distance the lower the value, the similarity is higher. 0 the values are equal. there is no maximum\n# cosine distance lower value, higher similarity. 0 the values are equal. 1 is the maximum.\n# synset similarity ranges from 0 to 1. 1 is equal 0 is opposite\nenable_semantic = False\nif enable_semantic: from nltk.corpus import wordnet as wn\n\n\ndef syntactic_match(corpus_list_p, method_selection):\n # creating syntactic array results and extracting features from corpus\n syntactic_array_p = numpy.array([])\n vectorizer = CountVectorizer()\n features = vectorizer.fit_transform(corpus_list_p).todense()\n # creating semantic array results\n if enable_semantic: semantic_array_p = numpy.array([])\n syntactic_time = 0\n semantic_time = 0\n for i in range(1, len(features)):\n if method_selection is \"cosine\":\n # Calculating cosine distance between the features requirement and each app. Adding the result to the array\n start_syntactic = time.perf_counter()\n distance = cosine_distances(features[0], features[i])\n syntactic_array_p = numpy.append(syntactic_array_p, 1-distance)\n stop_syntactic = time.perf_counter()\n # calculating semantic similarity of the synonyms of the requirement and the description if each app\n if enable_semantic:\n semantic_sim = semantic_matching(corpus_list_p[0], corpus_list_p[i])\n semantic_array_p = numpy.append(semantic_array_p, 1-semantic_sim)\n stop_semantic = time.perf_counter()\n syntactic_time += (stop_syntactic - start_syntactic)\n semantic_time += (stop_semantic - stop_syntactic)\n else:\n # idem but using euclidean distance\n distance = euclidean_distances(features[0], features[i])\n syntactic_array_p = numpy.append(syntactic_array_p, distance)\n if enable_semantic:\n semantic_sim = semantic_matching(corpus_list_p[0], corpus_list_p[i])\n semantic_array_p = numpy.append(semantic_array_p, semantic_sim)\n if enable_semantic:\n return syntactic_array_p, semantic_array_p, syntactic_time, semantic_time\n else:\n return syntactic_array_p, syntactic_time, semantic_time\n\n\ndef semantic_matching(word1, word2):\n # getting synonyms set from the first word of the corpus (requirement)\n synonyms_1 = wn.synsets(word1)\n # getting synonyms set from the app\n synonyms_2 = wn.synsets(word2)\n # if there are no synonyms for the word, words are opposite\n if len(synonyms_1) == 0 or len(synonyms_2) == 0:\n max_sim = 1\n return max_sim\n else:\n # it compares all the synsets and find the highest similarity\n max_sim = -1\n best_pair = None, None\n for synonym in synonyms_1:\n for synonym_2 in synonyms_2:\n sim = synonym.path_similarity(synonym_2)\n if sim is None:\n continue\n if sim > max_sim:\n max_sim = sim\n best_pair = synonym, synonym_2\n max_sim = 1 - max_sim\n return max_sim\n\n\ndef subscribe(match_app, previus_match, requirement1):\n # specifying data for post to subscribe serveces and apps\n header = {\"X-M2M-Origin\": \"admin:admin\", 
\"Accept\": \"application/json\", 'content-type': 'application/json;ty=23'}\n url = match_app[0][\"app\"]+\"/DATA\"\n data = {\n \"m2m:sub\": {\n \"xmlns:m2m\": \"http://www.onem2m.org/xml/protocols\",\n \"nu\": requirement1,\n \"nct\": \"2\"\n }\n }\n # deleting previous subscriptions if exists\n for match in previus_match:\n if \"subscription\" in match:\n requests.delete(match[\"subscription\"], headers=header)\n print(\"borrando subs\", match[\"subscription\"])\n # performing subscription with the best match, which is position 0 of the list because is ordered\n subscription = requests.post(url, data=json.dumps(data), headers=header)\n subs_url = url+\"/\"+json.loads(subscription.content)[\"m2m:sub\"][\"rn\"]\n match_app[0][\"subscription\"] = subs_url\n return match_app\n\n\ndef instance_matching(requirement, method_selection, new_app, services):\n # creating dictionary for corpus and a list of comparing service and apps\n start_matching_time = time.perf_counter()\n match_list = []\n corpus = {}\n match_list.append(requirement)\n # creating a corpus for each location, category and unit functional properties, the first entry on the lists are\n # are requirements values\n for key in requirement.keys():\n if key in [\"Location\", \"Category\", \"Unit\"]:\n corpus[key] = [requirement[key]]\n # getting list with all the available services\n # services = ontology_as_classes.get_providers(new_app) if new_app else ontology_as_classes.get_providers()\n services = ontology_as_classes.get_providers(new_app) if new_app else services\n # adding the value of the location, category and unit properties of the app to the corpus\n for service in services:\n app = dict()\n try:\n if not service.hasLocation() and not service.madeObservation():\n continue\n app[\"Location\"] = service.hasLocation() if service.hasLocation() else \"\"\n if service.madeObservation():\n app[\"Category\"] = service.madeObservation().observedProperty() if service.madeObservation().observedProperty() else \"\"\n app[\"Unit\"] = service.madeObservation().hasUnit() if service.madeObservation().hasUnit() else \"\"\n app[\"Base URL\"] = service.url\n app[\"Ontology\"] = service\n for key in corpus.keys():\n try:\n corpus[key].append(app[key])\n except Exception as e:\n print(str(e))\n continue\n match_list.append(app)\n except Exception as e:\n print(str(e))\n continue\n # defining threshold for the matching algorithm\n threshold = len(corpus.keys()) * 2 * requirement[\"threshold\"] if enable_semantic is True else len(corpus.keys()) * requirement[\"threshold\"]\n print(\"threshold\", threshold)\n print(\"max value\", len(corpus.keys())*2) if enable_semantic is True else print(\"max value\", len(corpus.keys()))\n # creating an array to store matching results\n match_total_array = numpy.zeros(shape=(1, len(match_list)-1))\n\n # getting the corpus for each property(location, category and unit) and calculating systanctic and\n # semantic similarity\n stop_corpus_time = time.perf_counter()\n semantic_time1 = 0\n syntactic_time1 = 0\n for key in corpus.keys():\n corpus_list = corpus[key]\n print()\n # print(\"corpus list\", corpus_list)\n if enable_semantic:\n syntactic_array, semantic_array, syntactic_time, semantic_time = syntactic_match(corpus_list, method_selection)\n else:\n syntactic_array, syntactic_time, semantic_time = syntactic_match(corpus_list, method_selection)\n # print(\"syntactic\", syntactic_array)\n if enable_semantic: print(\"semantic\", semantic_array)\n # adding syntactic and semantic similarity results to the total 
array\n match_total_array = numpy.sum([match_total_array, syntactic_array], axis=0)\n if enable_semantic: match_total_array = numpy.sum([match_total_array, semantic_array], axis=0)\n semantic_time1 += semantic_time\n syntactic_time1 += syntactic_time\n stop_syntactic_semantic_time = time.perf_counter()\n # print()\n # print(\"total\", match_total_array[0])\n # calculating the indexes of the values higher than the defined threshold if cosine similarity is used\n if method_selection is \"cosine\":\n match_indexes = numpy.argwhere(match_total_array[0] > threshold)\n print(match_indexes.size)\n else:\n # this is not implemented for euclidean distance\n match_indexes = numpy.argwhere(match_total_array[0] == numpy.min(match_total_array[0]))\n # print(\"match indexes\", match_indexes)\n # making the list of matching apps\n founded_ontologies = []\n for index in match_indexes:\n # print(\"match\", match_list[index[0]+1])\n try:\n if new_app is None:\n founded_ontologies.append({match_list[index[0]+1][\"Ontology\"]: match_total_array[0][index[0]]})\n else:\n founded_ontologies.append({match_list[index[0]+1][\"Ontology\"]: match_total_array[index[0]]})\n except Exception as e:\n print(str(e))\n continue\n stop_matching_time = time.perf_counter()\n corpus_time = stop_corpus_time - start_matching_time\n ordering_time = stop_matching_time - stop_syntactic_semantic_time\n return founded_ontologies, corpus_time, semantic_time1, syntactic_time1, ordering_time\n\n\n","sub_path":"GatewayIoT/gateway/functional_matching.py","file_name":"functional_matching.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312312015","text":"'''\r\n250.Count Univalue Subtrees\r\n Given a binary tree, count the number of uni-value subtrees.\r\n\r\n   A Uni-value subtree means all nodes of the subtree have the same value.\r\n\r\nFor example:\r\nGiven binary tree,\r\n\r\n 5\r\n / \\\r\n 1 5\r\n / \\ \\\r\n 5 5 5\r\nreturn 4.\r\n'''\r\nclass Solution:\r\n def maxPathSum(self, root: TreeNode) -> int:\r\n self.ans=0\r\n def helper(root)->bool:\r\n if root==None:\r\n return True\r\n l=helper(root.left)\r\n r=helper(root.right)\r\n if l and r:\r\n if root.left!=None and root.val!=root.left.val:\r\n return False\r\n if root.right!=None and root.val!=root.right.val:\r\n return False\r\n self.ans+=1\r\n return True\r\n helper(root)\r\n return self.ans\r\n","sub_path":"250.Count Univalue Subtrees.py","file_name":"250.Count Univalue Subtrees.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9909434","text":"import os\nimport sys\n\nimport pygame as pg\nfrom constantes import *\n\nsys.path.append(os.getcwd()+\"/motor/\")\nsys.path.append(os.getcwd()+\"/enemigos/\")\n\nclass Bala_base(pg.sprite.Sprite):\n def __init__(self, pos,direccion):\n super().__init__()\n self.image = pg.Surface([20, 20])\n self.image.fill(ROJO)\n self.rect = self.image.get_rect()\n self.rect.x = pos[0]\n self.rect.y = pos[1]\n self.velx = 0\n self.vely = 0\n self.direccion = direccion\n self.tipo = \"bala base\"\n self.daño = 50\n\n def update(self):\n self.rect.x += self.velx\n self.rect.y += self.vely\n","sub_path":"enemigos/clases/balas/bala_base.py","file_name":"bala_base.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34417572","text":"# List alternative names of all 
menu items here. Hyphens are stripped\n# out before the lists of alternative names are consulted; therefore,\n# if there's an alternative name that might include a hyphen (e.g.,\n# \"coca-cola\"), omit the hyphen here (\"cocacola\").\n\nbroths = {'shio': {'other_names': {'shios', 'shio broth', 'shio broths'}},\n 'shoyu': {'other_names': {'shoyus', 'shoyu broth', 'shoyu broths'}},\n 'miso': {'other_names': {'misos', 'miso broth', 'miso broths'}},\n 'tonkatsu': {'other_names': {'tonkatsus', 'tonkatsu broth',\n 'tonkatsu broths'}},\n 'vegan': {'other_names': {'vegans', 'vegan broth', 'vegan broths'}}\n }\nramen = {'ramen': {'other_names': {'ramens'}},\n 'soup': {'other_names': {'soups'}}\n }\ncont_nouns = {'bowl': {'other_names': {'bowls'}}}\nspiciness = {'mild': {'other_names': {'not spicy', 'mildness spicy',\n 'mild spicy',\n 'medium spiciness'}},\n 'medium': {'other_names': {'somewhat spicy',\n 'medium spicy',\n 'medium spiciness'}},\n 'hot': {'other_names': {'very spicy', 'extremely spicy',\n 'real spicy'}}\n }\nproteins = {'tofu': {'other_names': {'tofus'}},\n 'beef': {'other_names': {'beefs'}},\n 'pork': {'other_names': {'porks'}},\n 'chicken': {'other_names': {'chickens'}},\n 'vegetable': {'other_names': {'vegetables', 'veggie', 'veggies',\n 'veg'}}\n }\ntoppings = {'egg': {'other_names': {'eggs', 'tomagoyaki'}},\n 'fishcake': {'other_names': {'fish cake', 'fishcakes',\n 'fish cakes', 'naruto'}},\n 'mushrooms': {'other_names': {'mushroom', 'mush rooms',\n 'mush room'}},\n 'bean_sprouts': {'other_names': {'bean sprouts', 'sprouts',\n 'bean sprout', 'sprout'}},\n 'kimchi': {'other_names': {}},\n 'bok_choy': {'other_names': {'bok choy', 'bokchoy',\n 'bok choi', 'bokchoi'}},\n 'seaweed': {'other_names': {'sea weed', 'nori'}}\n } \nsauces= {'chili_oil': {'other_names': {'chili oil', 'chili',\n 'chili sauce', 'hot sauce',\n 'chili oils', 'chilis',\n 'chili sauces', 'hot sauces'},\n 'price': .25},\n 'sriracha_sauce': {'other_names': {'sriracha sauce',\n 'sriracha',\n 'sriracha sauces',\n 'srirachas'},\n 'price': .25},\n 'soy_sauce' :{'other_names': {'soy', 'soy sauce', 'soys',\n 'soy sauces'},\n 'price': .25},\n 'gyoza_sauce' : {'other_names': {'gyoza sauce',\n 'gyoza sauces'},\n 'price': .25}\n }\napps = {'gyoza': {'other_names': {},\n 'price': 7},\n 'dumplings': {'other_names': {'dumpling'},\n 'price': 5},\n 'edamame': {'other_names': {},\n 'price': 5},\n 'spring_rolls': {'other_names': {'spring rolls', 'spring roll'},\n 'price': 3},\n 'egg_rolls': {'other_names': {'egg rolls', 'egg roll'},\n 'price': 3},\n 'squid_balls': {'other_names': {'squid balls', 'squid ball',\n 'squidball', 'squid balls',\n 'squidballs', 'takoyaki'},\n 'price': 7}\n }\ndrinks = {'coke': {'other_names': {'cola', 'coca cola', 'cocacola',\n 'coka cola', 'cokacola', 'cokes',\n 'colas', 'coca colas', 'cocacolas',\n 'coka colas', 'cokacolas'},\n 'price': 1.5},\n 'diet_coke': {'other_names': {'diet coke', 'diet cola',\n 'diet coca cola',\n 'diet cocacola',\n 'diet colas', 'diet coca colas',\n 'diet cocacolas'},\n 'price': 1.5},\n 'sprite': {'other_names': {'sprites'},\n 'price': 1.5},\n 'lemonade': {'other_names': {'minute maid', 'minutemaid',\n 'minute maid lemonade',\n 'minutemaid lemonade',\n 'minute maids', 'minutemaids',\n 'minute maid lemonades',\n 'minutemaid lemonades',\n 'lemonades'},\n 'price': 1.5},\n 'sencha_tea': {'other_names': {'sencha tea', 'sencha',\n 'sencha teas', 'senchas'},\n 'price': 2},\n 'jasmine_tea': {'other_names': {'jasmine', 'jasmine tea',\n 'jasmine pearl', 'green tea',\n 'jasmines', 'jasmine 
teas',\n 'jasmine pearls',\n 'green teas'},\n 'price': 2},\n 'bancha_tea': {'other_names': {'bancha tea', 'bancha',\n 'bancha teas', 'banchas'},\n 'price': 2},\n 'water' : {'other_names': {'waters'},\n 'price': 2}\n } \nsizes = {'half': {'other_names': {'small', 'tiny', 'baby', 'halfs',\n 'halves', 'smalls', 'tinys', 'babys',\n 'babies'},\n 'price': 9},\n 'full': {'other_names': {'whole', 'large', 'big', 'jumbo',\n 'fulls', 'wholes', 'larges', 'bigs',\n 'jumbos'},\n 'price': 12}\n }\n","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536463830","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\nfrom sklearn.datasets import make_blobs\n\nclass GaussianNB:\n def fit(self, X, y):\n self.mean = {}\n self.var = {}\n self.classes = np.unique(y)\n for suby in self.classes:\n idx = np.where(y == suby)[0]\n self.mean[suby] = np.mean(X[idx])\n self.var[suby] = np.var(X[idx])\n\n def predict(self, X, proba=False):\n pred = []\n for x in X:\n res = {}\n for suby in self.classes:\n tmp = 1 / (math.sqrt(math.pi * self.var[suby]))\n res[suby] = tmp * np.mean(np.exp(-(x - self.mean[suby]) / 2 * self.var[suby]))\n\n p = np.array(list(res.values()))\n p = p / np.sum(p)\n if not proba:\n p = np.argmax(p)\n pred.append(p)\n\n return np.array(pred)\n\n def predict_proba(self, X):\n return self.predict(X, proba=True)\n\n\nif __name__ == '__main__':\n X, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')\n plt.show()\n\n model = GaussianNB()\n model.fit(X, y)\n\n # New data\n rng = np.random.RandomState(0)\n Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)\n ynew = model.predict(Xnew)\n\n plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')\n lim = plt.axis()\n plt.scatter(Xnew[:, 0], Xnew[:, 1], c=ynew, s=20, cmap='RdBu', alpha=0.1)\n plt.axis(lim)\n plt.show()\n","sub_path":"naive_bayes/gaussian_nb.py","file_name":"gaussian_nb.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440654701","text":"class Solution:\n def minDistance(self, word1: str, word2: str) -> int:\n row, col = len(word1), len(word2)\n dp = [[0]* (col+1) for _ in range(row+1)]\n for i in range(row):\n for j in range(col):\n if word1[i] == word2[j]:\n dp[i+1][j+1] = 1 + dp[i][j]\n else:\n dp[i+1][j+1] = max(dp[i][j+1], dp[i+1][j])\n same = dp[row][col]\n return len(word1) + len(word2) - 2*same\n","sub_path":"src/dp/583.py","file_name":"583.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294386879","text":"#!/usr/bin/env python\nimport os\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\nCRITICAL = 50\nFATAL = CRITICAL\nERROR = 40\nWARNING = 30\nWARN = WARNING\nINFO = 20\nDEBUG = 10\nNOTSET = 0\n\nCURRENT_PATH = os.path.dirname(os.path.abspath(__file__))\nROOT_PATH = os.path.join(CURRENT_PATH, os.pardir)\nLOG_PATH = os.path.join(ROOT_PATH, 'log')\n\nclass LogHandler(logging.Logger):\n def __init__(self,name,level = DEBUG,stream = True,file = True):\n self.name = name\n self.level = level\n # unbound method,has parameter 'self'\n #logging.Logger.__init__(self,self.name,level = level)\n # bound method,omit parameter 'self'\n 
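# going through super() resolves via the MRO, so a subclass of LogHandler\n        # still reaches logging.Logger.__init__ exactly once\n        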
super(LogHandler,self).__init__(name,level = level)\n if stream:\n self.__setStreamHandler__()\n if file:\n self.__setFileHandler__()\n\n def __setFileHandler__(self,level = None):\n file_name = os.path.join(LOG_PATH,'{name}.log'.format(name = self.name))\n file_handler = TimedRotatingFileHandler(filename = file_name,when = 'D',interval = 1,backupCount = 7)\n file_handler.suffix = '%Y%m%d.log'\n if not level:\n file_handler.setLevel(self.level)\n else:\n file_handler.setLevel(level)\n\n formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n file_handler.setFormatter(formatter)\n\n self.file_handler = file_handler\n self.addHandler(file_handler)\n\n def __setStreamHandler__(self,level = None):\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')\n stream_handler.setFormatter(formatter)\n if not level:\n stream_handler.setLevel(self.level)\n else:\n stream_handler.setLevel(self.level)\n\n self.addHandler(stream_handler)\n\n def resetName(self,name):\n self.name = name\n self.removeHandler(self.file_handler)\n self.__setFileHandler__()\n\nif __name__ == '__main__':\n log = LogHandler('test')\n log.info('this is a test msg')\n","sub_path":"pythonScripts/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"363055892","text":"'''\nProblem 4\nWrite a function that given a list of non negative integers,\narranges them such that they form the largest possible number.\nFor example, given [50, 2, 1, 9], the largest formed number is 95021.\n'''\n\ndef main():\n listA = [50, 2, 10, 9]\n print(\"The List \")\n print(listA)\n listA.sort()\n print(\"The List least to greatest\")\n print(listA)\n listA.sort(reverse=True)\n print(\"The List greatest to least\")\n listA.sort(reverse=True)\n print(listA)\n\n\n i = 0\n listA = [55, 22, 340, 4, 99, 20]\n\n while i < len(listA):\n w = 10\n while listA[i] > w:\n w *= 10\n if listA[i] > 10:\n listA[i] = listA[i] / w\n if listA[i] < 1:\n listA[i] *= 10\n\n i += 1\n\n listA.sort(reverse=True)\n\n print(listA)\n\n\n\n","sub_path":"Sorter.py","file_name":"Sorter.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433168524","text":"import numpy as np\r\nfrom dezero import Variable\r\nimport dezero.functions as F\r\n\r\n\r\nnp.random.seed(0)\r\nx = np.random.rand(100, 1)\r\ny = np.sin(2 * np.pi * x) + np.random.rand(100, 1)\r\n\r\nI, H, O = 1, 10, 1\r\nW1 = Variable(0.01 * np.random.randn(I, H))\r\nb1 = Variable(np.zeros(H))\r\nW2 = Variable(0.01 * np.random.randn(H, O))\r\nb2 = Variable(np.zeros(O))\r\n\r\ndef predict(x):\r\n x2 = F.sigmoid(F.linear(x, W1, b1))\r\n x2 = F.linear(x2, W2, b2)\r\n return x2\r\n\r\n\r\ndef mean_squared_error(x0, x1):\r\n diff = x0 - x1\r\n return F.sum(diff ** 2) / len(diff)\r\n\r\nlr = 0.5\r\niters = 100000\r\n\r\n\r\nfor i in range(iters):\r\n y_pred = predict(x)\r\n loss = mean_squared_error(y_pred, y)\r\n \r\n W1.clear_grad()\r\n b1.clear_grad()\r\n W2.clear_grad()\r\n b2.clear_grad()\r\n loss.backward()\r\n\r\n W1.data -= lr * W1.grad.data\r\n b1.data -= lr * b1.grad.data\r\n W2.data -= lr * W2.grad.data\r\n b2.data -= lr * b2.grad.data\r\n if i % 1000 == 0:\r\n 
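# Editor's note (hedged): the digit-scaling loop in Sorter.py above does not
# solve the stated problem ([50, 2, 1, 9] should arrange to 95021). The
# standard approach sorts the numbers as strings with a pairwise comparator
# on their concatenations; this is an alternative sketch, not the original
# author's method.
from functools import cmp_to_key

def largest_number(nums):
    # place a before b whenever str(a)+str(b) beats str(b)+str(a)
    strs = sorted((str(n) for n in nums),
                  key=cmp_to_key(lambda a, b: (a + b < b + a) - (a + b > b + a)))
    # int() also collapses the all-zero edge case, e.g. [0, 0] -> 0
    return int(''.join(strs))

# largest_number([50, 2, 1, 9]) -> 95021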
print(loss)\r\n\r\n","sub_path":"sample_neural_network.py","file_name":"sample_neural_network.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253315761","text":"import pytest\nimport requests\n\nfoo1 = \"bar12\"\nfoo2 = \"config_bar2\"\nfoo3 = \"bar32\"\nexpect_foo1 = \"config_bar1\"\nexpect_foo2 = \"config_bar2\"\nbase_url = \"https://postman-echo.com\"\n\n\n@pytest.mark.incremental\ndef test_post_with_raw_text_request():\n global response_body\n url = f\"{base_url}/post\"\n post_headers = {'user-agent': 'HttpRunner/3.1.4',\n 'Content-Type': 'text/plain'}\n post_data = f\"This is expected to be sent back as part of response body: {foo1}-{foo2}-{foo3}.\"\n\n response = requests.request(\n \"POST\", url, json=post_data, headers=post_headers)\n response_body = response.json()\n\n assert response.status_code == 200, f\"status_code: {response.status_code}, reason:{response.reason}\"\n\n\ndef test_post_with_raw_text_data():\n\n assert response_body['data'] == '\"This is expected to be sent back as part of response body: bar12-config_bar2-bar32.\"'\n\n\n@pytest.mark.incremental\ndef test_post_form_data_request():\n global post_response_body\n foo2 = \"bar23\"\n url = f\"{base_url}/post\"\n form_data = f\"foo1={foo1}&foo2={foo2}&foo3={foo3}\"\n post_headers = {\"user-agent\": \"HttpRunner/3.1.4\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n post_response = requests.post(url, data=form_data, headers=post_headers)\n post_response_body = post_response.json()\n\n assert post_response.status_code == 200, f\"status_code: {post_response.status_code}, reason:{post_response.reason}\"\n\n\ndef test_post_form_data_foo1():\n assert post_response_body['form']['foo1'] == expect_foo1\n\n\ndef test_post_form_data_foo2():\n assert post_response_body['form']['foo2'] == \"bar23\"\n\n\ndef test_post_form_data_foo3():\n assert post_response_body['form']['foo3'] == \"bar21\"\n","sub_path":"test_HtRn_demo/test_post_request_demo.py","file_name":"test_post_request_demo.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589089218","text":"import sys\nimport timeit\nimport argparse\nimport numpy\nimport subprocess\nimport json\nimport time\nfrom time import process_time\n\nMAX_TRIAL = 5\nCOOLDOWN_PERIOD = 10 # seconds\nZ_VALUE_BOUND = 3\n\nclass PerfTestCase():\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='Process some integers.')\n self.parser.add_argument('--compare', dest='compare_data_file_path', action='store',\n help='perf test data to compare with')\n self.parser.add_argument('--update', dest='update_data_file_path', action='store',\n help='perf test data to update')\n\n # yf225: couldn't figure out how to let Bash pass variables without quotes, so have to use this workaround\n args_str = ''\n if len(sys.argv) > 2:\n args_str = ' '.join(str(x) for x in sys.argv[1:])\n elif len(sys.argv) == 2:\n args_str = sys.argv[1]\n args_list = None\n if args_str != '':\n args_list = args_str.split(' ')\n self.args = self.parser.parse_args(args_list)\n\n self.should_compare = False\n self.should_update = False\n self.only_test_name = None\n\n if self.args.compare_data_file_path:\n self.should_compare = True\n with open(self.args.compare_data_file_path) as compare_data_file:\n self.compare_data = json.load(compare_data_file)\n\n if self.args.update_data_file_path:\n self.should_update = 
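# Editor's note (hedged): the regression check in PerfTestCase.measure below
# reduces to a one-sided z-test of the sampled mean against a stored
# baseline. A standalone version of that comparison, reusing the file's own
# Z_VALUE_BOUND convention:
import numpy as np

Z_VALUE_BOUND = 3

def is_regression(runtimes, baseline_mean, baseline_sigma):
    # z = (sample mean - baseline mean) / baseline sigma; >= bound => slower
    z_value = (np.mean(runtimes) - baseline_mean) / baseline_sigma
    return z_value >= Z_VALUE_BOUND, z_value

# is_regression([1.2, 1.3, 1.25], baseline_mean=1.0, baseline_sigma=0.05)
# -> (True, 5.0)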
True\n with open(self.args.update_data_file_path) as update_data_file:\n self.update_data = json.load(update_data_file)\n\n def measure(self, test_name, stmt, setup, number, repeat, trial=0):\n if self.only_test_name and not test_name == self.only_test_name:\n return\n print('Testing: {} ...'.format(test_name))\n trial += 1\n\n runtimes = []\n\n # Measure using timeit\n # for i in range(repeat):\n # runtimes += [timeit.timeit(stmt=stmt, setup=setup, number=number)]\n\n # Measure using time.process_time()\n try:\n for i in range(repeat):\n exec(setup)\n start_time = process_time()\n for i in range(number):\n exec(stmt)\n elapsed_time = process_time() - start_time\n runtimes += [elapsed_time]\n except Exception as err:\n print(\"Unexcepted error: {} {}\".format(test_name, err))\n\n sample_mean = numpy.mean(runtimes)\n sample_sigma = numpy.std(runtimes)\n print(\"sample mean: \", sample_mean)\n print(\"sample sigma: \", sample_sigma)\n \n if self.should_compare:\n if test_name in self.compare_data:\n baseline_mean = self.compare_data[test_name]['mean']\n baseline_sigma = self.compare_data[test_name]['sigma']\n else:\n baseline_mean = sys.maxsize\n baseline_sigma = 0.01\n z_value = (sample_mean - baseline_mean) / baseline_sigma\n print(\"z-value: {}\".format(z_value))\n if z_value >= Z_VALUE_BOUND:\n if trial == MAX_TRIAL:\n raise Exception('''\\n\nz-value >= {} in all {} trials, there is perf regression.\\n\n'''.format(Z_VALUE_BOUND, trial))\n else:\n print(\"z-value >= {}, doing another trial in {} seconds.\".format(Z_VALUE_BOUND, COOLDOWN_PERIOD))\n time.sleep(COOLDOWN_PERIOD)\n self.measure(test_name, stmt, setup, number, repeat, trial)\n else:\n print(\"z-value < {}, no perf regression detected.\".format(Z_VALUE_BOUND))\n\n if self.should_update:\n if not test_name in self.update_data:\n self.update_data[test_name] = {}\n self.update_data[test_name]['mean'] = sample_mean\n self.update_data[test_name]['sigma'] = max(sample_sigma, sample_mean * 0.1) # Allow a larger margin\n #self.update_data[test_name]['sigma'] = sample_sigma\n with open(self.args.update_data_file_path, 'w') as update_data_file:\n json.dump(self.update_data, update_data_file, indent=4)\n","sub_path":"modules/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"528219296","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom django.core import serializers\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom PIL import Image\nimport base64\nimport sys\nimport json\n\nfrom acutserver.core.models import User, upload_file, Photo, Battle_Log, Like_table\nfrom acutserver.form.forms import upload_image_form\n\n\n@csrf_exempt\ndef create(request):\n\n if request.method == 'POST':\n\n data = json.load(request)\n #data = request.POST\n\n u_idx = data['user_index']\n #p_info = data['photo_info']\n #p_loc = data['location']\n img_text = data['user_text']\n\n img = data['img']\n\n img_content = base64.b64decode(img)\n img_result = SimpleUploadedFile('temp.jpg', img_content ,getattr(img,\"content_type\",\"application/octet-stream\"))\n\n request.FILES[u'file'] = img_result\n\n p_img = \"\"\n #form = upload_image_form(request.POST, request.FILES)\n user_obj = User.objects.filter(index = u_idx)\n\n #if form.is_valid():\n image_file = Photo(user= user_obj[0], img = 
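# Editor's note (hedged): in the create() view above,
# `getattr(img, "content_type", "application/octet-stream")` always falls
# back to the default, because `img` is a base64 *string*, not an upload
# object with a content type. When the bytes are already in memory, Django's
# ContentFile is the more usual wrapper; a sketch (helper name is
# illustrative, not from the original module):
import base64
from django.core.files.base import ContentFile

def decoded_image_file(b64_payload, name='upload.jpg'):
    return ContentFile(base64.b64decode(b64_payload), name=name)

# Photo(user=user_obj[0], img=decoded_image_file(data['img']), text=img_text)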
request.FILES[u'file'], text = img_text)\n \n image_file.save()\n #p_img = image_file.image\n \n #else :\n # return HttpResponse(\"is not valid\")\n\n\n # hash_tags = data['hash_tags']\n\n\n #have to think about the search speed because it wiil compare all hash\n #tags with hash tables word\n\n #post_obj = Post(user_index = user_obj[0], post_img = p_img, post_content = p_content, post_longitude = p_loc[0], post_latitude = p_loc[1])\n #try :\n # post_obj.save()\n #except Error as e :\n # return HttpResponse(\"%s\" %e.message)\n\n #for info in p_info:\n # has_meta = Photo_meta.objects.filter( photo_info_name = info['photo_info_name'])\n\n # if not has_meta.exists():\n # meta_obj = Photo_meta(photo_info_name = info['photo_info_name'])\n # try :\n # meta_obj.save()\n # except Error as e:\n # return HttpResponse(\"%s\" %e.message)\n\n # has_meta.refresh_from_db()\n\n # photo_info_obj = Photo_info(photo_meta_index = \"%s\" %has_meta.photo_meta_index, info_type = info['info_type'], post_index = \"%s\" %post_obj.post_index)\n\n #try :\n # photo_info_obj.save()\n #except Error as e :\n # return HttpResponse(\"%s\" %e.message)\n\n\n ######fixfixfixfixfixfix########################################################################\n #for tags in hash_tags:\n # has_tag = Hash_table.objects.filter(hash_name = tags['tag_name'])\n\n #if not has_tag.exists():\n # table_obj = Hash_table(hash_name = tags['tag_name'])\n # table_obj.save()\n # has_tag.refresh_from_db()\n\n #hash_tag_obj = Hash_tag(hash_index = \"%s\" %has_tag.hash_index, post_index\n # = \"%s\" %post_obj.post_index)\n\n return HttpResponse(\"success\")\n return HttpResponse(\"bad access\")\n\n@csrf_exempt\ndef show_lounge(request):\n if request.method == 'POST':\n\n lounge_photos = Photo.objects.filter(lounge = True).exclude(visible = False).order_by('-upload_time')\n\n if len(lounge_photos) == 0 :\n return HttpResponse(\"no photos in lounge\")\n #json_encode = serializers.serialize('json',lounge_photos)\n img_prefix = \"https://s3.ap-northeast-2.amazonaws.com/acut-fullsize-image/\"\n\n json_arr = {'lounge_photos' : []}\n\n for p in lounge_photos :\n\n json_obj = {\n 'index' : p.index,\n 'img' : img_prefix+str(p.img),\n 'user_index' : str(p.user.index),\n 'text' : p.text if not p.text is None else \"\"\n }\n\n json_arr['lounge_photos'].append(json_obj)\n\n\n\n json_encode = json.dumps(json_arr)\n #json_encode = serializers.serialize('json', json_str)\n\n return HttpResponse(json_encode, content_type=\"application/json\")\n return HttpResponse(\"bad access\")\n\n@csrf_exempt\ndef change_photo_info(request):\n if request.method == \"POST\":\n data = json.load(request)\n photo_index = data['photo_index']\n photo_obj = Photo.objects.filter(index = photo_index)\n\n if len(photo_obj) == 1:\n return HttpResponse(\"no photo\")\n\n photo_obj = photo_obj[0]\n\n comment = data['user_text'] if data['user_text'] else photo_obj.text\n show_lounge = data['lounge'] if data['lounge'] else photo_obj.lounge\n\n json_obj = {'result' : []}\n\n try :\n photo_obj.update(text = comment, lounge = show_lounge)\n json_obj['result'].append(\"1\")\n except Photo.DoesNotExist : \n json_obj['result'].append(\"2\")\n \n return HttpResponse(json.dumps(json_obj), content_type = \"application/json\")\n\n return HttpResponse(\"bad access\")\n\n@csrf_exempt\ndef show_my_lounge(request):\n if request.method == 'POST':\n data = json.load(request)\n u_idx = data['user_index']\n user_obj = User.objects.filter(index = u_idx)\n\n\n if len(user_obj) == 0:\n return HttpResponse(\"no 
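# Editor's note (hedged): two likely slips in change_photo_info above -- the
# emptiness check reads `len(photo_obj) == 1` where `== 0` was almost
# certainly intended, and `.update()` is a QuerySet method, so it fails once
# the QuerySet has been indexed down to a single Photo instance. One
# corrected shape that keeps the original intent (assumes the module's own
# Photo model):
def update_photo(photo_index, comment=None, show_lounge=None):
    qs = Photo.objects.filter(index=photo_index)
    if not qs.exists():
        return None
    photo = qs.first()
    photo.text = comment if comment is not None else photo.text
    photo.lounge = show_lounge if show_lounge is not None else photo.lounge
    photo.save()  # instance-level save instead of QuerySet.update()
    return photo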
User\")\n\n user_obj = user_obj[0]\n my_lounge_photos = Photo.objects.filter(user = user_obj,lounge = True).exclude(visible = False).order_by('-created_at')\n img_prefix = \"https://s3.ap-northeast-2.amazonaws.com/acut-fullsize-image/\"\n\n\n json_arr = {'lounge_photos' : []}\n for p in my_lounge_photos:\n json_obj = {\n 'photo_index' : p.index,\n 'img' : img_prefix+str(p.img),\n 'user_index' : str(p.user.index),\n 'text' : p.text\n }\n\n json_arr['lounge_photos'].append(json_obj)\n\n json_encode = json.dumps(json_arr)\n\n return HttpResponse(json_encode, content_type=\"application/json\")\n return HttpResponse(\"bad access\")\n","sub_path":"acutserver/views/photo_management.py","file_name":"photo_management.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312331373","text":"import os\nimport zlib\nimport gzip\nfrom StringIO import StringIO\n\n# prints 256 colors\nfor i in range(256):\n print(\"\\033[\"+str(i)+\"mx\\033[0m\")\n\nIMAGE_DEBUG = False\n\nclass Commons(object):\n\n def _chunks(self, l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in xrange(0, len(l), n):\n yield l[i:i + n]\n\n def bytes2int(self, data):\n try:\n return int(data.encode('hex'), 16)\n except ValueError:\n return None\n\n def decompress(self, frmt, data):\n return {\n 'gz': lambda :zlib.decompress(data, zlib.MAX_WBITS|16),\n 'zlib': lambda :zlib.decompress(data),\n 'deflate': lambda :zlib.decompress(data, -zlib.MAX_WBITS)\n }[frmt]()\n\nclass PortableNetworkGraphics(Commons):\n\n def __init__(self, path):\n with open(path, 'rb') as f:\n self.blob = f\n self.file_header = self.blob.read(8)\n chunk_length_size = 4\n chunk_type_size = 4\n crc_size = 4\n self.chunks = {}\n while True:\n chunk_length = self.bytes2int(self.blob.read(\n chunk_length_size))\n if not chunk_length:\n break\n chunk_type = self.blob.read(chunk_type_size)\n chunk_data = self.blob.read(chunk_length)\n crc = self.blob.read(crc_size)\n self.chunks[chunk_type] = {\n 'type': chunk_type,\n 'data': chunk_data,\n 'crc': crc,\n 'is_critical': chunk_type[0].isupper()\n }\n IHDR = self.chunks['IHDR']['data']\n self.width = self.bytes2int(IHDR[0:4]) # 4 bytes\n self.height = self.bytes2int(IHDR[4:8]) # 4 bytes\n self.bit_depth = self.bytes2int(IHDR[8:9]) # 1 byte\n self.color_type = self.bytes2int(IHDR[9:10]) # 1 byte\n self.compression_method = self.bytes2int(IHDR[11:12]) # 1 byte\n self.filter_method = self.bytes2int(IHDR[13:14]) # 1 byte\n self.interlace_method = self.bytes2int(IHDR[15:16]) # 1 byte\n # Palette\n try:\n PLTE = self.chunks['PLTE']['data']\n self.palette = []\n for chunk in list(self._chunks(PLTE, 3)):\n self.palette.append({\n 'r': self.bytes2int(chunk),\n 'g': self.bytes2int(chunk),\n 'b': self.bytes2int(chunk)})\n except KeyError:\n pass # Palette key not present\n # Pixel Array\n IDAT = self.chunks['IDAT']['data']\n self.decompress('zlib', IDAT)\n #try: # uncompressing gz\n # with gzip.GzipFile(fileobj=StringIO(IDAT), mode='rb') as f:\n # IDAT = f.read()\n #except Exception as e:\n # print(e)\n self.pixels = []\n import binascii\n print('decompressing IDAT with bit depth {}'.format(self.bit_depth))\n for chunk in list(self._chunks(IDAT, self.bit_depth)):\n print('chunk <{}>'.format(chunk))\n self.pixels.append(chunk)\n print('{} pixels'.format(\n len(self.pixels)\n ))\n\n def _bit_depth_restrictions(self):\n '''\n Color Allowed Interpretation (Each pixel)\n Type Bit Depths\n \n 0 1,2,4,8,16 is a grayscale 
sample.\n 2 8,16 is an R,G,B triple.\n 3 1,2,4,8 is a palette index; a PLTE chunk must appear.\n 4 8,16 is a grayscale sample,followed by an alpha sample.\n 6 8,16 is an R,G,B triple, followed by an alpha sample.\n '''\n return reduce(lambda acc, new: acc or new, [\n self.color_type == 0 and self.bit_depth in [1, 2, 4, 8, 16],\n self.color_type == 2 and self.bit_depth in [8, 16],\n self.color_type == 3 and self.bit_depth in [1, 2, 4, 8],\n self.color_type == 4 and self.bit_depth in [8, 16],\n self.color_type == 6 and self.bit_depth in [8, 16]])\n\n def get_data(self):\n return self.chunks['IDAT']\n\nfilename = 'rubber-duck-small.png'\nimg = PortableNetworkGraphics('rubber-duck-small.png')\nprint(img.file_header)\nprint(img.chunks.keys())\nprint(\n '{} - data size {}'.format(filename, len(img.chunks['IDAT']['data']))\n)\nprint('width {}'.format(img.width))\nprint('height {}'.format(img.height))\nprint('color type {}'.format(img.color_type))\nprint('interlace_method {}'.format(img.interlace_method))\n","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91212903","text":"import unittest\nfrom memory_profiler import profile\n@profile\ndef factorial(a):\n k=1\n if type(a)!=int:\n raise TypeError('Введенное значение не является целым числом')\n if a < 0:\n raise ValueError('Введенное значение явлется отрицательным числом')\n if a==0:\n return 1\n for i in range(1,a+1):\n k=k*i\n return k\nclass Test_Factorial(unittest.TestCase):\n def test_one(self):\n self.assertEqual(factorial(1),1)\n def test_zero(self):\n self.assertEqual(factorial(0),1)\n def test_Natural(self):\n self.assertEqual(factorial(5),120)\n self.assertEqual(factorial(2),2)\n self.assertEqual(factorial(4),24)\n def test_error_string(self):\n self.assertRaises(TypeError, factorial, 'lol')\n self.assertRaises(TypeError, factorial, '')\n self.assertRaises(TypeError, factorial, ' ')\n def test_error_neg(self):\n self.assertRaises(ValueError, factorial, -4)\n self.assertRaises(ValueError, factorial, -81)\nif __name__=='__main__':\n unittest.main()\n \n # Используемые значения при подсчете времени затрачиваемого на программу:\n#300\n#Время: Нерекурсивный 0,0002356470000000055\n #Рекурсивный 0,0004815000000000236\n#5\n#Память: Нерекурсивный 17,7 MiB\n #Рекурсивный 17,9 MiB\n \n#450\n#Время: Нерекурсивный 0,000194521000000000305\n #Рекурсивный 0,001099888700000000216\n#15\n#Память: Нерекусивный 17,7 MiB\n #Рекурсивный 17,7 MiB\n\n#600\n#Время: Нерекурсивный 0,00033230800000000034\n #Рекурсивный 0,001618911999999999724\n#25\n#Память: Нерекурсивный 17,7 MiB\n #Рекурсивный 17,9 MiB\n#750\n#Время: Нерекурсивный 0,0003437150000000222\n #Рекурсивный 0,0021502429999999996\n#50\n#Память: Нерекурсиный 17,7 MiB\n #Рекурсивный 18,2 MiB\n \n#900\n#Время: Нерекурсивный 0,0011001869999999747\n #Рекурсвный 0,001510544000000000026\n#75\n#Память: Нерекурсивный 17,7 MiB\n #Рекурсивный 18,3 MiB\n#Вывод:\n#Рекурсионный способ занимает больше памяти и дольше выполняется, чем нерекурсионный.\n#Каждая рекурсия добавляет в стек памяти новый уровень, что увеличивает время исполнения и затраты памяти.\n","sub_path":"hw2/Факториал без рекурсии.py","file_name":"Факториал без рекурсии.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392383382","text":"\n# Create a program that asks the user to enter their name and their age.\n# 
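# Editor's note: English translation of the Russian annotations in the
# factorial benchmark above. The TypeError message reads "The entered value
# is not an integer" and the ValueError message "The entered value is a
# negative number". The commented measurements compare a non-recursive and
# a recursive factorial for n = 300/450/600/750/900 (wall time) and
# n = 5/15/25/50/75 (memory, via memory_profiler), and the closing
# conclusion translates as: the recursive approach takes more memory and
# runs longer than the non-recursive one, because each recursive call adds
# a new frame to the stack, increasing both execution time and memory cost.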
Print out a message addressed to them that tells them the year that they will turn 100 years old.\n\n# Extras:\n\n# Add on to the previous program by asking the user for another\n# number and printing out that many copies of the previous message.\n# (Hint: order of operations exists in Python)\n# Print out that many copies of the previous message on separate lines.\n# (Hint: the string \"\\n is the same as pressing the ENTER button)\n\nimport datetime\n\nyear_now = datetime.datetime.now().year\nname = input(\"Ciao! Inserisci il tuo nome -> \")\nprint(\"Ciao\", name, \"!\")\nwhile True:\n try:\n age = int(input(\"Inserisci la tua età -> \"))\n if age < 0:\n print(\"Inserisci un'età valida.\")\n break\n except(ValueError):\n print(\"Devi inserire un numero, figaaaa!!\")\n\n\n\nyear_fut = year_now + 100 - age\n\nif age > 100:\n print(\"Hai già più di 100 anni da\", age - 100, \"anni, vecchio di merda!\")\n msg = \"Hai già più di 100 anni da \" + str(age - 100) + \" anni, vecchio di merda!\"\nelse:\n print(name,\", avrai 100 anni nel \", year_now + 100 - age, \"! :O\")\n msg = name + \" avrai 100 anni nel \" + str(year_now + 100 - age) + \"! :O\"\n\nwhile True:\n try:\n times = int(input(\"Inserisci il numero di volte in cuui vuoi ripetere il precedente messaggio! -> \"))\n if times <= 0:\n print(\"Devi inserire un numero positivo\")\n break\n except(ValueError):\n print(\"Devi inserire un numeroooooo\")\n\nfor i in range(1, times+1):\n print(i, \".\", msg)\n","sub_path":"practicepython.org/ex1_char_input.py","file_name":"ex1_char_input.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"339391063","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nfrom flask import Flask\nfrom webassets.loaders import PythonLoader\nfrom flask.ext.assets import Environment, Bundle\nfrom flod_tilskudd_portal.api import create_api\n\nfrom api.auth import create_bouncer\n\napp = Flask(__name__)\napp.debug = os.environ.get('DEBUG') == 'True'\napi_version = \"v1\"\n\nassets = Environment(app)\nassets.debug = app.debug\nbundles = PythonLoader('assetbundle').load_bundles()\nfor name, bundle in bundles.iteritems():\n assets.register(name, bundle)\n\nfrom flod_tilskudd_portal import views, proxy\n\n\ndef check_environment(app):\n if 'AUTH_TOKEN_SECRET' not in os.environ:\n raise EnvironmentError(('Environment variable AUTH_TOKEN_SECRET must '\n 'be set'))\n\n\ncheck_environment(app)\n\ncreate_api(app, api_version)\ncreate_bouncer(app)\n\n# support for remote debugging in Intellij and pycharm\n#\n# Set IDEA_TILSKUDD_PORTAL_REMOTE_DEBUG_ON to True in your environment\n# prior to starting the application to get remote debugging.\n#\n# Set IDEA_REMOTE_DEBUG_SERVER to the ip/hostname of the machine running the\n# debug server.\n#\n# Set IDEA_TILSKUDD_PORTAL_REMOTE_DEBUG_SERVER to the port of the debug server prosess\n#\n# For the remote debugging to work you will also have to make sure\n# the pycharm-debug.egg is on your path (check your environment file).\nif os.environ.get('IDEA_TILSKUDD_PORTAL_REMOTE_DEBUG_ON') == 'True':\n server = os.environ.get('IDEA_REMOTE_DEBUG_SERVER')\n port = os.environ.get('IDEA_TILSKUDD_PORTAL_REMOTE_DEBUG_PORT')\n app.logger.info(\"Idea remote debugging is on! 
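# Editor's note (hedged): in the age-input loop above (the prompts are
# Italian, e.g. "inserisci la tua età" = "enter your age"), the `break`
# sits outside any else branch, so a negative age still exits the loop
# right after printing the warning. A corrected validation loop, with the
# messages in English:
def ask_positive_int(prompt):
    while True:
        try:
            value = int(input(prompt))
        except ValueError:
            print("You must enter a number!")
            continue
        if value < 0:
            print("Please enter a non-negative value.")
            continue
        return value

# age = ask_positive_int("Enter your age -> ")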
Will connect to debug server running on %s:%s\" % (server, port))\n import pydevd\n pydevd.settrace(server, port=int(port), suspend=False, stdoutToServer=True, stderrToServer=True)\n","sub_path":"flod_tilskudd_portal/flod_tilskudd_portal/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226627603","text":"\n\n#You should probably use an HTTP client like requests\n#to get the document behind the URL, and feed that document to Beautiful Soup.\n\n#Pull data from frontpage deals\n#sort prices, find next best deal?\n\nimport requests, bs4\n\nres= requests.get('https://slickdeals.net')\ntry:\n res.raise_for_status()\nexcept Exception as exc:\n print('There was a problem: %s' %(exc))\n \nres.status_code == requests.codes.ok \n#make sure its a valid website\n\n#write to to text, html\nplayFile = open('deals.html', 'wb')\nfor chunk in res.iter_content(1000000):\n playFile.write(chunk)\n\nplayFile.close()\n\n#beautiful soup\nnoStarch = bs4.BeautifulSoup(res.text)\nprint(noStarch.title.string)\n\n\n#look for price line tag id and get listprice, itemPercentOff, title, itemprice\nitemPrice = noStarch.select('.priceLine')\nprint(len(itemPrice))\nprint(type(itemPrice[0]))\nprint(itemPrice[0].getText())\n\nfor item in itemPrice:\n print(item)\n \n","sub_path":"slickdeals.py","file_name":"slickdeals.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"195798046","text":"# -*- coding: utf-8 -*-\n\nimport pytest\nfrom base import TestSearchEngineBase\nfrom uszipcode.model import Zipcode, ZipcodeType\n\n\nclass TestSearchEngineCensusData(TestSearchEngineBase):\n def test(self):\n z = self.search.by_zipcode(\"10001\")\n z.bounds\n if self.search.zip_klass is Zipcode:\n z.population_by_age\n z.head_of_household_by_age\n z.polygon\n\n def test_by_zipcode_non_standard(self):\n \"\"\"\n Test by_zipcode should return any type zipcode.\n \"\"\"\n z = self.search.by_zipcode(48874)\n assert z.zipcode_type != ZipcodeType.Standard\n assert z.lat is not None\n\n\nif __name__ == \"__main__\":\n import os\n\n basename = os.path.basename(__file__)\n pytest.main([basename, \"-s\", \"--tb=native\"])\n","sub_path":"tests/search/test_census_data.py","file_name":"test_census_data.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"216942801","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport ScrapeFunctions\n\nyear = \"2016-17\"\nBASE_URL = \"http://naccsports.org/sports/bsb/\" # add constants to ScrapeFunctions.py\nHITTING_COLS = ['no.', 'name', 'yr', 'pos', 'g', 'ab', 'r', 'h', '2b', 'hr', 'avg', 'obp', 'slg']\nTEAM_ABBREVIATIONS = {\n 'Aurora': 'AUR',\n 'Benedictine': 'BEN',\n 'Concordia Chicago': 'CUC',\n 'Concordia Wisconsin': 'CUW',\n 'Dominican': 'DOM',\n 'Edgewood': 'EDG',\n 'Lakeland': 'LAK',\n 'MSOE': 'MSOE',\n 'Marian': 'MAR',\n 'Maranatha': 'MARN',\n 'Rockford': 'ROCK',\n 'Wisconsin Lutheran': 'WLC'\n}\n\n\ndef get_soup(url, verbose=False):\n # returns a Beautiful Soup object from the specified URL\n # will be placed in ScrapingFunctions.py\n if verbose:\n print(\"GET \" + url)\n request = requests.get(url)\n text = request.text\n return BeautifulSoup(text, \"html.parser\")\n\n\ndef get_text(html_tag):\n # get the text from an html tag\n # returns a string\n 
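# Editor's note (hedged): `bs4.BeautifulSoup(res.text)` in slickdeals.py
# above relies on bs4 guessing a parser, which raises a
# GuessedAtParserWarning on recent versions; passing the parser explicitly
# is the documented form:
import requests
import bs4

res = requests.get('https://slickdeals.net')
soup = bs4.BeautifulSoup(res.text, 'html.parser')  # explicit parser choice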
return html_tag.text.strip()\n\n\ndef get_href(html_tag):\n # get the href attribute from an html tag\n # returns a string\n return html_tag.attrs['href']\n\n\ndef find_table(soup_obj, header_values):\n # find the index of the first table that contains specific values in header\n tables = soup_obj.find_all('table')\n i = 0\n while i < len(tables):\n table = tables[i]\n header = table.find_all('th')\n columns = [x.text.strip().lower() for x in header]\n count = 0\n for col in header_values:\n count += int(col in columns)\n if count == len(header_values):\n return i\n i += 1\n return -1\n\n\nsoup = get_soup(BASE_URL + year + \"/leaders\")\n\n# search the page for the target element\ntarget = soup.find_all(\"table\", {\"class\": \"teamSummary\"})\nif not len(target) == 1:\n print(\"Houston, we have a problem!\")\n exit(1)\n\n# create a list of links that are children of the target element\nlinks = [link for link in target[0].find_all('a') if 'href' in link.attrs]\n\n# create list of dicts\n# including team name, abbreviation, and url\nteamList = []\nfor link in links:\n teamList.append({\n 'team': get_text(link),\n 'abbr': TEAM_ABBREVIATIONS[get_text(link)],\n 'url': get_href(link)\n })\n\n# iterate over the teams\n# for team in teamList:\n# print(team)\n\nteamSoup = get_soup(BASE_URL + year + '/' + teamList[0]['url'], verbose=True)\n\n# scrape hitting and extended hitting for overall and conference\n\ntableNum = find_table(teamSoup, HITTING_COLS)\ndata = ScrapeFunctions.scrape_table(teamSoup, tableNum + 1)\nprint(data.head())\n\n\n","sub_path":"tests/ScrapeIndividualOffense.py","file_name":"ScrapeIndividualOffense.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"476808246","text":"from tkinter import *\n\nfrom anime_display import *\nfrom urllib import request\nimport json\n\n\nclass HummingbirdApp(Tk):\n\n SAVE_FILE = \"hummingbird_data.json\"\n ADDRESS = \"http://hummingbird.me/api/v1/users/EkonFain/library\"\n\n def __init__(self, debug = False, forceUpdate = False):\n super().__init__()\n self.title(\"HummingbirdApp [inDev]\")\n self._debug = debug\n\n self._animeList = Frame(self)\n\n self._listBox = Listbox(self._animeList,\n exportselection = 0)\n self._listBox.scrollbar = Scrollbar(self._animeList,\n command = self._listBox.yview)\n self._listBox.config(yscrollcommand = self._listBox.scrollbar.set)\n self._listBox.pack(side = \"left\", fill = BOTH)\n self._listBox.scrollbar.pack(side = \"left\", fill = Y)\n self._animeList.grid(row = 0, column = 0, sticky = N+S)\n\n self._animeDisplay = AnimeDisplay(self)\n self._animeDisplay.grid(row = 0, column = 1, sticky = N+W)\n \n\n\n # initialize debug display\n if self._debug:\n self._debugDisplay = Frame(self)\n self._debugDisplay.grid(row = 1,\n column = 0,\n columnspan = 2,\n sticky = W+E)\n self._debugText = Text(self._debugDisplay,\n width = 80,\n height = 10,\n state = DISABLED)\n self._debugText.pack(side = \"left\", fill = BOTH, expand = True)\n self._debugScroll = Scrollbar(self._debugDisplay,\n command = self._debugText.yview)\n self._debugScroll.pack(side = \"left\", fill = Y)\n self._debugText.config(yscrollcommand = self._debugScroll.set)\n \n self._debugText.tag_config(\"key\", foreground = \"#FF6600\") # orange\n self._debugText.tag_config(\"value\", foreground = \"#0000DD\") # blue\n\n\n self._data = self._load(forceUpdate)\n self._data = sorted(self._data, key = lambda k: k[\"anime\"][\"title\"].lower())\n\n for item in 
self._data:\n self._listBox.insert(END, item[\"anime\"][\"title\"])\n self._listBox.bind(\"\", self._show_item)\n self._listBox.bind(\"\", self._show_item)\n\n\n def _load(self, forceUpdate = False):\n if not forceUpdate:\n try:\n return self._load_from_file()\n except:\n return self._load_from_web()\n else:\n return self._load_from_web()\n\n def _load_from_file(self):\n f = open(HummingbirdApp.SAVE_FILE, 'r')\n data = json.loads(f.read())\n f.close()\n return data\n\n def _load_from_web(self):\n dataString = request.urlopen(HummingbirdApp.ADDRESS).read().decode()\n data = json.loads(dataString)\n f = open(HummingbirdApp.SAVE_FILE, 'w')\n f.write(json.dumps(data))\n f.close()\n return data\n\n def _listbox_callback(self, *args):\n self.after(10, self._show_item)\n\n def _show_item(self, *args):\n index = self._listBox.curselection()[0]\n self._animeDisplay.load(self._data[index])\n\n # update debug window\n if self._debug:\n self._debugText.config(state = NORMAL)\n self._debugText.delete(1.0, END)\n self._debugText.insert(END, \"---anime---\\n\")\n for k, v in self._data[index][\"anime\"].items():\n self._debugText.insert(END, str(k), \"key\")\n self._debugText.insert(END, \" : \")\n self._debugText.insert(END, str(v), \"value\")\n self._debugText.insert(END, \"\\n\")\n\n self._debugText.insert(END, \"---general---\\n\")\n for k, v in self._data[index].items():\n if k != \"anime\":\n self._debugText.insert(END, str(k), \"key\")\n self._debugText.insert(END, \" : \")\n self._debugText.insert(END, str(v), \"value\")\n self._debugText.insert(END, \"\\n\")\n\n self._debugText.config(state = DISABLED)\n\n\n\ndef main():\n root = HummingbirdApp()\n root.mainloop()\n\nmain()","sub_path":"hummingbird_app.py","file_name":"hummingbird_app.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241938685","text":"from model import *\nimport os\n# path is the path retrieved from iterating the whole directors\npath = '/media/olle/Seagate Expansion Drive/DRD_master_thesis_olle_holmberg/augen_clinic_data/image_data/'\nnumber_of_files = 0\nnumber_of_HD_files = 0\n\npatients = os.listdir(path)\nprint(\"THE LIST ORDER OF THE PATIENTS IS:\")\nprint(patients)\n","sub_path":"list_order.py","file_name":"list_order.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"311361616","text":"import sys\nimport os\nfrom osgeo import gdal, gdalconst, osr, gdal_array\nfrom collections import OrderedDict\nimport numpy as np\nimport netCDF4 as nc\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n\n#The purpose of this code is to find the global area coverage of each land cover category for Land-Use Harmonization version 2 data\n\nDATA_DIR = 'LUH_data/data/'\nNC_FILENAME = DATA_DIR+'baseline_states.nc'\nSTATIC_DATA_FILENAME = DATA_DIR+'staticData_quarterdeg.nc'\nSTART_YEAR = 850\n\nVARIABLES = ['urban','c3ann','c4ann','c3per','c4per','c3nfx','range','pastr','primf','secdf','primn','secdn']\nNEW_VARIABLES = ['urban','crops','range','pastr','forest','nonforest']\nNEW_VARIABLE_NAMES = ['Urban','Cropland','Rangeland','Pastureland','Forest','Nonforest']\nMETA_VARIABLE = 'primf'\n\nOUTPUT_CSV = DATA_DIR + 'LUH_baseline_global_area.csv'\n\nTARGET_YEARS = np.arange(850,2016)\n\n\ndef GetnetCDFGlobalMetaData(in_filename):\n \"\"\"\n Function to read global metadata of netcdf file\n \"\"\"\n with nc.Dataset(in_filename) as src:\n return 
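# Editor's note (hedged): the empty event strings in the `bind("")` calls
# above look like extraction damage -- angle-bracketed Tk event names read
# as HTML tags and were stripped. For a listbox selection handler the
# original was almost certainly the virtual event "<<ListboxSelect>>";
# a minimal reconstruction of that wiring:
import tkinter as tk  # Python 3 spelling of the Tkinter import used above

root = tk.Tk()
box = tk.Listbox(root)
box.insert(tk.END, "one", "two")
box.bind("<<ListboxSelect>>", lambda event: print(box.curselection()))
# root.mainloop()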
src.__dict__\n\ndef GetnetCDFVariableMetaData(in_filename,var_name):\n \"\"\"\n Function to read variable metadata of netcdf file\n \"\"\"\n with nc.Dataset(in_filename) as src:\n return src[var_name].__dict__\n\ndef GetnetCDFInfobyName(in_filename,var_name):\n \"\"\"\n Function to read the original file's projection\n \"\"\"\n #Open netCDF file\n src_ds = gdal.Open(in_filename)\n if src_ds is None:\n print(\"Open failed\")\n sys.exit()\n if len(src_ds.GetSubDatasets()) > 1:\n #If exists more than one var in the NetCDF\n subdataset = 'NETCDF:\"'+in_filename+'\":'+var_name\n src_ds_sd = gdal.Open(subdataset)\n \n #begin to read info of the named variable\n NDV = src_ds_sd.GetRasterBand(1).GetNoDataValue()\n xsize = src_ds_sd.RasterXSize\n ysize = src_ds_sd.RasterYSize\n GeoT = src_ds_sd.GetGeoTransform()\n Projection = osr.SpatialReference()\n #Projection.ImportFromWkt(src_ds_sd.GetProjectionRef())\n Projection.ImportFromEPSG(4326)\n \n #Close the subdataset and the whole dataset\n src_ds_sd = None\n src_ds = None\n return NDV, xsize, ysize, GeoT, Projection\n \n\ndef GetnetCDFDataByName(in_filename,var_name,index=0):\n '''\n Reads data for a specified year from netCDF\n '''\n with nc.Dataset(in_filename) as src:\n return src.variables[var_name][index,:,:]\n \ndef GetStaticnetCDFDataByName(in_filename,var_name):\n '''\n Reads data for a specified year from netCDF\n '''\n with nc.Dataset(in_filename) as src:\n return src.variables[var_name][:,:]\n \ndef GetNCDataByNewVariable(year_index,ysize,xsize,NDV,NC_FILENAME=NC_FILENAME,NEW_VARIABLES=NEW_VARIABLES):\n '''\n Aggregate original variables to desired ones\n '''\n #Get data for variables and aggregate when necessary\n nc_data = np.zeros((len(NEW_VARIABLES),ysize,xsize))\n for var_index, var in enumerate(NEW_VARIABLES):\n #If not in the aggregating variables (crops, forest, nonforest) just keep variable\n temp_variables = [var]\n #Otherwise aggregate\n if var == 'crops':\n temp_variables = ['c3ann','c4ann','c3per','c4per','c3nfx']\n elif var == 'forest':\n temp_variables = ['primf','secdf']\n elif var == 'nonforest':\n temp_variables = ['primn','secdn']\n #\"Aggregating\"\n for temp_var in temp_variables:\n nc_data[var_index] = nc_data[var_index]+ GetnetCDFDataByName(NC_FILENAME,temp_var,index=year_index).data\n #Make NDV values uniform\n nc_data[nc_data>=NDV] = NDV\n return nc_data\n \ndef getGlobalArea(var_data,cell_area,NDV):\n #Set no data value to 0\n var_data[var_data>=NDV] = 0\n #Multiply percent cover matrix by grid cell area matrix\n area_coverage = np.multiply(var_data,cell_area)\n #Sum over values to get total coverage\n area_sum = np.sum(area_coverage)\n return area_coverage,area_sum\n \n#Get static data for LUH\n#Get netcdf metadata\nNDV, xsize, ysize, GeoT, Projection = GetnetCDFInfobyName(NC_FILENAME,META_VARIABLE)\n#Get area per grid cell\ncell_area = GetStaticnetCDFDataByName(STATIC_DATA_FILENAME,'carea')\n#Get ice/water fraction of cell, this is the amount of the grid cell covered in ice and water\nice_water_fraction = GetStaticnetCDFDataByName(STATIC_DATA_FILENAME,'icwtr')\n#Multiply grid cell area by 1 - fraction of cell covered in ice and water to get\n# area of grid cell that is terrestrial\ncell_area = np.multiply(cell_area,1-ice_water_fraction)\n\n#Create empty dataframe that will hold area coverage over all years\ncolumns = ['Year']+NEW_VARIABLE_NAMES+['Total']\ndf = pd.DataFrame(columns=columns)\n\n#Iterate through the years and calculate coverage sum\nfor index,year in enumerate(TARGET_YEARS):\n print(year)\n 
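# Editor's note (hedged): the bookkeeping in getGlobalArea above boils down
# to an area-weighted sum -- fractional cover per grid cell times the
# terrestrial cell area (grid area scaled by 1 - ice/water fraction). A toy
# version of that reduction with made-up numbers:
import numpy as np

frac = np.array([[0.5, 1.0], [0.0, 0.25]])      # land-use fraction per cell
cell_area = np.array([[2.0, 2.0], [2.0, 2.0]])  # km^2 per cell
icwtr = np.array([[0.0, 0.5], [0.0, 0.0]])      # ice/water fraction

land_area = cell_area * (1.0 - icwtr)
total = np.sum(frac * land_area)  # 0.5*2 + 1.0*1 + 0.0 + 0.25*2 = 2.5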
#Re-index year \n year_index = year-START_YEAR\n #Get data for new variables from netcdf\n nc_data = GetNCDataByNewVariable(year_index,ysize,xsize,NDV,NC_FILENAME,NEW_VARIABLES)\n #Create empty row to be appended to dataframe\n df_row = np.zeros(len(NEW_VARIABLES)+2)\n #First entry is the year\n df_row[0] = year\n total = 0\n #For each variable find the area coverage\n for var_index, var in enumerate(NEW_VARIABLES):\n area_coverage,area_sum = getGlobalArea(nc_data[var_index,:,:],cell_area,NDV)\n #added 1 to index to reflect first entry is year\n df_row[var_index+1] = area_sum\n total = total + area_sum\n df_row[-1] = total\n #Insert into dataframe\n df.loc[index] = df_row\n\n#Save to csv\ndf.to_csv(OUTPUT_CSV,index=False)","sub_path":"land-use-harmonization/luh_world_coverage.py","file_name":"luh_world_coverage.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225581514","text":"import sys\nimport numpy as np\n\ndef main():\n args = sys.argv\n #args[1] : iter num\n iter = int(args[1])\n lines = open('data/format.txt', 'r').readlines()\n index = 0\n for line in lines:\n if(index == 0):\n states = int(line[:-1])\n transition = np.zeros([states,states])\n observed = list()\n probability = list()\n trans = list()\n for i in range(states):\n observed.append(2+i)\n for i in range(states):\n probability.append(observed[-1]+1+i)\n for i in range(states):\n trans.append(probability[-1]+1+i)\n if(index == 1):\n vocab = int(line[:-1])\n dice = np.zeros([states, 2, vocab])\n if(index in observed):\n line = line.split()\n for i in range(vocab):\n dice[index-2, 0, i] = line[i]\n if(index in probability):\n line = line.split()\n for i in range(vocab):\n dice[index-observed[-1]-1, 1, i] = float(line[i])\n if(index in trans):\n line = line.split()\n for i in range(states):\n transition[index-probability[-1]-1, i] = float(line[i])\n index += 1\n column = cast(dice, transition, 1461, states, vocab)\n write_data(column, iter)\n\ndef cast(dice, transition, length, states, vocab):\n column = list()\n current_state = 0\n for i in range(length):\n z = np.random.multinomial(1,dice[current_state, 1])\n for i in range(vocab):\n if(z[i] == 1):\n break\n column.append(current_state)\n column.append(int(dice[current_state, 0, i]))\n state = np.random.multinomial(1,transition[current_state])\n for i in range(states):\n if(state[i] == 1):\n break\n current_state = i\n\n return column\n\ndef write_data(column, iter):\n output = open('data/sample' + str(iter) +'.txt', 'w')\n for i in range(len(column)):\n if(i % 2 == 1):\n output.write(str(column[i]) + ' ')\n output.write('\\n')\n for i in range(len(column)):\n if(i % 2 == 0):\n output.write(str(column[i]) + ' ')\n output.write('\\n')\n\nif(__name__ == '__main__'):\n main()\n","sub_path":"make_data.py","file_name":"make_data.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420315234","text":"__author__ = 'jerome'\nimport subprocess\nfrom datetime import datetime, timedelta\nimport re\nimport pymysql\n\ndef run_sql(sql):\n try:\n #获取一个数据库连接,注意如果是UTF-8类型的,需要制定数据库\n conn=pymysql.connect(host='10.9.170.118', user='root', passwd='Zqbxzxh1', db='lottery', port=3306, charset='utf8')\n cur=conn.cursor()#获取一个游标\n cur.execute(sql)\n data=cur.fetchall()\n cur.close()#关闭游标\n conn.commit()\n conn.close()\n return data\n except Exception as ex:\n print(ex)\n return None\n\ndef 
load_data():\n dt = datetime.strptime(\"20180615\", \"%Y%m%d\")\n while dt.timestamp() < datetime.strptime(\"20180617\", \"%Y%m%d\").timestamp():\n print('curl https://www.pk106.com/draw-speed5-{}.html'.format(dt.strftime(\"%Y%m%d\")))\n file = subprocess.check_output('curl https://www.pk106.com/draw-speed5-{}.html'.format(dt.strftime(\"%Y%m%d\")), shell=True)\n with open(\"fast_file/{}.html\".format(dt.strftime(\"%Y%m%d\")), \"w\") as f:\n f.write(file.decode())\n parse_file(file.decode(), dt.strftime(\"%Y%m%d\"))\n dt += timedelta(days=1)\n\n\ndef parse_file(file, date):\n # print(datetime.datetime.now())\n p = re.compile('id=\"tr-.*?[]?([0-9]*)[]?(.*?).*?speed5-num\".*?>([0-9]*).*?>([0-9]*).*?>([0-9]*).*?>([0-9]*).*?>([0-9]*).*?', re.S)\n al = []\n insert = \"insert ignore fast_ssc(`index`, `date`, `time`, `num1`, `num2`, `num3`, `num4`, `num5`, `sum`) values \"\n for i, item in enumerate(re.findall(p, file)):\n item = [i.replace(\" \", \"\").strip() for i in item]\n # print(item)\n if item[1].find(item[0]) > 0:\n # print(\"split \", item)\n item[1] = item[1].split(item[0])[1].replace(\"\", \"\")\n if i < 10:\n print(item)\n\n insert += \"({}, '{}', '{}', {}, {}),\".format(item[0], date, item[1], \",\".join(item[2:]), sum([int(i) for i in item[2:]]))\n # print(insert)\n insert = insert.strip(',')\n # print(insert)\n run_sql(insert)\n return sorted(al)\n\ndef save_data():\n dt = datetime.strptime(\"20180323\", \"%Y%m%d\")\n for i in range(4):\n\n f = open(\"fast_file/{}.html\".format(dt.strftime(\"%Y%m%d\"))).read()\n parse_file(f, dt.strftime(\"%Y%m%d\"))\n dt += timedelta(days=1)\n\nload_data()\n# save_data()\n","sub_path":"load_data_subprocess.py","file_name":"load_data_subprocess.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114134504","text":"#!/usr/bin/env python\n\nimport sys\nimport os\nimport subprocess\n\ncwd = os.getcwd()\n\ndef create_tmp_filter_file(iterate, filename='laplacian-smoothing.mlx'):\n\n filter_script_mlx = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \"\"\".format(iterate=iterate)\n\n\n print(\"***********************************\")\n print(filter_script_mlx)\n print(\"***********************************\")\n\n with open(cwd + '/' + filename, 'w') as f:\n f.write(filter_script_mlx)\n return cwd + '/'+ filename\n\n\ndef laplacian_smoothing(in_file, out_file, iterate):\n\n filter_script_path = create_tmp_filter_file(iterate)\n\n print(filter_script_path)\n\n # Add input mesh\n command = 'xvfb-run -a -s \"-screen 0 800x600x24\" meshlabserver -i ' + in_file\n # Add the filter script\n command += \" -s \" + filter_script_path\n # Add the output filename and output flags\n command += \" -o \" + out_file\n # Execute command\n print(\"Going to execute: \" + command)\n output = subprocess.call(command, shell=True)\n last_line = output\n print()\n print(\"Done:\")\n print(in_file + \" > \" + out_file + \": \" + str(last_line))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print(\"Usage:\")\n print(sys.argv[0] + \" /path/to/input_mesh iterate\")\n print(\"Example:\")\n print(sys.argv[0] + \" /home/myuser/mymesh.stl 3\")\n exit(0)\n\n in_mesh = sys.argv[1]\n filename = in_mesh.split('/')[-1].split('.')[0]\n iterate = int(sys.argv[2])\n\n tmp_folder_name = cwd + '/result/'\n 3\n print(\"Input mesh: \" + in_mesh + \" (filename: \" + filename + \")\")\n print(\"Iterate size: \" + str(iterate))\n print(\"Output folder: \" + tmp_folder_name)\n print()\n\n out_mesh = 
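# Editor's note (hedged): the MLX template in create_tmp_filter_file above
# is empty -- its XML tags appear to have been stripped during extraction
# (only the indentation and the trailing .format(iterate=iterate) survive).
# A plausible reconstruction of a MeshLab Laplacian-smooth filter script;
# the element and parameter names follow common .mlx files and should be
# treated as an assumption, not the author's original:
filter_script_mlx = """<!DOCTYPE FilterScript>
<FilterScript>
 <filter name="Laplacian Smooth">
  <Param type="RichInt" value="{iterate}" name="stepSmoothNum"/>
  <Param type="RichBool" value="true" name="Boundary"/>
  <Param type="RichBool" value="false" name="cotangentWeight"/>
  <Param type="RichBool" value="false" name="Selected"/>
 </filter>
</FilterScript>
""".format(iterate=iterate)  # `iterate` mirrors the function's parameter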
tmp_folder_name + filename + \"_laplacian_{iterate}\".format(iterate=iterate) + \".stl\"\n laplacian_smoothing(in_mesh, out_mesh, iterate)\n\n print()\n print(\"Done Laplacian Smoothing, find the files at: \" + tmp_folder_name)\n\n\n","sub_path":"laplacian_smoothing.py","file_name":"laplacian_smoothing.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610273441","text":"import os\nimport numpy as np\nimport h5py\nfrom utils.utils import save_matv73\nimport glob\n\n\nmat_path0 = './test_results1/'\nmat_path1 = './test_results2/'\nmat_path2 = './test_results3/'\nmat_path3 = './test_results4/'\n\nsave_path = './final_test_results/'\nif not os.path.exists(save_path):\n os.makedirs(save_path)\nmat_path0_name = glob.glob(os.path.join(mat_path0, '*.mat'))\nmat_path1_name = glob.glob(os.path.join(mat_path1, '*.mat'))\nmat_path2_name = glob.glob(os.path.join(mat_path2, '*.mat'))\nmat_path3_name = glob.glob(os.path.join(mat_path3, '*.mat'))\nmat_path0_name.sort()\nmat_path1_name.sort()\nmat_path2_name.sort()\nmat_path3_name.sort()\n\nfor i in range(len(mat_path1_name)):\n hf0 = h5py.File(mat_path0_name[i])\n data0 = hf0.get('cube')\n res0 = np.transpose(np.array(data0), [2, 1, 0])\n\n hf1 = h5py.File(mat_path1_name[i])\n data1 = hf1.get('cube')\n res1 = np.transpose(np.array(data1), [2, 1, 0])\n\n hf2 = h5py.File(mat_path2_name[i])\n data2 = hf2.get('cube')\n res2 = np.transpose(np.array(data2), [2, 1, 0])\n\n hf3 = h5py.File(mat_path3_name[i])\n data3 = hf3.get('cube')\n res3 = np.transpose(np.array(data3), [2, 1, 0])\n\n res = 0.25 * res0 + 0.25 * res1 + 0.25 * res2 + 0.25 * res3\n\n print(mat_path0_name[i].split('/')[-1], mat_path1_name[i].split('/')[-1], mat_path2_name[i].split('/')[-1], mat_path3_name[i].split('/')[-1])\n\n mat_dir = os.path.join(save_path, mat_path1_name[i].split('/')[-1])\n save_matv73(mat_dir, 'cube', res)\n\n\n","sub_path":"AWAN_Clean/test/test_ensemble.py","file_name":"test_ensemble.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27846030","text":"import torch.utils.data as data\n\nfrom PIL import Image\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\ndef find_classes(class_series):\n classes = class_series.unique()\n classes.sort()\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n\ndef make_dataset(df, class_to_idx):\n images = []\n for i, row in df.iterrows():\n if is_image_file(row.Path):\n item = (row.Path, class_to_idx[row.Class])\n images.append(item)\n\n return images\n\n\ndef pil_loader(path):\n return Image.open(path).convert('RGB')\n\n\ndef default_loader(path):\n return pil_loader(path)\n\n\nclass PandasDataset(data.Dataset):\n\n def __init__(self, df, transform=None, target_transform=None,\n loader=default_loader):\n df = df[[\"Class\", \"Path\"]]\n classes, class_to_idx = find_classes(df.Class)\n imgs = make_dataset(df, class_to_idx)\n if len(imgs) == 0:\n raise (RuntimeError(\"Found 0 images in DataFrame\"\n \"Supported image extensions are: \" + \",\".join(IMG_EXTENSIONS)))\n\n self.df = df\n self.imgs = imgs\n self.classes = classes\n self.class_to_idx = class_to_idx\n self.transform = transform\n self.target_transform = target_transform\n 
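# Editor's note (hedged): a usage sketch for the PandasDataset class above.
# It expects a DataFrame with 'Class' and 'Path' columns and plugs into the
# standard torchvision transform + DataLoader pipeline; the file names here
# are illustrative only.
import pandas as pd
import torch.utils.data as data
from torchvision import transforms

df = pd.DataFrame({'Class': ['cat', 'dog'],
                   'Path': ['imgs/cat1.jpg', 'imgs/dog1.jpg']})
dataset = PandasDataset(df, transform=transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
]))
loader = data.DataLoader(dataset, batch_size=2, shuffle=True)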
self.loader = loader\n\n def __getitem__(self, index):\n path, target = self.imgs[index]\n img = self.loader(path)\n if self.transform is not None:\n img = self.transform(img)\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n","sub_path":"src/scripts/pandas_dataset.py","file_name":"pandas_dataset.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95053838","text":"from pyramid.response import Response\nfrom pyramid.view import view_config, view_defaults\nfrom pyramid.httpexceptions import HTTPFound, HTTPBadRequest\n\nfrom sqlalchemy.exc import DBAPIError\n\nfrom ..models import Question, Answer\n\nfrom collections import deque\nfrom cgi import escape\nfrom random import shuffle\n\nimport os, shutil, uuid, imghdr\n\nQUESTION_DEQUE_LENGTH = 10000\nSUPPORTED_IMG_EXTENSIONS = ('jpeg', 'gif', 'png', 'bmp')\n\n# next question id queue so they don't repeat too often\nquestion_deque = deque(maxlen=QUESTION_DEQUE_LENGTH)\n\n@view_defaults(route_name='home')\nclass TriviaViews:\n def __init__(self, request):\n self.request = request\n\n @view_config(renderer='templates/index.jinja2')\n def index(self):\n return dict()\n\n @view_config(xhr=True, renderer='templates/question.jinja2')\n def get_question(self):\n if not question_deque:\n questions_ids = self.request.dbsession\\\n .query(Question.id).limit(QUESTION_DEQUE_LENGTH).all()\n\n shuffle(questions_ids)\n question_deque.extend(questions_ids)\n q = self.request.dbsession.query(Question).get(question_deque.pop())\n a = q.answers\n streak = self.request.session.get('streak', 0)\n return dict(image=q.get_img, question=q, answers=a, streak=streak)\n\n @view_config(request_method='POST', xhr=True, renderer='json')\n def submit_answer(self):\n try:\n question_id = self.request.POST['q_id']\n answer_id = self.request.POST['answer_id']\n\n answ = self.request.dbsession.query(Answer).get(answer_id)\n q = self.request.dbsession.query(Question).get(question_id)\n except Exception as e:\n raise HTTPBadRequest()\n\n correct_answer = q.get_correct_answer\n\n session = self.request.session\n streak = session.get('streak', 0)\n\n if answ == correct_answer:\n session['streak'] = streak + 1\n return dict(correct_answer=correct_answer.id, streak=streak + 1)\n else:\n session['streak'] = 0\n return dict(correct_answer=correct_answer.id, streak=0)\n\n @view_config(route_name='create', renderer='templates/create.jinja2')\n def create_form(self):\n return dict()\n\n @view_config(route_name='create', renderer='templates/create.jinja2',\n request_method='POST')\n def create_submit(self):\n input_file = self.request.POST['image'].file\n file_extension = imghdr.what(input_file)\n\n if not (file_extension in SUPPORTED_IMG_EXTENSIONS):\n return dict(message=\"Only jpeg, gif, png and bmp files allowed\")\n\n new_filename = '%s.%s' % (uuid.uuid4(), file_extension)\n\n file_path = os.path.join('quicktrivia', 'media', new_filename)\n\n temp_file_path = file_path + '~'\n\n input_file.seek(0)\n with open(temp_file_path, 'wb') as output_file:\n shutil.copyfileobj(input_file, output_file)\n\n os.rename(temp_file_path, file_path)\n\n question_text = escape(self.request.POST['question'])\n q = Question(content=question_text, img_name=new_filename)\n self.request.dbsession.add(q)\n\n answers = (\n escape(self.request.POST['answer-a']),\n escape(self.request.POST['answer-b']),\n 
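# Editor's note (hedged): `from cgi import escape` in the Pyramid views
# above was deprecated in Python 3.2 and removed in 3.8; on modern
# interpreters the equivalent is html.escape. One behavioural difference to
# keep in mind: html.escape quotes attribute characters by default
# (quote=True), whereas cgi.escape did not.
from html import escape  # drop-in replacement on Python 3.8+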
escape(self.request.POST['answer-c']),\n escape(self.request.POST['answer-d']),\n )\n\n correct_answer_number = self.request.POST['correct-answer']\n\n for counter, answer in enumerate(answers):\n correct = True if counter == int(correct_answer_number) else False\n self.request.dbsession.add(Answer(question=q, content=answer,\n is_correct=correct))\n\n return dict(message=\"Created question \" + question_text)\n","sub_path":"quicktrivia/views/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"137955972","text":"\n\nfrom xai.brain.wordbase.nouns._banknote import _BANKNOTE\n\n#calss header\nclass _BANKNOTES(_BANKNOTE, ):\n\tdef __init__(self,): \n\t\t_BANKNOTE.__init__(self)\n\t\tself.name = \"BANKNOTES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"banknote\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_banknotes.py","file_name":"_banknotes.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14154760","text":"import fresh_tomatoes\nimport media\n\ntoy_story = media.Movie(\"Toy Story\",\n \"A story of a boy and his toys that come to life\",\n \"http://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg\",\n \"https://www.youtube.com/watch?v=vwyZH85NQC4\")\n\n#print (toy_story.storyline)\n#toy_story.show_trailer()\n\navatar = media.Movie(\"Avatar\",\n \"A marine figting aliens\",\n \"https://www.movieposter.com/posters/archive/main/98/MPW-49433\",\n \"https://www.youtube.com/watch?v=cX0R3mXaod8\")\n#print(avatar.storyline)\n#avatar.show_trailer()\n\nschool_of_rock = media.Movie(\"School of Rock\",\n \"A rock band guitarist becomes teacher at school.\",\n \"http://upload.wikimedia.org/wikipedia/en/1/11/School_of_Rock_Poster.jpg\",\n \"https://www.youtube.com/watch?v=5afGGGsxvEA\")\n\nratatouille = media.Movie(\"Ratatouille\",\n \"A cartoon about a mice that learns to cook\",\n \"http://upload.wikimedia.org/wikipedia/en/5/50/RatatouillePoster.jpg\",\n \"https://www.youtube.com/watch?v=1yKqLNnxGZw\")\n\nmidnight_in_paris = media.Movie(\"Midnight in Paris\",\n \"Poet travels back time\",\n \"http://upload.wikimedia.org/wikipedia/en/9/9f/Midnight_in_Paris_Poster.jpg\",\n \"https://www.youtube.com/watch?v=BYRWfS2s2v4\")\n\n\nhunger_games = media.Movie(\"Hunger Games\",\n \"Strange game of future\",\n \"http://upload.wikimedia.org/wikipedia/en/4/4b/Hunger_Games_Film_Poster.jpg\",\n \"https://www.youtube.com/watch?v=C_Tsj_wTJkQ\")\nmovies = [toy_story, avatar, school_of_rock, ratatouille, midnight_in_paris]\nfresh_tomatoes.open_movies_page(movies)\n","sub_path":"entertainment.py","file_name":"entertainment.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184698387","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-08-18\n# @Author : dhj\n# @Link : http://example.org\n# @Version : $Id$\n\n#检查单据数据异常:零售单超大异常数据\n\nfrom execsqlscript import *\nfrom send_my_qywx import *\nimport datetime\nimport sys\nsys.path.append(\"F:\\\\Python\\\\config\")\nfrom sms_receiver_list import *\nfrom Sql_Script import *\n\n#1、执行脚本\nconnstr = get_conn_oracle_f4()\n\nreceivers = ID_OM_ERP\ncontent = \"\"\nrowlist = []\n\nfor key,value in FILE_F4_BILL_CHECK.items():\n#\tprint(key + ':' + value)\n\texec_oracle_noreturn(connstr, get_sql_script_file(value))\n\nfor 
key,value in SQL_F4_BILL_QUERY.items():\n\trowlist = exec_oracle_return(connstr, value)\n\t#print(value + ';\\n')\n\tif len(rowlist) > 1:\n\t\t# print(value)\n\t\tcontent = get_str_by_sql_return(rowlist)\n\t\tif key in Remark.keys():\n\t\t\tcontent = Remark[key] + content\n\t\tsend_ky_qywx_text(receivers,content) ","sub_path":"src/Check_WeChat_Qywx_F4_Bill.py","file_name":"Check_WeChat_Qywx_F4_Bill.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379547343","text":"#!/usr/bin/env python\n#\n# Copyright (C) 2017, Hewlett-Packard Development Company\n# Author: Dave Brookshire \n#\n#\nimport time\nimport serial\nimport platform\n\nfrom rhusb.sensor import RHUSB\n\ndelay = 1\ncount = 10\n\nif __name__ == '__main__':\n print(\"Platform: {0}\".format(platform.system()))\n if platform.system() == \"Windows\":\n device = \"COM4\"\n else:\n device = \"/dev/ttyUSB0\"\n print(\"Device: {0}\".format(device))\n print()\n\n try:\n sens = RHUSB(device=device)\n print(\"PA: [{0}]\".format(sens.PA()))\n print(\"C: [{0}]\".format(sens.C()))\n print(\"F: [{0}]\".format(sens.F()))\n print(\"H: [{0}]\".format(sens.H()))\n\n print(\"\\nStarting {0} periodic readings every {1} seconds\".format(count, delay))\n\n while count:\n print(\"--> {0}\".format(sens.PA()))\n count -= 1\n time.sleep(delay)\n\n except serial.serialutil.SerialException:\n print(\"Error: Unable to open RH-USB Serial device {0}.\".format(device))","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497861760","text":"#!/usr/bin/python3\n\"\"\"\n Start Flask Application\n\"\"\"\nfrom models import storage\nfrom flask import Flask, Blueprint, make_response, jsonify\nfrom flask_cors import CORS\nfrom api.v1.views import app_views\nfrom os import getenv\n\napp = Flask(__name__)\nCORS(app, resources={r\"/*\": {\"origins\": \"0.0.0.0\"}})\napp.register_blueprint(app_views)\n\n\n@app.teardown_appcontext\ndef finish(NaN):\n storage.close()\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\nif __name__ == '__main__':\n host = getenv(\"HBNB_API_HOST\", '0.0.0.0')\n port = getenv(\"HBNB_API_PORT\", '5000')\n app.run(host=host, port=port, threaded=True, debug=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568676097","text":"import matplotlib\nfrom maskgen.maskgen_loader import MaskGenLoader\nfrom maskgen.ui.semantic_frame import SemanticFrame\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\nmatplotlib.use(\"TkAgg\")\nimport logging\nfrom matplotlib.figure import Figure\nfrom Tkinter import *\nimport matplotlib.patches as mpatches\nimport ttk\nimport tkMessageBox\nfrom PIL import ImageTk\nfrom maskgen.support import getValue\nfrom maskgen.tool_set import imageResizeRelative, openImage,get_username, GrayBlockOverlayGenerator, compose_overlay_name\nimport os\nimport numpy as np\nimport maskgen.qa_logic\nfrom maskgen.video_tools import get_end_time_from_segment\nimport maskgen.tool_set\nimport random\nimport maskgen.scenario_model\nfrom maskgen.services.probes import ProbeGenerator, DetermineTaskDesignation, fetch_qaData_designation, cleanup_temporary_files\nimport 
maskgen.validation\nfrom maskgen.tool_set import openFile\nimport webbrowser\nfrom maskgen.graph_meta_tools import MetaDataExtractor\n\n\nclass Chkbox:\n\n def __init__(self, parent, dialog, label=None, command=None, value=False):\n self.value = BooleanVar(value=value)\n self.box = Checkbutton(parent, variable=self.value, command=dialog.check_ok if command is None else command)\n self.label = label\n\n def __nonzero__(self):\n return self.value.get()\n\n def set_value(self, value):\n self.value.set(value=value)\n\n def grid_info(self):\n return self.box.grid_info()\n\n def grid(self):\n self.label.grid()\n self.box.grid()\n\n def grid_remove(self):\n self.box.grid_remove()\n self.label.grid_remove()\n\n\nclass CheckboxGroup:\n \"\"\"\n boxes: list of wrapped Checkboxes\n condition: either 'all'- all checkboxes in the group must be true or 'any'- any true value will return true.\n \"\"\"\n\n def __init__(self, boxes = [], condition = 'all'):\n self.boxes = boxes\n self.condition = condition\n\n def __nonzero__(self):\n if len(self.boxes) == 0:\n return True\n if self.condition == 'any':\n return any(bool(value) for value in self.boxes)\n else:\n return all(bool(value) for value in self.boxes)\n\n def hide_group(self):\n for ck in self.boxes:\n ck.grid_remove()\n\n def show_group(self):\n for ck in self.boxes:\n ck.grid()\n\n def grid_info(self, index = -1):\n \"\"\"\n Get the grid_info of the checkbox at the index. default is last index\n :return:\n \"\"\"\n return self.boxes[index].grid_info() if len(self.boxes) > 0 else {}\n\nclass MannyPage(Frame):\n\n \"\"\"\n Displays mascot with instructions and status information on probe and QA page generation.\n \"\"\"\n checkboxes = CheckboxGroup()\n manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]\n\n def __init__(self, master):\n Frame.__init__(self, master)\n self.statusLabelText = StringVar()\n self.statusLabelText.set('Probes Generating')\n self.heading = Label(self, text=\"Welcome to the QA Wizard. Press Next to begin the QA Process or Quit to stop. This is \"\n \"Manny; He is here to help you analyze the journal. The tool is currently generating the probes. \"\n \"This could take a while. 
When the next button is enabled you may begin.\",\n wraplength=400)\n self.heading.grid(column=0, row=0, rowspan=2, columnspan=2)\n manny_color = maskgen.tool_set.get_icon('Manny_icon_color.jpg')\n manny_mask = maskgen.tool_set.get_icon('Manny_icon_mask.jpg')\n self.mannyFrame = Frame(self)\n self.mannyFrame.grid(column=0, row=2, columnspan=2)\n self.canvas = Canvas(self.mannyFrame, width=510, height=510)\n self.canvas.pack()\n manny_img = openImage(manny_color)\n manny_img_mask = openImage(manny_mask).to_mask()\n manny_img_mask = imageResizeRelative(manny_img_mask, (500, 500), manny_img_mask.size)\n self.manny = ImageTk.PhotoImage(\n imageResizeRelative(manny_img, (500, 500), manny_img.size).overlay(manny_img_mask,self.manny_colors[\n random.randint(0, len(self.manny_colors) - 1)]).toPIL())\n self.image_on_canvas = self.canvas.create_image(510 / 2, 510 / 2, image=self.manny, anchor=CENTER, tag='things')\n self.statusLabelObject = Label(self, textvariable=self.statusLabelText)\n self.statusLabelObject.grid(column=0, row=3, columnspan=2, sticky=E + W)\n self.canvas.bind(\"\", master.help)\n self.wquit = Button(self, text='Quit', command=master.exitProgram, width=20)\n self.wquit.grid(column=0, row=4, sticky=W, padx=5, pady=5)\n self.wnext = Button(self, text='Next', command=master.nex, state=DISABLED, width=20)\n self.wnext.grid(column=1, row=4, sticky=E, padx=5, pady=5)\n\nclass FinalPage(Frame):\n \"\"\"\n Final QA page, handles comments, final approval.\n \"\"\"\n def __init__(self, master):\n Frame.__init__(self, master)\n row = 0\n col = 0\n self.infolabel = Label(self, justify=LEFT, text='QA Checklist:').grid(row=row, column=col)\n row += 1\n qa_list = [\n 'Base and terminal node images should be the same format. -If the base was a JPEG, the Create JPEG/TIFF option should be used as the last step.',\n 'All relevant semantic groups are identified.']\n self.checkboxes = CheckboxGroup(boxes=[])\n for q in qa_list:\n box_label = Label(self, text=q, wraplength=600, justify=LEFT)\n ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_state())\n ck.box.grid(row=row, column=col)\n ck.label.grid(row=row, column=col + 1, sticky='W')\n self.checkboxes.boxes.append(ck)\n row += 1\n master.checkboxes[master.current_qa_page] = self.checkboxes\n if len(self.master.errors) > 1:\n Label(self, text='Probes were generated with errors. They can be reviewed, but QA cannot be accepted. 
Check the log for errors.').grid(row=row, column=col+1)\n row += 1\n Label(self, text='QA Signoff: ').grid(row=row, column=col)\n col += 1\n self.reporterStr = StringVar()\n self.reporterStr.set(get_username())\n self.reporterEntry = Entry(self, textvar=self.reporterStr)\n self.reporterEntry.grid(row=row, column=col, columnspan=3, sticky='W')\n row += 2\n col -= 1\n self.acceptButton = Button(self, text='Accept', command=lambda: master.qa_done('yes'), width=15,\n state=DISABLED)\n self.acceptButton.grid(row=row, column=col + 2, columnspan=2, sticky='W')\n self.rejectButton = Button(self, text='Reject', command=lambda: master.qa_done('no'), width=15)\n self.rejectButton.grid(row=row, column=col + 1, columnspan=1, sticky='E')\n self.previButton = Button(self, text='Previous', command=master.pre, width=15)\n self.previButton.grid(row=row, column=col, columnspan=2, sticky='W')\n\n row += 1\n self.commentsLabel = Label(self, text='Comments: ')\n self.commentsLabel.grid(row=row, column=col, columnspan=3)\n row += 1\n textscroll = Scrollbar(self)\n textscroll.grid(row=row, column=col + 4, sticky=NS)\n self.commentsBox = Text(self, height=5, width=100, yscrollcommand=textscroll.set, relief=SUNKEN)\n self.commentsBox.grid(row=row, column=col, padx=5, pady=5, columnspan=3, sticky=NSEW)\n textscroll.config(command=self.commentsBox.yview)\n currentComment = master.parent.scModel.getProjectData('qacomment')\n self.commentsBox.insert(END, currentComment) if currentComment is not None else ''\n\nclass QAPage(Frame):\n \"\"\"\n A standard QA Page, allows review and user validation of probe spatial, temporal aspects\n \"\"\"\n\n #TODO: Refactor to put page data with the page.\n \"\"\"\n subplots = []\n pltdata = []\n successIcon = None\n displays = []\n pathboxes = []\n \"\"\"\n\n def __init__(self, master, link):\n Frame.__init__(self, master=master)\n self.master = master\n self.link = link\n self.checkboxes = CheckboxGroup(boxes=[])\n #Find this probe- could probably do this elsewhere and pass it in.\n self.edgeTuple = tuple(link.split(\"<-\"))\n if len(self.edgeTuple) < 2:\n self.finalNodeName = link.split(\"->\")[1]\n self.edgeTuple = tuple(link.split(\"->\"))\n else:\n self.finalNodeName = None\n if (len(link.split('->'))>1):\n probe = [probe for probe in master.probes if\n probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.finalNodeId in master.lookup[self.edgeTuple[1]]][0]\n else:\n probe = \\\n [probe for probe in master.probes if\n probe.edgeId[1] in master.lookup[self.edgeTuple[0]] and probe.donorBaseNodeId in\n master.lookup[\n self.edgeTuple[1]]][0]\n self.probe = probe\n iFrame = Frame(self)\n c = Canvas(iFrame, width=35, height=35)\n c.pack()\n\n #Success Icon\n img = openImage(maskgen.tool_set.get_icon('RedX.png') if probe.failure else maskgen.tool_set.get_icon('check.png'))\n self.successIcon = ImageTk.PhotoImage(imageResizeRelative(img, (30, 30), img.size).toPIL())\n c.create_image(15, 15, image=self.successIcon, anchor=CENTER, tag='things')\n\n #Layout\n row = 0\n col = 0\n self.optionsLabel = Label(self, text=self.link, font=(None, 10))\n self.optionsLabel.grid(row=row, columnspan=3, sticky='EW', padx=(40, 0), pady=10)\n iFrame.grid(column=0, row=0, columnspan=1, sticky=W)\n row += 1\n self.operationVar = StringVar(value=\"Operation [ Semantic Groups ]:\")\n self.operationLabel = Label(self, textvariable=self.operationVar, justify=LEFT)\n self.semanticFrame = SemanticFrame(self)\n self.semanticFrame.grid(row=row + 1, column=0, columnspan=2, sticky=N + W, rowspan=1, pady=10)\n 
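# skip the two grid rows reserved above for the semantic-group list before placing the image notebook
 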
row += 2\n #cImageFrame is used for plot, image and overlay\n self.cImgFrame = ttk.Notebook(self)\n self.cImgFrame.bind('<<NotebookTabChanged>>', lambda a: self.frameMove())\n self.cImgFrame.grid(row=row, rowspan=8)\n self.descriptionVar = StringVar()\n self.descriptionLabel = Label(self, textvariable=self.operationVar, justify=LEFT)\n row += 8\n self.operationLabel.grid(row=row, columnspan=3, sticky='W', padx=10)\n row += 1\n textscroll = Scrollbar(self)\n textscroll.grid(row=row, column=col + 1, sticky=NS)\n self.commentBox = Text(self, height=5, width=80, yscrollcommand=textscroll.set, relief=SUNKEN)\n self.master.commentsBoxes[self.link] = self.commentBox\n self.commentBox.grid(row=row, column=col, padx=5, pady=5, columnspan=1, rowspan=2, sticky=NSEW)\n textscroll.config(command=self.commentBox.yview)\n col = 3\n row = 0\n scroll = Scrollbar(self)\n scroll.grid(row=row, column=col + 2, rowspan=5, columnspan=1, sticky=NS)\n\n self.pathList = Listbox(self, width=30, yscrollcommand=scroll.set, selectmode=EXTENDED, exportselection=0)\n self.pathList.grid(row=row, column=col - 1, rowspan=5, columnspan=3, padx=(30, 10), pady=(20, 20))\n self.master.pathboxes[self] = self.semanticFrame.getListbox()\n scroll.config(command=self.pathList.yview)\n self.transitionVar = StringVar()\n\n edge = master.scModel.getGraph().get_edge(probe.edgeId[0], probe.edgeId[1])\n self.operationVar.set(self.operationVar.get() + master._compose_label(edge))\n master.edges[self] = [edge, self.semanticFrame.getListbox()]\n for sg in edge['semanticGroups'] if 'semanticGroups' in edge else []:\n self.semanticFrame.insertListbox(ANCHOR, sg)\n operation = master.scModel.getGroupOperationLoader().getOperationWithGroups(edge['op'])\n\n #QA checkboxes\n if operation.qaList is not None:\n args = getValue(edge, 'arguments', {})\n self.curOpList = [x for x in operation.qaList]\n for item_pos in range(len(self.curOpList)):\n item = self.curOpList[item_pos]\n try:\n self.curOpList[item_pos] = item.format(**args)\n except:\n pass\n else:\n self.curOpList = []\n row += 5\n if self.curOpList is None:\n master.qaData.set_qalink_status(self.link, 'yes')\n\n for q in self.curOpList:\n box_label = Label(self, text=q, wraplength=250, justify=LEFT)\n ck = Chkbox(parent=self, dialog=master, label=box_label, value=master.qaData.get_qalink_status(link=link))\n ck.box.grid(row=row, column=col - 1)\n ck.label.grid(row=row, column=col, columnspan=4, sticky='W')\n self.checkboxes.boxes.append(ck)\n row += 1\n master.checkboxes[self] = self.checkboxes\n\n # Main Features- load the overlay for images, load plot graph & overlay page for videos\n if ('<-' in self.link and probe.donorVideoSegments is None) or probe.targetVideoSegments is None:\n self.load_overlay(initialize=True)\n else:\n self.transitionString(None)\n self.setUpFrames()\n\n #Comment section\n currentComment = master.qaData.get_qalink_caption(self.link)\n self.commentBox.delete(1.0, END)\n self.commentBox.insert(END, currentComment if currentComment is not None else '')\n\n #Navigation Buttons\n self.acceptButton = Button(self, text='Next', command=master.nex, width=15)\n self.acceptButton.grid(row=12, column=col + 2, columnspan=2, sticky='E', padx=(20, 20))\n self.prevButton = Button(self, text='Previous', command=master.pre, width=15)\n self.prevButton.grid(row=12, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))\n\n self.acceptnButton = Button(self, text='Next Unchecked', command=master.nexCheck, width=15)\n self.acceptnButton.grid(row=13, column=col + 2, columnspan=2, sticky='E', padx=(20, 
20))\n self.prevnButton = Button(self, text='Previous Unchecked', command=master.preCheck, width=15)\n self.prevnButton.grid(row=13, column=col - 1, columnspan=2, sticky='W', padx=(20, 20))\n row = 14\n #Progress Bar\n pb = ttk.Progressbar(self, orient='horizontal', mode='determinate', maximum=100.0001)\n pb.grid(row=row, column=0, sticky=EW, columnspan=8)\n pb.step(master.progress * 100)\n\n master.progressBars.append(pb)\n\n def setUpFrames(self):\n \"\"\"\n Lays out inner display for video temporal and spatial review\n :return:\n \"\"\"\n displays = [TemporalReviewDisplay(self)]\n if any(segment.filename != None for segment in self.probe.targetVideoSegments):\n displays.append(SpatialReviewDisplay(self))\n self.checkboxes.boxes.append(CheckboxGroup(boxes=[d.checkbox for d in displays], condition='any'))\n self.master.pageDisplays[self] = [0, displays]\n\n def _add_to_listBox(self, box, string):\n if len(string) < 20:\n box.insert(END, string)\n return 1\n box.insert(END, string[0:15]+\"...\")\n box.insert(END, \" \" + string[max(15-int(len(string)),-10):])\n return 2\n\n def transitionString(self, probeList):\n tab = \" \"\n current = 0\n c = 0\n if self.finalNodeName == None:\n self._add_to_listBox(self.pathList, self.edgeTuple[1])\n self.pathList.insert(END, 2*tab + \"|\")\n self.pathList.insert(END, tab + \"Donor\")\n self.pathList.insert(END, 2*tab + \"|\")\n self.pathList.insert(END, 2*tab + \"V\")\n self._add_to_listBox(self.pathList, self.edgeTuple[0])\n self.pathList.select_set(6)\n return self.edgeTuple[0] + \"\\n|Donor|\\nV\\n\" + self.edgeTuple[1]\n self._add_to_listBox(self.pathList,self.master.backs[self.finalNodeName][0].start)\n for p in self.master.backs[self.finalNodeName]:\n edge = self.master.scModel.getGraph().get_edge(p.start, p.end)\n self.pathList.insert(END, 2 * tab + \"|\")\n c += self._add_to_listBox(self.pathList, edge['op'])\n self.pathList.insert(END, 2 * tab + \"|\")\n self.pathList.insert(END, 2 * tab + \"V\")\n c += 3\n c += self._add_to_listBox(self.pathList, self.master.getFileNameForNode(p.end))\n if self.master.getFileNameForNode(p.end) == self.edgeTuple[0]:\n current = c\n\n self.pathList.selection_set(current)\n self.pathList.see(max(0,current-5))\n return \"\"\n\n def load_overlay(self, initialize):\n \"\"\"\n Lays out display for spatial overlay for image probes\n :param initialize:\n :return:\n \"\"\"\n edgeTuple = self.edgeTuple\n message = 'final image'\n if (len(self.link.split('->')) > 1):\n probe = [probe for probe in self.master.probes if\n probe.edgeId[1] in self.master.lookup[self.edgeTuple[0]] and probe.finalNodeId in self.master.lookup[\n self.edgeTuple[1]]][0]\n n = self.master.scModel.G.get_node(probe.finalNodeId)\n finalFile = os.path.join(self.master.scModel.G.dir,\n self.master.scModel.G.get_node(probe.finalNodeId)['file'])\n final = openImage(finalFile)\n finalResized = imageResizeRelative(final, (500, 500), final.size)\n imResized = imageResizeRelative(probe.targetMaskImage, (500, 500),\n probe.targetMaskImage.size if probe.targetMaskImage is not None else finalResized.size)\n\n\n else:\n message = 'donor'\n probe = \\\n [probe for probe in self.master.probes if probe.edgeId[1] in self.master.lookup[edgeTuple[0]] and probe.donorBaseNodeId in self.master.lookup[edgeTuple[1]]][0]\n final, final_file = self.master.scModel.G.get_image(probe.donorBaseNodeId)\n finalResized = imageResizeRelative(final, (500, 500), final.size)\n imResized = imageResizeRelative(probe.donorMaskImage, (500, 500),\n probe.donorMaskImage.size if 
probe.donorMaskImage is not None else finalResized.size)\n\n edge = self.master.scModel.getGraph().get_edge(probe.edgeId[0],probe.edgeId[1])\n\n if initialize is True:\n self.c = Canvas(self.cImgFrame, width=510, height=510)\n self.c.pack()\n self.transitionString(None)\n try:\n finalResized = finalResized.overlay(imResized)\n except IndexError:\n tex = self.c.create_text(250,250,width=400,font=(\"Courier\", 20))\n self.c.itemconfig(tex, text=\"The mask of link {} did not match the size of the {}.\".format(self.link, message))\n return\n self.master.photos[self.link] = ImageTk.PhotoImage(finalResized.toPIL())\n self.image_on_canvas = self.c.create_image(255, 255, image=self.master.photos[self.link], anchor=CENTER, tag='imgc')\n\n def frameMove(self):\n \"\"\"\n change pages on inner display for videos\n :return:\n \"\"\"\n if self in self.master.pageDisplays:\n displays = self.master.pageDisplays[self][1]\n d_index = self.cImgFrame.index('current')\n displays[d_index].checkbox.grid()\n for display in displays:\n if display != displays[d_index]:\n display.checkbox.grid_remove()\n\n\n def scrollplt(self, *args):\n \"\"\"\n Handle scrolling function on temporal review graph.\n :param args:\n :return:\n \"\"\"\n if (args[0] == 'moveto'):\n na = self.master.pltdata[self]\n end = na[-1]\n total = end[3]-end[2] + 20000\n curframe = self.master.subplots[self].get_children()[1].xaxis.get_view_interval()\n space = curframe[1]-curframe[0]\n total *= float(args[1])\n self.master.subplots[self].get_children()[1].xaxis.set_view_interval(total, total + space, ignore=True)\n self.master.subplots[self].canvas.draw()\n elif (args[0] == 'scroll'):\n self.master.subplots[self].get_children()[1].xaxis.pan(int(args[1]))\n self.master.subplots[self].canvas.draw()\n\n def cache_designation(self):\n \"\"\"\n Cache the QA validation of probe designation.\n :return:\n \"\"\"\n self.master.check_ok()\n displays = self.master.pageDisplays[self][1] if self in self.master.pageDisplays else []\n if len(displays) > 0:\n validation = {'temporal': bool(displays[0].checkbox), 'spatial': bool(displays[1].checkbox) if len(displays) > 1 else False}\n elegibility = [key for key in validation.keys() if validation[key] == True]\n designation = '-'.join(elegibility) if len(elegibility) else 'detect'\n else:\n designation = self.probe.taskDesignation\n self.master.qaData.set_qalink_designation(self.link, designation)\n\nclass DummyPage(Frame):\n def __init__(self, master, labeltext = ''):\n Frame.__init__(self, master=master)\n self.mainlabel = Label(self, text= labeltext)\n self.mainlabel.pack()\n self.nextButton = Button(self, text='NEXT', command=master.nex)\n self.nextButton.pack()\n\n\nclass SpatialReviewDisplay(Frame):\n \"\"\"\n The spatial review display for video\n \"\"\"\n\n def __init__(self, page):\n Frame.__init__(self, master=page.cImgFrame, height=500,width=50)\n page.cImgFrame.add(self, text='Spatial')\n self.dialog = self.winfo_toplevel()\n #Add Checkbox for spatial review\n checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}\n chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5\n chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4\n spatial_box_label = Label(master=page, text='Spatial Overlay Correct?', wraplength=250, justify=LEFT)\n self.checkbox = Chkbox(parent=page, dialog=page.master, label=spatial_box_label, command=page.cache_designation,\n value=page.master.qaData.get_qalink_designation(page.link) is not 
None)\n self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col -1)\n self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')\n self.checkbox.grid_remove() #hide for now, will be gridded by the frameMove function\n\n if (len(page.link.split('->')) > 1):\n probe = [probe for probe in page.master.probes if\n probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.finalNodeId in\n page.master.lookup[page.edgeTuple[1]]][0]\n else:\n probe = \\\n [probe for probe in page.master.probes if\n probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]] and probe.donorBaseNodeId in\n page.master.lookup[\n page.edgeTuple[1]]][0]\n\n if probe.targetVideoSegments is not None:\n to = os.path.join(self.dialog.scModel.get_dir(),probe.finalImageFileName)\n overlay_file = compose_overlay_name(target_file=to, link=page.link)\n total_range = (probe.targetVideoSegments[0].starttime/1000, probe.targetVideoSegments[-1].endtime/1000)\n\n self.buttonText = StringVar()\n\n self.buttonText.set(value=('PLAY: ' if os.path.exists(overlay_file) else 'GENERATE: ') + os.path.split(overlay_file)[1])\n self.playbutton = Button(master=self, textvariable=self.buttonText,\n command=lambda: self.openOverlay(probe=probe,\n target_file=to,\n overlay_path=overlay_file))\n self.playbutton.grid(row=0, column=0, columnspan=2, sticky='W')\n self.range_label = Label(master=self, text='Range: ' + '{:.2f}'.format(total_range[0]) + 's - ' + '{:.2f}'.format(total_range[1]) + 's')\n self.range_label.grid(row=0, column= 3, columnspan = 1, sticky='W')\n\n def openOverlay(self, probe=None, target_file = '', overlay_path=''):\n if not os.path.exists(overlay_path):\n GrayBlockOverlayGenerator(locator=self.dialog.meta_extractor.getMetaDataLocator(probe.edgeId[0]),\n segments=probe.targetVideoSegments,\n target_file=target_file, output_file=overlay_path).generate()\n self.buttonText.set('PLAY: ' + os.path.split(overlay_path)[1])\n openFile(overlay_path)\n\nclass TemporalReviewDisplay(Frame):\n \"\"\"\n The temporal review display for video\n \"\"\"\n\n def __init__(self, page):\n Frame.__init__(self, master=page.cImgFrame)\n page.cImgFrame.add(self, text='Temporal')\n # Add Checkbox for temporal review\n checkbox_info = page.checkboxes.boxes[-1].grid_info() if len(page.checkboxes.boxes) > 0 else {}\n chkboxes_row = int(checkbox_info['row']) + 1 if len(checkbox_info) > 0 else 5\n chkboxes_col = int(checkbox_info['column']) + 1 if len(checkbox_info) > 0 else 4\n temporal_box_label = Label(master=page, text='Temporal data correct?', wraplength=250, justify=LEFT)\n self.checkbox = Chkbox(parent=page, dialog=page.master, label=temporal_box_label, command=page.cache_designation,\n value=page.master.qaData.get_qalink_designation(page.link) is not None)\n self.checkbox.box.grid(row=chkboxes_row, column=chkboxes_col - 1)\n self.checkbox.label.grid(row=chkboxes_row, column=chkboxes_col, columnspan=4, sticky='W')\n self.checkbox.grid_remove() #hide for now, will be gridded by the frameMove function\n\n ps = [mpatches.Patch(color=\"red\", label=\"Target Video\"),\n mpatches.Patch(color=\"blue\", label=\"Current Manipulations\"),\n mpatches.Patch(color=\"green\", label=\"Other Manipulations\")]\n data = []\n f = Figure(figsize=(6, 4), dpi=100)\n subplot = f.add_subplot(111)\n subplot.legend(handles=ps, loc=8)\n prolist = []\n maxtsec = 0\n\n for probe in page.master.probes:\n maxtsec = max(maxtsec, probe.max_time())\n if (page.finalNodeName == None):\n if probe.donorBaseNodeId is not None and 
page.master.getFileNameForNode(probe.donorBaseNodeId) == \\\n page.edgeTuple[1]:\n prolist.append(probe)\n else:\n if (page.master.getFileNameForNode(probe.finalNodeId) == page.edgeTuple[1]):\n prolist.append(probe)\n try:\n tsec = get_end_time_from_segment(\n page.master.meta_extractor.getMetaDataLocator(page.master.lookup[page.edgeTuple[1]][0]).getMaskSetForEntireVideo(\n media_types=probe.media_types())[0]) / 1000.0\n except Exception as ex:\n logging.getLogger(\"maskgen\").error(ex.message)\n logging.getLogger(\"maskgen\").error(\n \"{} Duration could not be found the length displayed in the graph is incorrect\".format(\n page.edgeTuple[1]))\n tsec = maxtsec\n ytics = []\n ytic_lbl = []\n count = 0\n high = 0\n low = tsec * 1000 + 20000\n for probe in prolist:\n count += 1\n col = 2\n cur = False\n if (probe.edgeId[1] in page.master.lookup[page.edgeTuple[0]]):\n col = 1\n cur = True\n if page.finalNodeName == None:\n for mvs in probe.donorVideoSegments if probe.donorVideoSegments is not None else []:\n data.append([count, col, mvs.starttime, mvs.endtime])\n if cur:\n high = max(high, mvs.endtime)\n low = min(low, mvs.starttime)\n subplot.text(mvs.starttime - 100, count - 0.5, \"F:\" + str(int(mvs.startframe)),\n {'size': 10})\n subplot.text(mvs.endtime + 100, count - 0.5, \"F:\" + str(int(mvs.endframe)), {'size': 10})\n subplot.text(mvs.starttime - 100, count - 0.20, \"T:\" + str(int(mvs.starttime)),\n {'size': 10})\n subplot.text(mvs.endtime + 100, count - 0.20, \"T:\" + str(int(mvs.endtime)), {'size': 10})\n else:\n for mvs in probe.targetVideoSegments if probe.targetVideoSegments is not None else []:\n data.append([count, col, mvs.starttime, mvs.endtime])\n if cur:\n high = max(high, mvs.endtime)\n low = min(low, mvs.starttime)\n subplot.text(mvs.starttime, count - 0.5, \"F:\" + str(int(mvs.startframe)), {'size': 10})\n subplot.text(mvs.endtime, count - 0.5, \"F:\" + str(int(mvs.endframe)), {'size': 10})\n subplot.text(mvs.starttime, count - 0.20, \"T:\" + str(int(mvs.starttime)), {'size': 10})\n subplot.text(mvs.endtime, count - 0.20, \"T:\" + str(int(mvs.endtime)), {'size': 10})\n ytics.append(count)\n ytic_lbl.append(str(page.master.abreive(probe.edgeId[0])))\n\n color_mapper = np.vectorize(lambda x: {0: 'red', 1: 'blue', 2: 'green'}.get(x))\n data.append([count + 1, 0, 0.0, tsec * 1000.0])\n ytics.append(count + 1)\n ytic_lbl.append(page.master.abreive(page.edgeTuple[1]))\n numpy_array = np.array(data)\n subplot.hlines(numpy_array[:, 0], numpy_array[:, 2], numpy_array[:, 3], color_mapper(numpy_array[:, 1]),\n linewidth=10)\n subplot.set_yticks(ytics)\n subplot.set_yticklabels(ytic_lbl)\n subplot.set_xlabel('Time in Milliseconds')\n subplot.grid()\n i = subplot.yaxis.get_view_interval()\n if (i[1] - i[0] < 10):\n i[0] = i[1] - 8\n subplot.yaxis.set_view_interval(i[0], i[1])\n i = subplot.xaxis.get_view_interval()\n if (i[1] - i[0] > 2000):\n i[0] = low - 1000\n i[1] = high + 1000\n subplot.xaxis.set_view_interval(i[0], i[1])\n page.master.pltdata[page] = numpy_array\n canvas = Canvas(self, height=50, width=50)\n imscroll = Scrollbar(self, orient=HORIZONTAL)\n imscroll.grid(row=1, column=0, sticky=EW)\n imscroll.config(command=page.scrollplt)\n fcanvas = FigureCanvasTkAgg(f, master=canvas)\n fcanvas.draw()\n fcanvas.get_tk_widget().grid(row=0, column=0)\n fcanvas._tkcanvas.grid(row=0, column=0)\n canvas.grid(row=0, column=0)\n canvas.config(height=50, width=50)\n page.master.subplots[page] = f\n\nclass QAProjectDialog(Toplevel):\n \"\"\"\n Host window for QA pages\n \"\"\"\n 
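# RGB tints for the Manny mascot image; same palette as MannyPage above
 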
manny_colors = [[155, 0, 0], [0, 155, 0], [0, 0, 155], [153, 76, 0], [96, 96, 96], [204, 204, 0], [160, 160, 160]]\n\n def __init__(self, parent):\n self.parent = parent\n self.scModel = parent.scModel\n self.meta_extractor = MetaDataExtractor(parent.scModel.getGraph())\n self.probes = None\n Toplevel.__init__(self, parent)\n self.type = self.parent.scModel.getEndType()\n self.pages = []\n self.current_qa_page = None\n self.checkboxes = {} #Checkboxes, keyed by page\n self.backs = {}\n self.lookup = {}\n self.subplots ={}\n self.pltdata = {}\n self.backsProbes={}\n self.photos = {}\n self.commentsBoxes = {}\n self.edges = {}\n self.qaList = []\n self.pathboxes = {}\n self.qaData = maskgen.qa_logic.ValidationData(self.scModel)\n self.resizable(width=False, height=False)\n self.progressBars = []\n self.narnia = {}\n self.pageDisplays = {} #Frames that go inside pages, keyed by page.\n self.valid = False\n self.mannypage = MannyPage(self)\n self.switch_frame(self.mannypage)\n self.lastpage = None #Assigned in generate Pages\n self.pages.append(self.mannypage)\n self.getProbes()\n if self.probes is None:\n self.mannypage.statusLabelText.set('Probe Generation failed. Please consult logs for more details.')\n self.parent.update()\n else:\n self.errors = [p for p in self.probes if p.failure]\n if len(self.errors) > 0:\n self.mannypage.statusLabelText.set('Probes Complete with errors. Generating Preview Pages.')\n else:\n self.mannypage.statusLabelText.set('Probes Complete. Generating Preview Pages.')\n self.generate_pages()\n\n def getProbes(self):\n try:\n generator = ProbeGenerator(\n scModel=self.scModel,\n processors=[\n DetermineTaskDesignation(\n scModel=self.scModel,\n inputFunction=fetch_qaData_designation)])\n\n self.probes = generator(saveTargets=False, keepFailures=True)\n except Exception as e:\n logging.getLogger('maskgen').error(str(e))\n self.probes = None\n\n def getFileNameForNode(self, nodeid):\n try:\n fn = self.scModel.getFileName(nodeid)\n if fn not in self.lookup:\n self.lookup[fn] = []\n if nodeid not in self.lookup[fn]:\n self.lookup[fn].append(nodeid)\n except TypeError:\n fn = None\n logging.getLogger('maskgen').warn(\"Unable to locate File for node with Id {}\".format(nodeid))\n return fn\n\n def pre(self):\n self.move(-1,False)\n\n def nex(self):\n self.move(1, False)\n\n def exitProgram(self):\n self.destroy()\n cleanup_temporary_files(probes=self.probes, scModel=self.scModel)\n\n def help(self,event):\n URL = MaskGenLoader.get_key(\"apiurl\")[:-3] + \"journal\"\n webbrowser.open_new(URL)\n\n def generate_pages(self):\n self.crit_links = ['->'.join([self.getFileNameForNode(p.edgeId[1]), self.getFileNameForNode(p.finalNodeId)]) for\n p in self.probes] if self.probes else []\n self.crit_links = list(set(self.crit_links))\n\n self.finNodes = []\n for x in range(0, len(self.crit_links)):\n for y in range(x, len(self.crit_links)):\n link1 = self.crit_links[x]\n link2 = self.crit_links[y]\n fin1 = link1.split(\"->\")[1]\n fin2 = link2.split(\"->\")[1]\n self.finNodes.append(fin2)\n if (fin1 > fin2):\n self.crit_links[x] = self.crit_links[y]\n self.crit_links[y] = link1\n self.finNodes = list(set(self.finNodes))\n for end in self.finNodes:\n for node in self.lookup[end]:\n if node in self.scModel.finalNodes():\n break\n self.backs[end] = []\n next = self.getPredNode(node)\n while next != None:\n node = next.start\n self.backs[end].append(next)\n next = self.getPredNode(node)\n self.backs[end].reverse()\n\n donors = ['<-'.join([self.getFileNameForNode(p.edgeId[1]), 
self.getFileNameForNode(p.donorBaseNodeId)]) for p in\n self.probes if\n p.donorMaskImage is not None or p.donorVideoSegments is not None] if self.probes else []\n donors = set(sorted(donors))\n self.crit_links.extend([x for x in donors])\n count = 0.0\n for k in self.qaData.keys():\n count += 1 if self.qaData.get_qalink_status(k) == 'yes' else 0\n self.progress = count / len(self.crit_links) if len(self.crit_links) != 0 else 0.99999\n count = 1\n for link in self.crit_links:\n self.pages.append(QAPage(master=self, link=link))\n count += 1\n self.lastpage = FinalPage(self)\n self.pages.append(self.lastpage)\n self.mannypage.statusLabelText.set('Preview Pages Complete. Press Next to Continue.')\n self.mannypage.wnext.config(state=NORMAL)\n\n\n def validategoodtimes(self):\n v = self.scModel.validate()\n if maskgen.validation.core.hasErrorMessages(v, lambda x: True):\n self.valid = False\n tkMessageBox.showerror(\"Validation Errors!\",\"It seems this journal has unresolved validation errors. \"\n \"Please address these and try again. Your QA progress will be saved.\")\n else:\n self.valid = True\n self.check_ok()\n\n def abreive(self,str):\n if (len(str)>10):\n return(str[:5]+ \"...\\n\" + str[-6:])\n else:\n return str\n\n def _add_to_listBox(self, box, string):\n if len(string) < 20:\n box.insert(END, string)\n return 1\n box.insert(END, string[0:15]+\"...\")\n box.insert(END, \" \" + string[max(15-int(len(string)),-10):])\n return 2\n\n def _compose_label(self,edge):\n op = edge['op']\n if 'semanticGroups' in edge and edge['semanticGroups'] is not None:\n groups = edge['semanticGroups']\n op += ' [' + ', '.join(groups) + ']'\n self.descriptionVar = edge['description']\n return op\n\n def nexCheck(self):\n self.findNextUnchecked()\n\n def preCheck(self):\n self.findNextUnchecked()\n\n def switch_frame(self, frame):\n if self.current_qa_page != None:\n self.current_qa_page.grid_forget()\n self.current_qa_page = frame\n self.current_qa_page.grid()\n\n def findNextUnchecked(self):\n try:\n unchecked = next(page for page in self.pages if not bool(page.checkboxes))\n except StopIteration:\n return None\n if unchecked != self.current_qa_page:\n self.switch_frame(unchecked)\n\n def move(self, dir, checked):\n\n if self.current_qa_page in self.edges.keys():\n self.edges[self.current_qa_page][0]['semanticGroups'] = self.edges[self.current_qa_page][1].get(0, END)\n\n finish = True\n if self.current_qa_page in self.checkboxes.keys():\n for box in self.checkboxes[self.current_qa_page].boxes:\n if bool(box) is False:\n finish = False\n break\n\n #caching in qaData\n ind = self.pages.index(self.current_qa_page)\n step = 0\n if 0<=ind-1Climate_scenario[i][0] and x<=Climate_scenario[i][1]):\n Climate_ratio.append([Climate_scenario[i][0],Climate_scenario[i][0]+Climate_scenario[i][1]])\n else :\n Climate_ratio.append([Climate_scenario[i][0]+Climate_scenario[i][1],1])\n\nfor i in range(1,5,1):\n for j in range(3):\n ecdf=ECDF(DataArray_month[i][j+3]) #(+3) -> April~June\n plt.figure()\n plt.plot(ecdf.x, ecdf.y, 'ko', label=\"Original Noised Data\")\n# plt.plot(ecdf.x, func(ecdf.x, *popt), 'r-', label=\"Fitted Curve\")\n plt.legend()\n plt.show()\n\n for k in range(num_of_days[j]):\n IsZero=random.random()\n if IsZero<=IszeroRate[i-1][j+3]: #(+3) -> April~June\n DataExport[i].append(0)\n else:\n y=random.random()*(Climate_ratio[j][1]-Climate_ratio[j][0])+Climate_ratio[j][0]\n p=0\n while ecdf.y[p] capacity:\n raise CheckError(\"Total capacity \"+str(total_capacity)+\" exceeds mouth capacity of 
\"+str(capacity)+\".\")\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Check Hamster solution\")\n parser.add_argument('--capacity', type=int, help=\"Mouth capacity\")\n parser.add_argument('--instance', help=\"Instance file\")\n parser.add_argument('--reference', help=\"Reference solution file\")\n parser.add_argument('checkfile', help=\"Check solution file\")\n \n args=parser.parse_args()\n \n try:\n instance = parse_instance(args.instance)\n\n weight = get_weight(args.reference)\n \n check_solution(instance, args.checkfile, weight, args.capacity)\n except IOError as e:\n print( \"I/O error({0}): {1}\".format(e.errno, e.strerror) )\n exit(-1)\n except CheckError as e:\n print( \"Check failed! \" + e.value )\n exit(-1)\n","sub_path":"A3/Hamster-check.py","file_name":"Hamster-check.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"78859697","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def oddEvenList(self, head: ListNode) -> ListNode:\n if head==None or head.next==None:\n return head\n\n # 初始化奇数节点的链表、头结点\n odd_root=head\n odd_node=odd_root\n\n # 初始化偶数数节点的链表、头结点\n even_root=head.next\n even_node=even_root\n\n head=head.next.next\n\n index=3\n while head!=None:\n if index %2==0:\n even_node.next=head\n even_node=even_node.next\n else:\n odd_node.next=head\n odd_node=odd_node.next\n index +=1\n head=head.next\n odd_node.next=even_root\n even_node.next=None\n return odd_root","sub_path":"chap8. 双指针法/328. 奇偶链表/oddEvenList.py","file_name":"oddEvenList.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225400347","text":"from django.shortcuts import render, get_object_or_404, get_list_or_404\nfrom django.views.generic import ListView, DetailView, TemplateView\nfrom taggit.models import Tag\nfrom django.template import RequestContext\nfrom . 
import models\n\n\nclass Home(TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n c = super(Home, self).get_context_data(**kwargs)\n return c\n\n\ndef get_breadcrumbs(page):\n breadcrumbs = []\n while page:\n breadcrumbs.append(page)\n page = page.parent\n return reversed(breadcrumbs)\n\n\ndef category(request, path, instance):\n\n breadcrumbs = get_breadcrumbs(instance)\n\n return render(\n request,\n 'articles/articles_list.html',\n {\n 'category': instance,\n 'breadcrumbs': breadcrumbs\n }\n )\n\n\nclass ArticleDetailView(DetailView):\n model = models.Articles\n\n def get_context_data(self, **kwargs):\n context = super(ArticleDetailView, self).get_context_data(**kwargs)\n context['breadcrumbs'] = get_breadcrumbs(self.object.id_section)\n return context\n\n\ndef tag_view(request, tag):\n obj = get_object_or_404(Tag, slug=tag)\n\n return render(\n request,\n 'articles/tag_list.html',\n {\n 'tag': obj,\n 'news': models.Articles.objects.filter(tags=obj)\n }\n )\n","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541015146","text":"# Usage:\r\n# python lower_nabcc.py <source file name>\r\n# The output file name is the source file name with \".blt.txt\" appended.\r\n# .blt is the extension used by the \"Braille Editing System\" when saving in the \"Braille Light\" format; saving in that format produces lowercase NABCC.\r\n\r\nimport sys\r\nif len(sys.argv) == 1:\r\n print('Please specify a source file')\r\n quit()\r\nelse:\r\n upper_nabcc_file = open(sys.argv[1], 'r')\r\n lower_nabcc_file = open(sys.argv[1] + '.blt.txt', 'w')\r\n for upper_nabcc in upper_nabcc_file:\r\n lower_nabcc = upper_nabcc.lower()\r\n lower_nabcc_file.write(lower_nabcc.translate(str.maketrans('\\\\][@^', '|}{`~')))\r\n upper_nabcc_file.close()\r\n lower_nabcc_file.close()\r\n\r\n","sub_path":"lower_nabcc.py","file_name":"lower_nabcc.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"288700718","text":"import os\nimport time\nimport getpass\ndef rhel_initial():\n os.system(\"clear\")\n print(\"\\n \\t\\t\\tWelcome to the Red Hat Linux Management Console\\n\")\n print(\"\\n\\t\\t **---> Press <---**\\n\")\n time.sleep(1)\n print(\"\\n\\t 1 -> To see date & calendar \")\n time.sleep(1)\n print(\"\\n\\t 2 -> To create directory \")\n time.sleep(1)\n print(\"\\n\\t 3 -> To see your memory details \")\n time.sleep(1)\n print(\"\\n\\t 4 -> To see your Hard Disk details\")\n time.sleep(1)\n print(\"\\n\\t 5 -> To setup webserver \")\n time.sleep(1)\n print(\"\\n\\t 6 -> To check connection \")\n time.sleep(1)\n print(\"\\n\\t 7 -> To change to root user \")\n time.sleep(1)\n print(\"\\n\\t 8 -> To install any software \")\n time.sleep(1)\n print(\"\\n\\t 9 -> To start any service or daemon \")\n time.sleep(1)\n print(\"\\n\\t 10 -> To Reboot your machine\")\n time.sleep(1)\n print(\"\\n\\t 11 -> To shut down your machine \")\n time.sleep(1)\n print(\"\\n\\t 12 -> To exit from this console/ Get back to main dashboard \")\n time.sleep(1)\n return\nrhel_initial()\nwhile True :\n# rhel_initial()\n print(\"\\n\\t\\t Enter your choice :\", end=\"\")\n ch = int(input())\n if ch == 1:\n print(\"\\n \\t\\t Press enter to continue ..\\n.\") \n input()\n os.system(\"date\")\n time.sleep(1)\n os.system(\"cal\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\") \n input()\n rhel_initial()\n elif ch == 2:\n print(\"\\n\\t\\t\\t Enter the name of the directory with full 
path..\\n \", end=\"\")\n dir = input()\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n os.system(\"mkdir {0}\".format(dir))\n time.sleep(1)\n print(\"\\n\\t Successfully created..\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 3:\n time.sleep(1)\n print(\"\\n\\t\\t\\t Your Memory details are shown below :\\n\")\n os.system(\"free -m\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 4:\n time.sleep(1)\n print(\"\\n\\t\\t\\t Your HD details are shown below :\\n\")\n os.system(\"df -h\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 5:\n print(\"\\n\\t\\t Press enter after entering the code of Web Page ..\")\n time.sleep(1)\n os.system(\"yum install httpd -y\")\n os.system(\"cat > /var/www/html/index.html\")\n os.system(\"setenforce 0\")\n os.system(\"systemctl start httpd\")\n print(\"ALL DONE\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 6:\n time.sleep(1)\n print(\"\\n\\t\\t Pinging to 8.8.8.8..\\n\")\n os.system(\"ping -c 4 8.8.8.8\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 7:\n time.sleep(1)\n print(\"\\n\\t\\t Enter your Root password..(if asks)\\n\")\n os.system(\"sudo su - root\")\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 8:\n time.sleep(1)\n print(\"\\n\\t\\t Please enter the name of software you want :\")\n sw = input()\n time.sleep(1)\n os.system(\"yum install {0} -y\".format(sw))\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 9:\n time.sleep(1)\n print(\"\\n\\t\\t Please enter the name of service you want to start :\")\n daemon = input()\n os.system(\"systemctl start {0}\".format(daemon))\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n rhel_initial()\n elif ch == 10:\n time.sleep(1)\n print(\"\\n\\t\\t Rebooting your system...\\n\")\n time.sleep(1)\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n os.system(\"init 6\")\n elif ch == 11:\n time.sleep(1)\n print(\"\\n\\t\\t Shutting down your RHEL 8..\\n\")\n time.sleep(1)\n print(\"\\n \\t\\t Press enter to continue ..\\n.\")\n input()\n os.system(\"init 0\")\n elif ch == 12:\n time.sleep(1)\n print(\"\\n\\t\\t Byee BYee... Going to the main dashboard\")\n time.sleep(2)\n exit()\n else:\n time.sleep(1)\n print(\"\\n\\n\\t\\t Invalid Choice... 
Try Again\\n\")\n time.sleep(1)\n rhel_initial()\n","sub_path":"linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"251244546","text":"# -*- encoding: utf-8 -*-\n\nfrom openerp.addons.web import http\nfrom openerp.addons.web.controllers.main import Home, db_monodb, login_and_redirect\n\nfrom openerp.modules.registry import RegistryManager\nfrom openerp import SUPERUSER_ID\n\nfrom werkzeug.wrappers import Response\nimport re\n\nimport logging\nlog = logging.getLogger(__name__)\n\nclass SSO(Home):\n \n def get_sso_config(self, req, dbname):\n \"\"\" \n Retrieve the module config (which features are enabled) for the SSO LemonLDAP auto connection\n \"\"\"\n registry = RegistryManager.get(dbname)\n \n config = {\n 'enabled': False\n }\n \n with registry.cursor() as cr:\n icp = registry.get('ir.config_parameter')\n \n forwarded = icp.get_param(cr, SUPERUSER_ID, 'auth_lemonldap.forwarded')\n log.debug('icp forwarded: %s, type: %s', forwarded, type(forwarded))\n forwarded = re.split(r\" *, *\", forwarded) if type(forwarded) == unicode else []\n \n config = {\n 'enabled': icp.get_param(cr, SUPERUSER_ID, 'auth_lemonldap.enabled') == 'True',\n 'lemon_secret': icp.get_param(cr, SUPERUSER_ID, 'auth_lemonldap.secret'),\n 'forwarded': forwarded,\n }\n \n return config\n \n def get_lemon_params(self, req):\n \"\"\"\n Retrieve lemon header parameters\n \"\"\"\n headers = req.httprequest.headers\n return {\n 'forwarded': headers.get('X-Forwarded-Host'),\n 'db': headers.get('OpenERP-Database') if type(headers.get('OpenERP-Database')) == str else None,\n 'user_id': headers.get('OpenERP-User-Id'),\n 'username': headers.get('OpenERP-User-Login'),\n 'secret': headers.get('OpenERP-Secret-Key')\n }\n \n def get_current_db(self, req, db):\n \"\"\"\n Retrieve the database to use, from Lemon header, URI parameter or the default one\n \"\"\"\n db = db if db and len(db) > 0 else db_monodb(req)\n if not db:\n raise Exception('Can not found a database to use...')\n return db\n \n def get_current_session(self, req):\n \"\"\"\n Get the current session if any\n \"\"\"\n # FIXME: don't ask me why, but the req.session is not correct, and to get the real one, we have to check in the cookie and clean up \n # the dirty key, thanks OpenERP... 
\n cookie = req.httprequest.cookies.get(\"instance0|session_id\") or None\n session_id = cookie.replace(\"%22\",\"\") if cookie else None\n return req.httprequest.session.get(session_id) if session_id else None\n\n \n @http.httprequest\n def index(self, req, s_action=None, db=None, **kw):\n \"\"\"\n If LemonLDAP module is activated, auto login the user and keep him connected automatically\n Warning: Security is not managed on OpenERP anymore, OpenERP has to be behind a proxy and not\n accessible from outside !\n \"\"\"\n \n # handle 500 error manually, to retrieve error as a string, not a json object\n try:\n lemon_params = self.get_lemon_params(req)\n \n log.debug('Lemon > db: %s, user_id: %s, username: %s, secret: %s', lemon_params['db'], lemon_params['user_id'], lemon_params['username'], lemon_params['secret'])\n \n db = self.get_current_db(req, lemon_params['db'] or db)\n config = self.get_sso_config(req, db) \n \n # only process if the module is activated\n if config['enabled']:\n \n log.debug('forwarded > header: %s, config: %s', lemon_params['forwarded'], config['forwarded'])\n \n # some basic security check to identify LemonLDAP and the proxy\n if lemon_params['forwarded'] not in config['forwarded']:\n raise Exception('OpenERP is not behind a proxy or the forwarded domain is wrong') \n \n if config['lemon_secret'] != lemon_params['secret']:\n raise Exception('LemonLDAP secret is not the same than secret configured on OpenERP !') \n \n log.debug(\"db_monodb(req): %s, db: %s\", db_monodb(req), db)\n \n session = self.get_current_session(req)\n \n if db_monodb(req) != db:\n log.info('force re authenticate user %s', lemon_params['username'])\n \n # force the db retrieved from request header\n req.params.update({'db': db })\n url = '/?db=%s' % db\n \n # login with a fake password, the security check has been disabled on res.users model\n return login_and_redirect(req, db, lemon_params['username'], 'nopassword', redirect_url=url)\n \n if not session or str(session._uid) != lemon_params['user_id']:\n log.info('auto-authenticate user %s', lemon_params['username'])\n # login with a fake password, the security check has been disabled on res.users model\n return login_and_redirect(req, db, lemon_params['username'], 'nopassword')\n \n log.info('user %s already authenticated', lemon_params['username'])\n except Exception as e:\n body = \"
<html><body><h1>OpenERP - LemonLDAP Authorization Error</h1><p>%s</p></body></html>\" % str(e)\n return Response(body, status=500, headers=[('Content-Type', 'text/html'), ('Content-Length', len(body))])\n \n return super(SSO, self).index(req, s_action, db, **kw)\n \n \n @http.httprequest\n def lemonldap_logout(self, req, s_action=None, db=None, **kw):\n \"\"\"\n Custom page used to logout user from lemonldap\n \"\"\"\n body = \"<html><body><h1>OpenERP - LemonLDAP Logout Page</h1><p>You will be redirected to the LemonLDAP portal...</p></body></html>\"\n return Response(body, status=200, headers=[('Content-Type', 'text/html'), ('Content-Length', len(body))])\n \n \n @http.httprequest\n def login(self, req, db, login, key):\n \"\"\"\n If LemonLDAP module is activated, this method should never be called, so raise an exception in this case\n \"\"\"\n # handle 500 error manually, to retrieve error as a string, not a json object\n try:\n lemon_params = self.get_lemon_params(req)\n db = self.get_current_db(req, lemon_params['db'] or db)\n config = self.get_sso_config(req, db) \n \n # only process if the module is activated\n if config['enabled']:\n raise Exception('Login is managed automatically with LemonLDAP, this method should never be called !')\n \n except Exception as e:\n body = \"<html><body><h1>OpenERP - LemonLDAP Authorization Error</h1><p>%s</p></body></html>\" % str(e)\n return Response(body, status=500, headers=[('Content-Type', 'text/html'), ('Content-Length', len(body))])\n \n return super(SSO, self).login(req, db, login, key)\n","sub_path":"auth_lemonldap/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520268927","text":"#coding: utf-8\n\nfrom __future__ import unicode_literals\nfrom apps.admin.views import *\nurls = [\n (r\"\", AdminHandler),\n (r'auditUser/$', AuditUserHandler),\n (r'auditLink/$', AuditLinksHandler),\n (r'links$', AdminLinksHandler),\n (r'ads$', AdminAdsHandler),\n (r'upload/$', UploadHandler),\n]","sub_path":"apps/admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"291500143","text":"import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport math\nimport csv\nfrom pandas import DataFrame\nsys.path.append('../../models')\nsys.path.append('../../data')\nfrom rw_data import readMatData\nfrom train_basic_nn_model import nn_model\nfrom sklearn.metrics import mean_squared_error\nfrom keras.models import save_model, load_model\nfrom data_operations import calculateRMSE, calculatePhysicalLossDensityDepth\n\nfrom rw_data import readMatData, saveMatData\n#read data\nmat = readMatData(os.path.abspath('../../../data/processed/mendota_sampled.mat'))\nx = mat['Xc_doy'][:,0:11]\ny = mat['Modeled_temp_tuned'][:,0]\ndepths = mat['Depth']\ndates = mat['datenums']\nudates = np.sort(mat['udates'])\n\n#################################################################\n# this script runs an experiment for predicting GLM output using\n# a basic feed forward 3 layer ANN. 
it saves the RMSE and depth \n# inconsistency for a variety of training sizes\n##############################################################\n\n\nn = np.shape(mat['Xc_doy'])[0]\nn10 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.10)\nn15 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.15)\nn20 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.2)\nn25 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.25)\nn33 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.33)\nn50 = n-math.ceil(np.shape(mat['Xc_doy'])[0]*.50)\nn66 = n- math.ceil(np.shape(mat['Xc_doy'])[0]*.66)\n\ny = mat['Modeled_temp'][:,0]\nx = mat['Xc_doy'][:,0:11]\nnvec = [n66,n50,n33,n25,n20, n15, n10]\nNN_PHYS_LOSS = np.empty(shape=len(nvec))\nNN_RMSE = np.empty(shape=len(nvec))\nn_trials = 2\n\n# for i in range(0,len(perc_train)):\n# \tfor t in range\n# model = modelvec[i]\n# n = nvec[i]\n# NN_RMSE[i] = math.sqrt(mean_squared_error(model.predict(x[n:,:]),y[n:]))\n# print(model.predict(x).shape, depths.shape, udates.shape, days.shape)\n# NN_PHYS_LOSS[i] = calculatePhysicalLossDensityDepth(model.predict(x), depths, udates, days)\n\ntrain_percs = [.33, .5, .66, .75, .8, .85, .9]\n\nwith open('ANN_results.csv', 'w') as csvfile:\n\tfieldnames = ['train_percent','mean_rmse', 'std_rmse', 'mean_physloss', 'std_physloss']\n\twriter = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\twriter.writeheader()\n\tfor i in range(0,len(train_percs)):\n\t\tRMSE = np.empty(n_trials)\n\t\tPHYS_LOSS = np.empty(n_trials)\n\t\tx_tst = x[(1-nvec[i]):,:]\n\t\ty_tst = y[(1-nvec[i]):]\n\t\tfor t in range(0,n_trials):\n\t\t\tprint(\"trial \", t)\n\t\t\tmodel = nn_model(x, y, epochs=5,validation_split=(1-train_percs[i]))\n\t\t\tpred_tst = model.predict(x_tst)\n\t\t\tRMSE[t] = math.sqrt(mean_squared_error(pred_tst,y_tst))\n\t\t\tprint(\"RMSE, \",RMSE[t])\n\t\t\tpred = model.predict(x)\n\t\t\tPHYS_LOSS[t] = calculatePhysicalLossDensityDepth(pred, depths, udates, dates)\n\t\t\tprint(\"PHYSLOSS, \",PHYS_LOSS[t])\n\t #avg +std\n\t\tmean_rmse = np.mean(RMSE, dtype=np.float64)\n\t\tstd_rmse = np.std(RMSE, dtype=np.float64)\n\t\tprint(mean_rmse, \":\", std_rmse)\n\t\tmean_phys = np.mean(PHYS_LOSS, dtype=np.float64)\n\t\tstd_phys = np.std(PHYS_LOSS, dtype=np.float64)\n\t\twriter.writerow({'train_percent':train_percs[i],\n\t\t\t\t\t\t 'mean_rmse': mean_rmse, 'std_rmse': std_rmse, \n\t\t\t\t\t\t 'mean_physloss': mean_phys, 'std_physloss': std_phys}) \n","sub_path":"src/scripts/one-off/nn_train.py","file_name":"nn_train.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"548036676","text":"import matplotlib.pyplot as plt\nfrom bezier._py_curve_helpers import get_curvature, evaluate_hodograph\nfrom bezier.curve import Curve\nfrom more_itertools import *\nimport pint\nimport numpy as np\nimport math as m\n\nu = pint.UnitRegistry()\nu.setup_matplotlib()\n\n\ndef curvature_at_t(curve, t):\n curve, units = curve\n tangent_vec = evaluate_hodograph(t, curve.nodes)\n curvature = get_curvature(curve.nodes, tangent_vec, t)\n return curvature * u.radians / units\n\n\ndef length_at_t(curve, t):\n curve, units = curve\n curve_to_t = curve.specialize(0, t)\n return curve_to_t.length * units\n\n\ndef calc_accelerations(d):\n (last_time, next_time), (last_velocity, next_velocity) = d\n\n delta_time = next_time - last_time\n delta_velocity = next_velocity - last_velocity\n acceleration = delta_velocity / delta_time\n\n center_time = (next_time + last_time) / 2\n\n return center_time, acceleration\n\n\ndef map_units(f, i):\n 
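# apply f to every element of i, then strip the pint units and rebuild a single
 # numpy array tagged with the units of the first result (assumes all results share units)
 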
results_with_units = list(map(f, i))\n results_without_units = list(map(lambda x: x.magnitude, results_with_units))\n return np.array(results_without_units) * results_with_units[0].units\n\n\ndef extract_units(i):\n results_with_units = list(i)\n results_without_units = list(map(lambda x: x.magnitude, results_with_units))\n return np.array(results_without_units) * results_with_units[0].units\n\n\ndef acceleration_curve(fig, wheelbase: float, wheel_radius: float, mass: float, curve, linear_velocity: float):\n ss = np.linspace(0, 1, 1000)\n\n ls = map_units(lambda t: length_at_t(curve, t), ss)\n\n ts = ls / linear_velocity\n\n curvatures = map_units(lambda t: curvature_at_t(curve, t), ss)\n\n angular_velocities = map_units(lambda curvature: curvature * linear_velocity, curvatures)\n\n left_velocities = map_units(lambda angular_velocity: linear_velocity - angular_velocity * wheelbase / 2, angular_velocities)\n right_velocities = map_units(lambda angular_velocity: linear_velocity + angular_velocity * wheelbase / 2, angular_velocities)\n\n left_acc_ts, left_accelerations = unzip(map(calc_accelerations, zip(windowed(ts, 2), windowed(left_velocities, 2))))\n\n left_acc_ts = extract_units(left_acc_ts)\n left_accelerations = extract_units(left_accelerations)\n\n right_acc_ts, right_accelerations = unzip(\n map(calc_accelerations, zip(windowed(ts, 2), windowed(right_velocities, 2))))\n\n right_acc_ts = extract_units(right_acc_ts)\n right_accelerations = extract_units(right_accelerations)\n\n left_angular_acceleration = left_accelerations / (wheel_radius / u.radian)\n right_angular_acceleration = right_accelerations / (wheel_radius / u.radian)\n\n rotational_inertia = mass * wheel_radius**2 / 2\n\n left_torque = rotational_inertia * left_angular_acceleration\n right_torque = rotational_inertia * right_angular_acceleration\n\n (ax1, ax2), (ax3, ax4) = fig.subplots(2, 2)\n\n ts.ito_base_units()\n curvatures.ito_base_units()\n angular_velocities.ito_base_units()\n left_velocities.ito_base_units()\n right_velocities.ito_base_units()\n left_accelerations.ito_base_units()\n right_accelerations.ito_base_units()\n left_acc_ts.ito_base_units()\n right_acc_ts.ito_base_units()\n left_angular_acceleration.ito_base_units()\n right_angular_acceleration.ito_base_units()\n left_torque.ito(u.newton * u.meter)\n right_torque.ito(u.newton * u.meter)\n\n print(max(left_torque))\n\n ax1.plot(left_acc_ts, left_torque)\n ax1.plot(right_acc_ts, right_torque)\n ax2.plot(ts, curvatures)\n ax2l = ax2.twinx()\n ax2l.plot(ts, angular_velocities)\n ax4.plot(ts, left_velocities)\n ax4.plot(ts, right_velocities)\n ax3.plot(left_acc_ts, left_accelerations)\n ax3.plot(right_acc_ts, right_accelerations)\n ax3l = ax3.twinx()\n ax3l.plot(left_acc_ts, left_angular_acceleration)\n ax3l.plot(right_acc_ts, right_angular_acceleration)\n\n\ndef corner_curve(r, offset):\n units = r.units\n r = r.magnitude\n\n r1 = r * 0.5\n r2 = r * 0.3\n\n nodes = [\n [0, 0, 0, r2, r1, r + offset.to(units).magnitude],\n [r - offset.magnitude, r1, r2, 0, 0, 0]\n ]\n\n return Curve(nodes, degree=5), units\n\n\nfig1, ax1 = plt.subplots(1, 1)\nfig2 = plt.figure()\n\nc = corner_curve(90 * u.mm, 12 * u.mm)\n\nc[0].plot(1000, ax=ax1)\n\nacceleration_curve(fig2, 72 * u.mm, 16 * u.mm, 87 * u.g, c, 0.4 * u.m / u.s)\n\nplt.show()\n","sub_path":"software/micromouse_analysis/motor_requirements.py","file_name":"motor_requirements.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"537305893","text":"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nPROJECT_NAME = 'agronom'\n\nDEBUG = True\nPRODUCTION_MODE = True # Includes several optimizations in cost of some code validation, must be True if not ELK\n\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.gis',\n 'django.contrib.sites',\n\n # 3rd-party libs\n 'rest_framework',\n 'rest_framework_gis',\n 'rest_framework.authtoken',\n # https://www.django-rest-framework.org/api-guide/authentication/\n 'django_filters',\n 'django_prometheus',\n 'cacheops',\n 'django_pickling',\n 'drf_yasg',\n 'leaflet',\n\n # Our apps\n 'core',\n 'customuser',\n 'cadastral',\n 'fields',\n 'weather',\n 'ndvi',\n 'operations'\n]\n\n\nMIDDLEWARE = [\n 'django_prometheus.middleware.PrometheusBeforeMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django_prometheus.middleware.PrometheusAfterMiddleware',\n]\n\nROOT_URLCONF = f'{PROJECT_NAME}.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, 'templates')],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = f'{PROJECT_NAME}.wsgi.application'\n\nSITE_ID = 1\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticroot/')\n\nSTATICFILES_DIRS = (\n [os.path.join(BASE_DIR, 'static')]\n)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\nMEDIA_URL = '/media/'\n","sub_path":"agronom/agronom/settings/components/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"284486007","text":"import urllib\nimport requests\nimport pandas as pd\nimport numba\nimport numpy as np\nimport warnings\nimport json\nimport ast\nimport strax\n\nimport sys\nif any('jupyter' in arg for arg in sys.argv):\n # In some cases we are not using any notebooks,\n # Taken from 44952863 on stack overflow thanks!\n from tqdm import tqdm_notebook as tqdm\nelse:\n from tqdm import tqdm\n\nfrom straxen import uconfig\nexport, __all__ = strax.exporter()\n\n\n@export\nclass SCADAInterface:\n\n def __init__(self, context=None):\n \"\"\"\n Interface to excess the XENONnT slow control data via python.\n\n :param context: Context you are using e.g. st. 
This is needed\n if you would like to query data via run_ids.\n \"\"\"\n try:\n self.SCData_URL = uconfig.get('scada', 'scdata_url')\n self.SCLastValue_URL = uconfig.get('scada', 'sclastvalue_url')\n self.SCADA_SECRETS = dict(QueryType=uconfig.get('scada', 'querytype'),\n username=uconfig.get('scada', 'username'),\n api_key=uconfig.get('scada', 'api_key')\n )\n except ValueError as e:\n raise ValueError(f'Cannot load SCADA information, from your xenon'\n ' config. SCADAInterface cannot be used.') from e\n \n try:\n # Better to cache the file since is not large:\n with open(uconfig.get('scada', 'pmt_parameter_names')) as f:\n self.pmt_file = json.load(f)\n except (FileNotFoundError, ValueError):\n warnings.warn(('Cannot load PMT parameter names from parameter file.' \n ' \"find_pmt_names\" is disabled for this session.'))\n self.pmt_file = None\n try: \n with open(uconfig.get('scada', 'parameter_readout_rate')) as f:\n self.read_out_rates = json.load(f)\n except (FileNotFoundError, ValueError) as e:\n raise FileNotFoundError(\n 'Cannot load file containing parameter sampling rates.') from e\n\n self.context = context\n\n def get_scada_values(self,\n parameters,\n start=None,\n end=None,\n run_id=None,\n time_selection_kwargs=None,\n interpolation=False,\n filling_kwargs=None,\n down_sampling=False,\n every_nth_value=1):\n \"\"\"\n Function which returns XENONnT slow control values for a given\n set of parameters and time range.\n\n The time range can be either defined by a start and end time or\n via the run_id, target and context.\n\n :param parameters: dictionary containing the names of the\n requested scada-parameters. The keys are used as identifier\n of the parameters in the returned pandas.DataFrame.\n :param start: int representing the start time of the interval\n in ns unix time.\n :param end: same as start but as end.\n :param run_id: Id of the run. Can also be specified as a list or\n tuple of run ids. In this case we will return the time\n range lasting between the start of the first and endtime\n of the second run.\n :param time_selection_kwargs: Keyword arguments taken by\n st.to_absolute_time_range(). Default: {\"full_range\": True}\n :param interpolation: Boolean which decided to either forward\n fill empty values or to interpolate between existing ones.\n :param filling_kwargs: Kwargs applied to pandas .ffill() or\n .interpolate().\n :param down_sampling: Boolean which indicates whether to\n donw_sample result or to apply average. 
The averaging\n is deactivated in case of interpolated data.\n :param every_nth_value: Defines over how many values we compute\n the average or the nth sample in case we down sample the\n data.\n :return: pandas.DataFrame containing the data of the specified\n parameters.\n \"\"\"\n if not filling_kwargs:\n filling_kwargs = {}\n\n if not time_selection_kwargs:\n time_selection_kwargs = {'full_range': True}\n\n if not isinstance(parameters, dict):\n mes = 'The argument \"parameters\" has to be specified as a dict.'\n raise ValueError(mes)\n\n if np.all((run_id, self.context)):\n # User specified a valid context and run_id, so get the start\n # and end time for our query:\n if isinstance(run_id, (list, tuple)):\n run_id = np.sort(run_id) # Do not trust the user's\n start, _ = self.context.to_absolute_time_range(run_id[0], **time_selection_kwargs)\n _, end = self.context.to_absolute_time_range(run_id[-1], **time_selection_kwargs)\n else:\n start, end = self.context.to_absolute_time_range(run_id, **time_selection_kwargs)\n elif run_id:\n mes = ('You are trying to query slow control data via run_ids' \n ' but you have not specified the context you are '\n 'working with. Please set the context either via '\n '.st = YOURCONTEXT, or when initializing the '\n 'interface.')\n raise ValueError(mes)\n\n if not np.all((start, end)):\n # User has not specified any valid start and end time\n mes = ('You have to specify either a run_id and context.'\n ' E.g. call get_scada_values(parameters, run_id=run)'\n ' or you have to specify a valid start and end time '\n 'in utc unix time ns.')\n raise ValueError(mes)\n\n now = np.datetime64('now')\n if (end // 10**9) > now.astype(np.int64):\n mes = ('You are asking for an endtime which is in the future,'\n ' I may be written by a physicist, but I am neither self-'\n 'aware nor can I predict the future like they can. You '\n f'asked for the endtime: {end // 10**9} but current utc '\n f'time is {now.astype(np.int64)}. I will return for the values for the '\n 'corresponding times as nans instead.')\n warnings.warn(mes)\n\n self._test_sampling_rate(parameters)\n\n # Now loop over specified parameters and get the values for those.\n for ind, (k, p) in tqdm(enumerate(parameters.items()), total=len(parameters)):\n temp_df = self._query_single_parameter(start, end,\n k, p,\n every_nth_value=every_nth_value,\n interpolation=interpolation,\n filling_kwargs=filling_kwargs,\n down_sampling=down_sampling\n )\n\n if ind:\n m = np.all(df.loc[:, 'time'] == temp_df.loc[:, 'time'])\n mes = ('This is odd somehow the time stamps for the query of'\n f' {p} does not match the other time stamps.')\n assert m, mes\n df = pd.concat((df, temp_df[k]), axis=1)\n else:\n df = temp_df\n\n # Adding timezone information and rename index:\n df.set_index('time', inplace=True)\n df = df.tz_localize(tz='UTC')\n df.index.rename('time UTC', inplace=True)\n\n if (end // 10**9) > now.astype(np.int64):\n df.loc[now:, :] = np.nan\n\n return df\n\n def _test_sampling_rate(self, parameters):\n \"\"\"\n Function which test if the specified parameters share all the\n same sampling rates. If not they cannot be put into a single\n DataFrame and an error is raised.\n\n :param parameters: input parameter names.\n \"\"\"\n # Check if queried parameters share the same readout rate if not raise error:\n for rate, parameter_names in self.read_out_rates.items():\n if not hasattr(parameter_names, '__iter__'):\n parameter_names = [parameter_names]\n # Loop over different readout rates. 
If they belong to the same readout rate...\n input_parameter_names = np.array([v for v in parameters.values()])\n m = np.isin(input_parameter_names, parameter_names)\n\n if not (np.all(m) or np.all(~m)):\n # ...either all parameters are true or false.\n same_rate = input_parameter_names[m]\n not_same_rate = input_parameter_names[~m]\n raise ValueError(('Not all parameters of your inquiry share the same readout rates. '\n f'The parameters {same_rate} are read out every {rate} seconds while '\n f'{not_same_rate} are not. For the your and the developers sanity please make '\n 'two separate inquiries.'))\n\n if np.all(m):\n # Yes all parameters share the same readout rate:\n self.readout_rate = int(rate)\n self.base = 0\n else:\n self.readout_rate = None\n\n def _query_single_parameter(self,\n start,\n end,\n parameter_key,\n parameter_name,\n interpolation,\n filling_kwargs,\n down_sampling,\n every_nth_value=1):\n \"\"\"\n Function to query the values of a single parameter from SCData.\n\n :param start: Start time in ns unix time\n :param end: End time in ns unix time\n :param parameter_key: Key to identify queried parameter in the\n DataFrame\n :param parameter_name: Parameter name in Scada/historian database.\n :param every_nth_value: Defines over how many values we compute\n the average or the nthed sample in case we down sample the\n data.\n\n :returns: DataFrame with a time and parameter_key column.\n \"\"\"\n if every_nth_value < 1:\n mes = (\"SCADA takes only values every second. Cannot ask for a\"\n \" higher sampling rate than one value per second. However\"\n f\" you asked for one value every {every_nth_value} seconds.\")\n raise ValueError(mes)\n if not isinstance(every_nth_value, int):\n raise ValueError('\"value_every_seconds\" must be an int!')\n\n # First we have to create an array where we can fill values with\n # the sampling frequency of scada:\n seconds = np.arange(start, end + 1, 10**9) # +1 to make sure endtime is included\n df = pd.DataFrame()\n df.loc[:, 'time'] = seconds\n df['time'] = df['time'].astype(' 1:\n if interpolation and not down_sampling:\n warnings.warn('Cannot use interpolation and running average at the same time.'\n ' Deactivated the running average, switch to down_sampling instead.')\n down_sampling = True\n\n if down_sampling:\n df = df[::every_nth_value]\n else:\n nt, nv = _average_scada(df['time'].astype(np.int64).values,\n df[parameter_key].values,\n every_nth_value)\n df = pd.DataFrame()\n df['time'] = nt.astype('=(len(seg)):\n break\n i+=1\n # print(lis)\n points.append(lis[-1])\n return points\n \n\nif __name__ == '__main__':\n n=int(input())\n seg=[]\n \n for i in range(n):\n seg.append(list(map(int,input().split(\" \"))))\n points = optimal_points(seg)\n \n print(len(points))\n for i in range(len(points)):\n print(points[i],end=\" \")\n","sub_path":"covering_segments.py","file_name":"covering_segments.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"571820686","text":"import pandas as pd\r\nimport pickle\r\n\r\nwith open('document_wordcount_total_df.pickle', 'rb') as handle:\r\n df = pickle.load(handle)\r\n\r\n\r\ndrop_rows = []\r\nfor i, row in df.iterrows():\r\n document_frequency = 0\r\n for entry in row:\r\n if entry > 0:\r\n document_frequency += 1\r\n if document_frequency < 14:\r\n drop_rows.append(i)\r\n elif '\\t' in i or 'Domestic' in i or 'Budget' in i or 'Gross' in i or 'Release' in i or 'boxoffice' in i\\\r\n or 'Rating' in i or 
'www' in i or 'Runtime' in i or 'Total' in i or 'movie' in i:\r\n drop_rows.append(i)\r\n# remove all \\t\r\ndf = df.drop(drop_rows)\r\nvocabulary = list(df.index)\r\nprint(df.head())\r\nprint(vocabulary)\r\nprint(len(vocabulary))\r\n\r\n# make each column vector a unit vector to normalize\r\n\r\nwith open('document_wordcount_df.pickle', 'wb') as handle:\r\n pickle.dump(df, handle)\r\nwith open('vocabulary_clean.pickle', 'wb') as handle:\r\n pickle.dump(vocabulary, handle)","sub_path":"clean_dataframe.py","file_name":"clean_dataframe.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517611278","text":"class Queue:\n def __init__(self):\n self.queue = []\n self.size = 0\n\n def is_empty(self):\n return self.size == 0\n \n def enqueue(self,data):\n self.queue.insert(0,data)\n self.size += 1\n \n def _len(self):\n return self.size\n\n def dequeue(self):\n if self.is_empty():\n return\n self.size -= 1\n return self.queue.pop()\n\n def peek(self):\n return self.queue[-1].data\n\n\n\n\nclass Node:\n def __init__(self,data):\n self.data = data\n self.left = None\n self.right = None\n\nclass Tree:\n def __init__(self,data):\n self.root = Node(data)\n\n def level_order(self,start):\n if start is None:\n return\n queue = Queue()\n queue.enqueue(start)\n traversal = \"\"\n while queue._len() > 0:\n traversal += str(queue.peek())+\"-\"\n node = queue.dequeue()\n if node.left:\n queue.enqueue(node.left)\n if node.right:\n queue.enqueue(node.right)\n return traversal\n\n\n\n\n\ntree = Tree(1)\ntree.root.left = Node(2)\ntree.root.right = Node(3)\ntree.root.left.left = Node(4)\ntree.root.left.right = Node(5)\nprint(tree.level_order(tree.root))\n \n \n","sub_path":"level_order.py","file_name":"level_order.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"478261634","text":"# Copyright (c) 2015 App Annie Inc. 
All rights reserved.\n\nfrom tests.qa.base import BaseCMTSyncTestCase\nfrom tests.qa.cases.cmt_sync.utility import PrepareContractItemsMixin\nfrom tests.qa.constants.constants import Stores, UsageCategories, Devices\nfrom tests.qa.constants.saleforces_constants import CustomerFields, ContractFields, ContractItemPPFields, \\\n ContractItemPYDFields, ContractItemUsageFields, InMethodology\nfrom tests.qa.utils.cmt_utils import generate_sf_id\nfrom webanalytics.cmt.sync.base import find_by_sf_id\nfrom webanalytics.cmt.models import Customer, Contract, ContractItem\nfrom webanalytics.cmt.sync.salesforce import Login\nfrom tests.qa.services.intelligence.cmt_sync_service import (\n sync_customer, sync_contract, sync_usage, sync_package_pricing, sync_pick_your_data)\n\n\nclass DeleteCustomerSyncTests(BaseCMTSyncTestCase, Login, PrepareContractItemsMixin):\n\n def c35417_delete_customer_without_contract_sync_test(self):\n customer_id = generate_sf_id()\n data = [{\n CustomerFields.id: customer_id,\n }]\n\n sync_customer(data)\n customer = find_by_sf_id(Customer, customer_id)\n sync_customer([], [customer])\n customer_new = find_by_sf_id(Customer, customer_id)\n self.assertIsNone(customer_new)\n\n def c35418_delete_for_1_contract_sync_test(self):\n\n customer_id = generate_sf_id()\n data = [{\n CustomerFields.id: customer_id,\n }]\n\n sync_customer(data)\n customer = find_by_sf_id(Customer, customer_id)\n\n contract_id = generate_sf_id()\n contract_data = [{\n ContractFields.id: contract_id,\n ContractFields.account: {\n ContractFields.id: customer_id,\n }\n }]\n\n sync_contract(contract_data)\n contract = find_by_sf_id(Contract, contract_id)\n self.assertEqual(contract.sf_id, contract_id)\n\n contract_item_id = generate_sf_id()\n pp_data = [{\n ContractItemPPFields.id: contract_item_id,\n ContractItemPPFields.device: 'Android',\n ContractItemPPFields.pricebookEntry: {\n ContractItemPPFields.PricebookEntry.product2: {\n ContractItemPPFields.PricebookEntry.Product2.package_Type: u'Basic',\n ContractItemPPFields.PricebookEntry.Product2.store: u'Google Play',\n ContractItemPPFields.PricebookEntry.Product2.name: u'Google Play Basic'\n }\n },\n ContractItemPPFields.opportunityId: contract_id\n }, ]\n\n sync_package_pricing(pp_data)\n contract_item = find_by_sf_id(ContractItem, contract_item_id)\n self.assertEqual(contract_item.sf_id, contract_item_id)\n\n pyd_id = generate_sf_id()\n pyd_data = [\n {\n ContractItemPYDFields.id: pyd_id,\n ContractItemPYDFields.device: u'Android',\n ContractItemPYDFields.countries: u'United States;Germany',\n ContractItemPYDFields.pricebookEntry: {\n ContractItemPYDFields.PricebookEntry.product2: {\n ContractItemPYDFields.PricebookEntry.Product2.package_Type: u'Premium',\n ContractItemPYDFields.PricebookEntry.Product2.store: u'Google Play',\n ContractItemPYDFields.PricebookEntry.Product2.name: u'PYA Google Play Premium'\n }\n },\n ContractItemPYDFields.app_ID: u'1000600000001802;1000600000001967; 1000600000001793; 1000600000001787',\n ContractItemPYDFields.publisher_ID: u'1000200000004849; 1000200000000122; 1000200000016561',\n ContractItemPYDFields.opportunityId: contract_id\n }\n ]\n\n sync_pick_your_data(pyd_data)\n pyp_contract_item = find_by_sf_id(ContractItem, pyd_id)\n self.assertEqual(pyp_contract_item.sf_id, pyd_id)\n\n usage_id = generate_sf_id()\n usage_data = [{\n ContractItemUsageFields.id: usage_id,\n ContractItemUsageFields.device: u'Android',\n ContractItemUsageFields.methodology: InMethodology.projected,\n ContractItemUsageFields.categories: 
UsageCategories.get_category(UsageCategories.ALL_GAME_CATEGORY),\n ContractItemUsageFields.pricebookEntry: self.compose_pricebook_usage_entry(Devices.ANDROID),\n ContractItemUsageFields.opportunityId: contract_id\n }]\n sync_usage(usage_data)\n usage_contract_items = find_by_sf_id(ContractItem, usage_id)\n self.assertEqual(usage_contract_items.sf_id, usage_id)\n\n sync_customer([], [customer])\n customer_new = find_by_sf_id(Customer, customer_id)\n self.assertIsNone(customer_new)\n\n contract_new = find_by_sf_id(Contract, contract_id)\n self.assertIsNone(contract_new)\n contract_items_new = find_by_sf_id(ContractItem, contract_item_id)\n self.assertIsNone(contract_items_new)\n usage_contract_items_new = find_by_sf_id(ContractItem, usage_id)\n self.assertIsNone(usage_contract_items_new)\n pyp_contract_item_new = find_by_sf_id(ContractItem, pyd_id)\n self.assertIsNone(pyp_contract_item_new)\n","sub_path":"tests/qa/cases/cmt_sync/customer/test_delete_customer_sync.py","file_name":"test_delete_customer_sync.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101018377","text":"from digg_paginator import DiggPaginator\n\n\nclass DiggPaginatorViewMixin(object):\n\n paginator_class = DiggPaginator\n paginate_by = 12\n\n paginate_orphans = 0\n paginate_allow_empty_first_page = True\n paginate_body = 5\n paginate_padding = 2\n paginate_margin = 2\n paginate_tail = 1\n\n def get_paginator(self, queryset, per_page, orphans=0,\n allow_empty_first_page=True, **kwargs):\n return(super(DiggPaginatorViewMixin, self).get_paginator(\n queryset,\n per_page,\n orphans=self.paginate_orphans,\n allow_empty_first_page=self.paginate_allow_empty_first_page,\n body=self.paginate_body,\n padding=self.paginate_padding,\n margin=self.paginate_margin,\n tail=self.paginate_tail,\n **kwargs)\n )\n","sub_path":"server/core/viewmixins/diggpaginator_viewmixin.py","file_name":"diggpaginator_viewmixin.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419968658","text":"from libs.config import alias, color, gget\nfrom libs.myapp import execute_sql_command\n\n\n@alias(True, _type=\"DATABASE\", t=\"table\")\ndef run(table: str):\n \"\"\"\n db_columns\n\n Output all columns of a table.\n\n eg: db_columns {table}\n \"\"\"\n if (not gget(\"db_connected\", \"webshell\")):\n print(color.red(\"Please run db_init command first\"))\n return\n database = gget(\"db_dbname\", \"webshell\")\n print(execute_sql_command(f\"show columns from {table};\", database))\n","sub_path":"doughnuts/webshell_plugins/db_columns.py","file_name":"db_columns.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"123506965","text":"from constant import *\nimport subprocess\nimport json\nimport os\nimport pprint\nfrom bit import wif_to_key\nfrom bit import PrivateKeyTestnet\nfrom web3 import Web3\nfrom dotenv import load_dotenv\nfrom web3.middleware import geth_poa_middleware\nfrom eth_account import Account\nfrom web3 import Web3\nfrom bit.network import NetworkAPI\nfrom web3.middleware import geth_poa_middleware\n\n\nw3 = Web3(Web3.HTTPProvider(\"http://127.0.0.1:8545\"))\nw3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\nmnemonic = os.getenv('MNEMONIC', 'federal title audit cause during enlist avocado grape dash spider aspect 
hawk')\nprint(mnemonic)\nprint(type(mnemonic))\n\n\ndef derive_wallets(cointype, mnemonic):\n\n command = f'php ./derive -g --mnemonic=\"{mnemonic}\" -cols=address,path,privkey,pubkey --coin={cointype} --numderive=3 --format=json'\n print(command)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n (output, err) = p.communicate()\n keys = json.loads(output) \n return keys\n\n\ndef priv_key_to_account(coin, priv_key):\n \"\"\"\n This will convert the privkey string in a child key to an account object that bit or web3.py can use to transact\n coin -- the coin type (defined in constants.py).\n\n priv_key -- the privkey string will be passed through here.\n \"\"\"\n if (coin == ETH):\n return Account.privateKeyToAccount(priv_key)\n elif (coin == BTCTEST):\n return PrivateKeyTestnet(priv_key)\n \n \ndef create_tx(coin, account, to, amount):\n \"\"\"\n this will create the raw, unsigned transaction that contains all metadata needed to transact.\n coin -- the coin type (defined in constants.py).\n account -- the account object from priv_key_to_account.\n to -- the recipient address.\n amount -- the amount of the coin to send.\n \"\"\"\n if ( coin == ETH):\n gasEstimate = w3.eth.estimateGas(\n {\"from\": account.address, \"to\": to, \"value\": amount}\n )\n return {\n \"from\": account.address,\n \"to\": to,\n \"value\": amount,\n \"gasPrice\": w3.eth.gasPrice,\n \"gas\": gasEstimate,\n \"nonce\": w3.eth.getTransactionCount(account.address),\n }\n elif(coin == BTCTEST):\n return PrivateKeyTestnet.prepare_transaction(account.address, [(to, amount, BTC)])\n \ndef send_tx(coin, account, to , amount):\n \"\"\"\n This will call create_tx, sign the transaction, then send it to the designated network.\n coin -- the coin type (defined in constants.py).\n account -- the account object from priv_key_to_account.\n to -- the recipient address.\n amount -- the amount of the coin to send.\n \"\"\"\n if (coin == ETH):\n tx = create_tx(ETH,account, to, amount)\n signed_tx = account.sign_transaction(tx)\n result = w3.eth.sendRawTransaction(signed_tx.rawTransaction)\n print(result.hex())\n return result.hex()\n elif(coin == BTCTEST):\n tx = create_tx(BTCTEST,account, to, amount) \n signed_tx = account.sign_transaction(tx)\n return NetworkAPI.broadcast_tx_testnet(signed_tx)\n \n \ncoins = {} #initialize coin object\neth = derive_wallets(ETH, mnemonic) #create ETH accounts\ncoins[ETH]=eth\nbtc = derive_wallets(BTCTEST, mnemonic)\ncoins[BTCTEST]=btc\n\npp = pprint.PrettyPrinter(indent=4)\npp.pprint(coins)\n\naccountE = priv_key_to_account(ETH, coins[ETH][0]['privkey'])\nsend_tx(ETH, accountE, \"0xc470b0A110C03636d5a6B8821aa34aED59E9f920\", 1)\n\naccountB = priv_key_to_account(BTCTEST, coins[BTCTEST][0]['privkey']) \nsend_tx(BTCTEST, accountB, \"mjMoK8zFxYaYmQNcG1eUTjtBKWUGAVMorq\", 0.00002)\n\n","sub_path":".ipynb_checkpoints/wallet-checkpoint.py","file_name":"wallet-checkpoint.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33363290","text":"\"\"\"\r\nCreated on Thu Mar 22 13:47:24 2018\r\n\r\n@author: Nodar.Okroshiashvili\r\n\"\"\"\r\n\r\narr = list(map(int, input().rstrip().split()))\r\n\r\n\r\ndef miniMaxSum(arr):\r\n suma = sum(arr)\r\n minimum = suma - max(arr)\r\n maximum = suma - min(arr)\r\n print(minimum, maximum)\r\n \r\n \r\nminiMaxSum(arr)\r\n\r\n\r\n","sub_path":"Problem Solving/Mini-Max Sum.py","file_name":"Mini-Max 
Sum.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327252659","text":"import yfinance as yf, datetime as dt\n\ndef getHistory(symbol):\n data = yf.Ticker(symbol).history(period='2y', interval='1d')\n data = data.drop(['Dividends', 'Stock Splits'], axis=1)\n columnas_nombres = ['open', 'high', 'low', 'close', 'volume']\n data.columns = columnas_nombres\n data.index.rename('date', inplace=True)\n \n return data\n\ndef getInfo(symbol):\n info = yf.Ticker(symbol).info\n\ndef getEarnings(symbol):\n earnings = yf.Ticker(symbol).calendar\n date_earnings = earnings.iloc[0,0]\n date_earnings = date_earnings.strftime('%d %b %Y')\n \n return date_earnings","sub_path":"yf_api.py","file_name":"yf_api.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"441492833","text":"#! /home/sheffler/.conda/envs/rpxdock/bin/python\n\nimport logging, itertools, concurrent, tqdm, rpxdock as rp\n\ndef get_rpxdock_args():\n arg = rp.options.get_cli_args()\n if not arg.architecture: raise ValueError(\"architecture must be specified\")\n return arg\n\ndef get_spec(arch):\n arch = arch.upper()\n if arch.startswith('P'):\n sym = arch.split('_')[0]\n component_nfold = arch.split('_')[1]\n ismirror = sym[-1] == 'M'\n if len(component_nfold) == 1 and ismirror:\n spec = rp.search.DockSpec1CompMirrorLayer(arch)\n elif len(component_nfold) == 1:\n spec = rp.search.DockSpec1CompLayer(arch)\n elif len(component_nfold) == 2:\n spec = rp.search.DockSpec2CompLayer(arch)\n elif len(component_nfold) == 3:\n spec = rp.search.DockSpec3CompLayer(arch)\n else:\n raise ValueError('number of conponents must be 1, 2 or 3')\n elif len(arch) == 2 or (arch[0] == 'D' and arch[2] == '_'):\n spec = rp.search.DockSpec1CompCage(arch)\n else:\n spec = rp.search.DockSpec2CompCage(arch)\n return spec\n\ndef dock_cyclic(hscore, inputs, architecture, **kw):\n arg = rp.Bunch(kw)\n bodies = [rp.Body(inp, **arg) for inp in arg.inputs1]\n\n exe = concurrent.futures.ProcessPoolExecutor\n # exe = rp.util.InProcessExecutor\n with exe(arg.ncpu) as pool:\n futures = list()\n for ijob, bod in enumerate(bodies):\n futures.append(\n pool.submit(\n rp.search.make_cyclic,\n bod,\n architecture.upper(),\n hscore,\n **arg,\n ))\n futures[-1].ijob = ijob\n result = [None] * len(futures)\n for f in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):\n result[f.ijob] = f.result()\n result = rp.concat_results(result)\n\n # result = rp.search.make_cyclic(body, architecture.upper(), hscore, **arg)\n\n return result\n\ndef dock_onecomp(hscore, **kw):\n arg = rp.Bunch(kw)\n spec = get_spec(arg.architecture)\n # double normal resolution, cuz why not?\n if spec.type == 'mirrorlayer':\n sampler = rp.sampling.hier_mirror_lattice_sampler(spec, resl=10, angresl=10, **arg)\n else:\n sampler = rp.sampling.hier_axis_sampler(spec.nfold, lb=0, ub=100, resl=5, angresl=5,\n axis=spec.axis, flipax=spec.flip_axis)\n bodies = [rp.Body(inp, **arg) for inp in arg.inputs1]\n\n exe = concurrent.futures.ProcessPoolExecutor\n # exe = rp.util.InProcessExecutor\n with exe(arg.ncpu) as pool:\n futures = list()\n for ijob, bod in enumerate(bodies):\n futures.append(\n pool.submit(\n rp.search.make_onecomp,\n bod,\n spec,\n hscore,\n rp.hier_search,\n sampler,\n **arg,\n ))\n futures[-1].ijob = ijob\n result = [None] * len(futures)\n for f in 
tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):\n result[f.ijob] = f.result()\n result = rp.concat_results(result)\n return result\n # result = rp.search.make_onecomp(bodyC3, spec, hscore, rp.hier_search, sampler, **arg)\n\ndef dock_multicomp(hscore, **kw):\n arg = rp.Bunch(kw)\n spec = get_spec(arg.architecture)\n sampler = rp.sampling.hier_multi_axis_sampler(spec, **arg)\n logging.info(f'num base samples {sampler.size(0):,}')\n\n bodies = [[rp.Body(fn, **arg) for fn in inp] for inp in arg.inputs]\n assert len(bodies) == spec.num_components\n\n exe = concurrent.futures.ProcessPoolExecutor\n # exe = rp.util.InProcessExecutor\n with exe(arg.ncpu) as pool:\n futures = list()\n for ijob, bod in enumerate(itertools.product(*bodies)):\n futures.append(\n pool.submit(\n rp.search.make_multicomp,\n bod,\n spec,\n hscore,\n rp.hier_search,\n sampler,\n **arg,\n ))\n futures[-1].ijob = ijob\n result = [None] * len(futures)\n for f in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)):\n result[f.ijob] = f.result()\n result = rp.concat_results(result)\n return result\n\ndef main():\n arg = get_rpxdock_args()\n logging.info(f'weights: {arg.wts}')\n\n hscore = rp.CachedProxy(rp.RpxHier(arg.hscore_files, **arg))\n arch = arg.architecture\n\n if arch.startswith('C'):\n result = dock_cyclic(hscore, **arg)\n elif len(arch) == 2 or (arch[0] == 'D' and arch[2] == '_'):\n result = dock_onecomp(hscore, **arg)\n else:\n result = dock_multicomp(hscore, **arg)\n\n print(result)\n if arg.dump_pdbs:\n result.dump_pdbs_top_score(hscore=hscore, **arg)\n result.dump_pdbs_top_score_each(hscore=hscore, **arg)\n if not arg.suppress_dump_results:\n rp.util.dump(result, arg.output_prefix + '_Result.pickle')\n\nif __name__ == '__main__':\n main()\n","sub_path":"rpxdock/app/dock.py","file_name":"dock.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"654477335","text":"import pandas as pd\nfrom dateutil.relativedelta import relativedelta, FR\n\nlist_of_dicts = [\n {\n 'date': '2018-03-01',\n 'weather': 'cloudy'\n },\n {\n 'date': '2018-03-02',\n 'weather': 'sunny'\n },\n {\n 'date': '2018-03-03'\n },\n {\n 'date': '2018-03-04',\n 'weather': 'rain'\n }\n]\ndf = pd.DataFrame(list_of_dicts)\n\ndf['date'] = pd.to_datetime(df['date'])\ndf['last_friday'] = df['date'].apply(lambda x: x + relativedelta(weekday=FR(-1)))\nprint(df)\n","sub_path":"get_last_friday_with_relativedelta_in_dateutil.py","file_name":"get_last_friday_with_relativedelta_in_dateutil.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519894438","text":"# -*- coding: utf-8 -*-\nimport requests\nimport json\nimport reddit_bot\n\nvalue_dictionary = reddit_bot.reply_dictionary\n\n# Using the unofficial jisho.org API\n# Currently it works by using the following url + a keyword\n\njisho = \"http://jisho.org/api/v1/search/words?keyword=\" + value\n\nresponse = requests.get(jisho)\nresponse.raise_for_status()\njisho_results = json.loads(response.text)\ndata = jisho_results['data'][0]\nreading_1 = data['japanese'][0]['reading']\nenglish1 = data['senses'][0]['english_definitions'][0]\nenglish2 = data['senses'][0]['english_definitions'][1]\nword_type1 = data['senses'][0]['parts_of_speech'][0]\nword_type2 = data['senses'][0]['parts_of_speech'][1]\n\njapanese = 'Hiragana Reading: ' + reading_1\nenglish = 'English Meaning: ' + english1 + 
', ' + english2\nword_type = 'Word Use: ' + word_type1 + ', ' + word_type2\n","sub_path":"Programs/Reddit Bot Test/kanji_look.py","file_name":"kanji_look.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253967947","text":"import pymysql\n\n\ndef get_connect():\n conn = pymysql.connect(\n host='127.0.0.1', user='root', password='1234', db='world', charset='utf8')\n if conn:\n print('연결 완료 !!!')\n return conn\n\n# get_connect()\n\n\ndef get_country_list():\n conn = get_connect()\n\n cursor = conn.cursor()\n\n cursor.execute('''\n SELECT Code, Name, Continent, Population, GNP FROM countryTb;\n ''')\n\n country_list = cursor.fetchall()\n \n temp_list=[]\n for row in country_list:\n temp_dict={}\n temp_dict['Code']=row[0]\n temp_dict['Name']=row[1]\n temp_dict['Continent']=row[2]\n temp_dict['Population']=row[3]\n temp_dict['GNP']=row[4]\n temp_list.append(temp_dict)\n\n # print(temp_list)\n conn.close()\n return temp_list\n\n# 데이터베이스에 레코드 추가 함수\ndef add_record(c_code, c_name, c_continent, c_population, c_gnp):\n # db 연결\n conn=get_connect()\n \n sql='''\n INSERT INTO countryTb (Code, Name, Continent, Population, GNP)\n VALUES (%s, %s, %s, %s, %s);\n '''\n\n cursor=conn.cursor()\n\n cursor.execute(sql, (c_code, c_name, c_continent, c_population, c_gnp))\n\n conn.commit()\n\n conn.close()\n\ndef delete_record(c_code):\n conn=get_connect()\n\n sql='''\n DELETE FROM countryTb WHERE code=%s;\n '''\n\n cursor=conn.cursor()\n\n cursor.execute(sql,c_code)\n\n conn.commit()\n\n conn.close()\n\ndef record_view(c_code,c_name,c_continent,c_population,c_gnp):\n conn=get_country_list()\n\n sql='''\n '''\n\n cursor=conn.cursor()\n cursor.execute(sql,c_code,c_name,c_continent,c_population,c_gnp)\n conn.commit()\n conn.close()\n\n# add_record('GIN', 'Guinea', 'Africa', '7430000', '2352.00')\n# print(get_country_list())\n# delete_record('GIN')","sub_path":"web_practice/w0120_back/step5/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"337722444","text":"import tensorflow as tf\nif tf.__version__ >= '2.0.0':\n import tensorflow.keras.layers as nn\nelse:\n import keras.layers as nn\n\nclass Block(object):\n def __init__(self, in_filters, out_filters, stride, name, expansion=1):\n self.conv = nn.Conv2D(filters=in_filters * expansion,\n kernel_size=1, strides=1, padding='same',\n kernel_initializer='glorot_normal', name=f'{name}/1x1conv3')\n self.dconv = nn.DepthwiseConv2D(kernel_size=3, strides=stride, padding='same',\n kernel_initializer='glorot_normal', name=f'{name}/dwconv')\n self.conv1x1 = nn.Conv2D(filters=out_filters, kernel_size=1, strides=1, padding='same',\n kernel_initializer='glorot_normal', name=f'{name}/1x1conv')\n\n self.bn = nn.BatchNormalization(epsilon=1e-3, momentum=0.99, name=f'{name}/bn')\n self.bn2 = nn.BatchNormalization(epsilon=1e-3, momentum=0.99, name=f'{name}/bn2')\n self.bn3 = nn.BatchNormalization(epsilon=1e-3, momentum=0.99, name=f'{name}/bn3')\n\n self.activation = nn.ReLU()\n self.skip_connection = True if (stride == 1) & (in_filters == out_filters) else False\n\n def __call__(self, net):\n shortcut = net\n\n net = self.conv(net)\n net = self.bn(net)\n net = self.activation(net)\n\n # net = self.dconv(net)\n # net = self.bn2(net)\n # net = self.activation(net)\n\n net = self.conv1x1(net)\n net = self.bn3(net)\n\n if self.skip_connection:\n net = 
nn.Add()([shortcut, net])\n\n return net\n","sub_path":"Models/Blocks/DepthwiseSeparableConvBlock.py","file_name":"DepthwiseSeparableConvBlock.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"514220255","text":"\"\"\"interface to the city of Lappeenranta bus schedule service at http://lprwilima.lappeenranta.fi:8080\n\"\"\"\n\nclassifiers = \"\"\"\\\nDevelopment Status :: 3 - Alpha\nProgramming Language :: Python\nOperating System :: MacOS :: MacOS X\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nTopic :: Software Development :: Libraries :: Python Modules\nTopic :: Software Development :: Libraries\nTopic :: Internet :: WWW/HTTP :: WSGI :: Application\nIntended Audience :: Developers\nLicense :: OSI Approved :: GNU General Public License (GPL)\n\"\"\"\n\nimport sys\nfrom distutils.core import setup\n\nif sys.version_info < (2, 3):\n _setup = setup\n def setup(**kwargs):\n if kwargs.has_key(\"classifiers\"):\n del kwargs[\"classifiers\"]\n _setup(**kwargs)\n\ndoclines = __doc__.split(\"\\n\")\n\nsetup(name=\"pywilima\",\n version=\"0.2\",\n maintainer=\"Petri Savolainen\",\n maintainer_email=\"petri.savolainen@iki.fi\",\n platforms = [\"win32\", \"unix\"],\n packages = [\"pywilima\"],\n package_dir = {\"pywilima\": \"lib\"},\n scripts=['scripts/departures.py',],\n description = doclines[0],\n classifiers = filter(None, classifiers.split(\"\\n\")),\n long_description = \"\\n\".join(doclines[2:]),\n)\n","sub_path":"pypi_install_script/pywilima-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237670072","text":"#!/usr/bin/python3\n\"\"\"\nUsing what is done in task #0, exports data in the CSV format\n\"\"\"\nif __name__ == \"__main__\":\n import json\n from sys import argv\n import requests\n\n newd = {}\n filename = argv[1] + \".json\"\n req = requests.get('https://jsonplaceholder.typicode.com/todos?userId={}'\n .format(argv[1]))\n req_id = requests.get('https://jsonplaceholder.typicode.com/users/{}'\n .format(argv[1]))\n name = (req_id.json().get('username'))\n with open(filename, \"w\") as f:\n r_json = req.json()\n d = [{'task': i.get('title'), 'completed': i.get('completed'),\n 'username': name}for i in r_json]\n newd[argv[1]] = d\n json.dump(newd, f)\n","sub_path":"0x15-api/2-export_to_JSON.py","file_name":"2-export_to_JSON.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"492030820","text":"from UI import ui_manager as ui\n\nfrom math import floor, log10\nfrom datetime import datetime\nimport time\nimport ast\nimport os\nimport re\n\nfrom Robinhood import Robinhood\n\nDATABASE_PATH = re.search(\"(.*)\\\\\\\\.*\", os.path.realpath(__file__)).group(1) + \"\\\\Database\\\\\"\n\n\ndef build_portfolio_report():\n \"\"\"\n Build a report of my stock portfolio, runs once a day at the end of market hours\n :return: None\n \"\"\"\n\n # id for the display\n this_id = str(datetime.now())\n # update the display\n ui.DisplayQueueManager.request_connection([\"Main\", \"Database\"], {\"title\": \"Stock Portfolio\",\n \"color\": ui.YELLOW,\n \"unique_id\": this_id,\n \"TextBox\": [\"Building end of day report...\"]})\n\n # get the file data for the minute reports\n portfolio_data = get_file_data(\"non_static\\\\stock_portfolio.txt\")\n # initialize 
the list that will eventually be joined into the report\n report_data = []\n # for each line in the data\n for index, line in enumerate(portfolio_data):\n # build it into a processable list\n portfolio_data[index] = portfolio_data[index].split(\"; \")\n portfolio_data[index][1] = float(portfolio_data[index][1].replace(\"\\n\", \"\"))\n # calculate the net change over the day\n days_change = round(portfolio_data[-1][1] - portfolio_data[0][1], 4)\n # calculate the percentage of that change\n days_percent_change = days_change / portfolio_data[0][1]\n\n current_date = str(datetime.now().date()).split(\"-\")\n\n # though next 12 lines, build the basic report values\n report_data.append(\"Portfolio Report for: \" + current_date[1] + \"/\" + current_date[2] + \"/\" + current_date[0])\n report_data.append(\"Current Value: \" + format(portfolio_data[-1][1], \".4f\"))\n if days_change == 0:\n report_data.append(\"Portfolio's value did not change for the day\")\n elif days_change > 0:\n report_data.append(\"Value Gained over day: $\" + format(days_change, \".4f\"))\n report_data.append(\"Percent Increased over day: \" +\n str(round(days_percent_change, -int(floor(log10(abs(days_percent_change)))))) + \"%\")\n else:\n report_data.append(\"Value Lost over day: $\" + format(days_change, \".4f\"))\n report_data.append(\"Percent Decreased over day: \" +\n str(round(days_percent_change, -int(floor(log10(abs(days_percent_change)))))) + \"%\")\n\n # login to robinhood\n robin_trader = Robinhood()\n try:\n robin_trader.login(\"ConradSelig\", open(DATABASE_PATH + \"static\\\\pass.txt\", \"r\").read())\n except (Exception, BaseException):\n # update the display\n ui.DisplayQueueManager.update_data(\"Stock Portfolio\", {\"unique_id\": this_id,\n \"color\": ui.RED,\n \"TextBox\": [\"Building end of day report...\",\n \" CONNECTION FAILED\"]})\n return\n\n # collect the stocks owned from robinhood\n dsecowned = robin_trader.securities_owned()[\"results\"]\n report_data.append(\"\\nStocks Owned: \")\n # for each stock owned\n for position in dsecowned:\n # write the metadata for that report\n id = position['instrument'].split('/')[4]\n if float(position['quantity']) > 0:\n report_data.append(\" Stock Name: \" + robin_trader.instrument(id)['name'])\n report_data.append(\" Stock Symbol: \" + robin_trader.instrument(id)['symbol'])\n num_owned = position['quantity']\n report_data.append(\" Number Owned: \" + num_owned)\n value_per = robin_trader.bid_price(robin_trader.instrument(id)['symbol'])[0][0]\n report_data.append(\" Value per Stock: \" + format(float(value_per), \".4f\"))\n report_data.append(\" Portfolio Value: \" + format((float(num_owned) * float(value_per)), \".4f\"))\n\n # wait for the display to catch up\n time.sleep(1)\n # update the display\n ui.DisplayQueueManager.update_data(\"Stock Portfolio\", {\"unique_id\": this_id,\n \"TextBox\": [\"Building end of day report...\",\n \" Built.\",\n \"Writing to Database...\"]})\n\n # write the report to the database\n write_file_data(\"non_static\\\\portfolio_report.txt\", \"\\n\".join(report_data))\n\n # wait for the display to catch up\n time.sleep(1)\n # update the display\n ui.DisplayQueueManager.update_data(\"Stock Portfolio\", {\"unique_id\": this_id,\n \"color\": ui.GREEN,\n \"TextBox\": [\"Building end of day report...\",\n \" Built.\",\n \"Writing to Database...\",\n \" Written.\"],\n \"lifespan\": 3})\n\n return\n\n\ndef update_stock_portfolio_record():\n \"\"\"\n Collects the current portfolio value and writes it to the database\n :return: None\n 
\"\"\"\n # open the known reports file and read the data. Reading the data for the display\n file = open(DATABASE_PATH + \"non_static\\\\stock_portfolio.txt\", \"r\")\n excising_data = file.readlines()\n file.close()\n\n # id for the display\n this_id = str(datetime.now())\n # update the display\n ui.DisplayQueueManager.request_connection([\"Database\"], {\"title\": \"Updating Portfolio Record\",\n \"color\": ui.YELLOW,\n \"unique_id\": this_id,\n \"TextBox\": [\"Number of old records:\",\n \" \" + str(len(excising_data)),\n \"\",\n \"Adding new record...\"]})\n\n # login to robinhood\n robin_trader = Robinhood()\n try:\n robin_trader.login(\"ConradSelig\", open(DATABASE_PATH + \"static\\\\pass.txt\", \"r\").read())\n except (Exception, BaseException):\n # update the display\n ui.DisplayQueueManager.update_data(\"Updating Portfolio Record\", {\"color\": ui.RED,\n \"unique_id\": this_id,\n \"TextBox\": [\"Number of old records:\",\n \" \" + str(len(excising_data)),\n \"\",\n \"Adding new record...\",\n \" LOGIN FAILED\"],\n \"lifespan\": 5})\n return\n\n profile_data = robin_trader.portfolios()\n\n # open the database file and write the new record\n file = open(DATABASE_PATH + \"non_static\\\\stock_portfolio.txt\", \"a\")\n file.write(str(datetime.now()) + \"; \" + profile_data[\"equity\"] + \"\\n\")\n file.close()\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Updating Portfolio Record\", {\"color\": ui.GREEN,\n \"unique_id\": this_id,\n \"TextBox\": [\"Number of old records:\",\n \" \" + str(len(excising_data)),\n \"\",\n \"Adding new record...\",\n \" Done\"],\n \"lifespan\": 3})\n\n return\n\n\ndef store_calendar_events(new_events):\n \"\"\"\n Take in the new events and write the events to the database\n :param new_events: string of calendar events\n :return: None\n \"\"\"\n # update the display\n ui.DisplayQueueManager.request_connection([\"Database\"], {\"title\": \"Updating Calendar\",\n \"color\": ui.YELLOW,\n \"TextBox\": [\"Opening the database...\"]})\n\n # wait for the display to catch up\n time.sleep(1)\n # update the display\n ui.DisplayQueueManager.update_data(\"Updating Calendar\", {\"TextBox\": [\"Opening the database...\",\n \" Complete\",\n \"Adding new Events...\"]})\n # open the file for writing\n file = open(DATABASE_PATH + \"non_static\\\\calendar_events.txt\", \"w\")\n\n # count for the new events\n added_events = 0\n # for each new event\n for event in new_events:\n # if that event is not a known event\n try:\n # write the event to the file\n file.write(str(event) + \"\\n\")\n except MemoryError:\n ui.DisplayQueueManager.update_data(\"Updating Calendar\", {\"color\": ui.RED,\n \"TextBox\": [\"Opening the database...\",\n \" Complete\",\n \"Adding new Events...\",\n \" FAILED (MemoryError)\"],\n \"lifespan\": 15})\n return\n added_events += 1\n\n # close the file\n file.close()\n\n # wait for the display to catch up\n time.sleep(1)\n # update the display\n ui.DisplayQueueManager.update_data(\"Updating Calendar\", {\"color\": ui.GREEN,\n \"TextBox\": [\"Opening the database...\",\n \" Complete\",\n \"Adding new Events...\",\n \" Events Added \"\n \"(\" + str(added_events) + \")\"],\n \"lifespan\": 3})\n\n return\n\n\ndef parse_calendar_event(event):\n \"\"\"\n Takes in an event string read from google API or database, parses into usable list\n :param event: string event\n :return: list event\n \"\"\"\n # parse the data out of the string\n date_re = re.search(\"^\\[\\['(..)', '(..)', '(....)']\", event)\n # build the data ordered for the datetime 
library\n date = [date_re.group(1), date_re.group(2), date_re.group(3)]\n # parse the event times and name from the string\n time_name_re = re.search(\".{24}(..:..)', '(..:..)', '(.*)']$\", event)\n # build into list including the date list\n event = [date, time_name_re.group(1), time_name_re.group(2), time_name_re.group(3)]\n # return the parsed event.\n return event\n\n\ndef get_file_data(file_name, read_lines=True):\n \"\"\"\n Instead of having each module read from the database, the modules will go through this function in the db_manager\n to get data from the database.\n :param file_name: Does not include the DATABASE_PATH, id \"non_static\\\\calendar_events.txt\"\n :param read_lines: optional modifier, specifies the read mode of the function\n :return: File data as a string or a list\n \"\"\"\n\n # parse the file_name with the DATABASE_PATH\n file_name = DATABASE_PATH + file_name\n # open the file\n file = open(file_name, \"r\")\n\n # if the file is being read as a list\n if read_lines:\n # read with readlines\n data = file.readlines()\n # else being read as a string\n else:\n # read as a single string\n data = file.read()\n # close the file\n file.close()\n # return the file data.\n return data\n\n\ndef write_file_data(file_name, data, append=False):\n \"\"\"\n Instead of having each module write to the database, the modules will go through this function in the db_manager\n to add data to the database.\n :param file_name: Does not include the DATABASE_PATH, id \"non_static\\\\calendar_events.txt\"\n :param data: The data being writen, does not have to be string type.\n :param append: optional modifier, changes what mode the file is opened in.\n :return: None\n \"\"\"\n\n # parse the file name to include the database path\n file_name = DATABASE_PATH + file_name\n # open the file with the type specified by append\n file = open(file_name, \"a\" if append else \"w\")\n # write the data casted as a string\n file.write(str(data))\n # close the file\n file.close()\n\n return\n\n\ndef check_database():\n \"\"\"\n Function to make sure all needed files in the database, this is to help prevent errors later on.\n \"\"\"\n\n # update the display\n ui.DisplayQueueManager.request_connection([\"Database\"], {\"title\": \"Database Check\",\n \"color\": ui.YELLOW,\n \"TextBox\": [\"Looking for errors...\"]})\n\n # get a list of all contact and user data files\n metadata_files = os.listdir(DATABASE_PATH + \"contact_metadata\\\\\")\n user_files = os.listdir(DATABASE_PATH + \"user_data\\\\\")\n\n # wait for the display to catch up\n time.sleep(1)\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Database Check\", {\"TextBox\": [\"Looking for errors...\",\n \" \" + str(len(metadata_files)) + \" metadata.\",\n \" \" + str(len(user_files)) + \" users.\"],\n \"color\": ui.YELLOW if len(metadata_files) == len(user_files)\n else ui.RED})\n\n # wait for the display to catch up\n time.sleep(0.5)\n\n # if the file counts do not match\n if len(metadata_files) != len(user_files):\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Database Check\", {\"TextBox\": [\"Looking for errors...\",\n \" \" + str(\n len(metadata_files)) + \" metadata.\",\n \" \" + str(len(user_files)) + \" users.\",\n \"Repairing...\"]})\n\n # if there are more user files than contact files\n if len(metadata_files) < len(user_files):\n # for each user file\n for user_file in user_files:\n # if that user file is not a contact file\n if user_file not in metadata_files:\n # create that contact folder and its required 
metadata.txt\n os.mkdir(DATABASE_PATH + \"contact_metadata\\\\\" + user_file)\n open(DATABASE_PATH + \"contact_metadata\\\\\" + user_file + \"\\\\metadata.txt\", \"w\").close()\n\n # if there are more contact files than user files\n elif len(user_files) < len(metadata_files):\n # for each contact file\n for metadata_file in metadata_files:\n # if that contact file is not a user file\n if metadata_file not in user_files:\n # create the user folder and its required files\n os.mkdir(DATABASE_PATH + \"user_data\\\\\" + metadata_file)\n open(DATABASE_PATH + \"user_data\\\\\" + metadata_file + \"\\\\habits.txt\", \"w\").close()\n open(DATABASE_PATH + \"user_data\\\\\" + metadata_file + \"\\\\metadata.txt\", \"w\").close()\n\n # update the file lists\n metadata_files = os.listdir(DATABASE_PATH + \"contact_metadata\\\\\")\n user_files = os.listdir(DATABASE_PATH + \"user_data\\\\\")\n\n # finish waiting for the display to catch up\n time.sleep(0.5)\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Database Check\", {\"TextBox\": [\"Looking for errors...\",\n \" \" + str(len(metadata_files)) + \" metadata.\",\n \" \" + str(len(user_files)) + \" users.\",\n \"Checking for missing files...\"],\n \"color\": ui.YELLOW if len(metadata_files) == len(user_files)\n else ui.RED})\n\n # counts the number of errors required\n file_errors = 0\n\n # for each file in each of the user folders. Try to open it, check for a valid read, and close it.\n # If no valid read, create the file.\n for user_file in user_files:\n try:\n open(DATABASE_PATH + \"user_data\\\\\" + user_file + \"\\\\habits.txt\", \"r\").close()\n except FileNotFoundError:\n file_errors += 1\n open(DATABASE_PATH + \"user_data\\\\\" + user_file + \"\\\\habits.txt\", \"w\").close()\n try:\n open(DATABASE_PATH + \"user_data\\\\\" + user_file + \"\\\\metadata.txt\", \"r\").close()\n except FileNotFoundError:\n file_errors += 1\n open(DATABASE_PATH + \"user_data\\\\\" + user_file + \"\\\\metadata.txt\", \"w\").close()\n\n # for each file in each of the contact folders. Try to open it, check for a valid read, and close it.\n # If no valid read, create the file.\n for metadata_file in metadata_files:\n try:\n open(DATABASE_PATH + \"contact_metadata\\\\\" + metadata_file + \"\\\\metadata.txt\", \"r\").close()\n except FileNotFoundError:\n file_errors += 1\n open(DATABASE_PATH + \"user_data\\\\\" + user_file + \"\\\\metadata.txt\", \"w\").close()\n\n # wait for the display to catch up\n time.sleep(1)\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Database Check\", {\"TextBox\": [\"Looking for errors...\",\n \" \" + str(len(metadata_files)) + \" metadata.\",\n \" \" + str(len(user_files)) + \" users.\",\n \"Checking for missing files...\",\n \" Complete\",\n \" (\" + str(file_errors) + \" repairs made)\"],\n \"color\": ui.YELLOW})\n\n # wait for the display to catch up\n time.sleep(1)\n\n # update the display\n ui.DisplayQueueManager.update_data(\"Database Check\", {\"TextBox\": [\"Looking for errors...\",\n \" \" + str(len(metadata_files)) + \" metadata.\",\n \" \" + str(len(user_files)) + \" users.\",\n \"Checking for missing files...\",\n \" Complete\",\n \" (\" + str(file_errors) + \" repairs made)\",\n \"\",\n \"Check complete!\"],\n \"color\": ui.GREEN,\n \"lifespan\": 3})\n\n\ndef get_contact_metadata(contact_name, tag=\"\") -> object:\n \"\"\"\n :param contact_name: [\"first_name\", \"last_name\"]. Used to find the contact folder\n :param tag: optional. Used to pull a dictionary tag from the metadata file. 
If not provided the entire dictionary\n is returned.\n :return: -1 if no tag or file found. metadata dict[key] if key provided. metadata dict if no key provided.\n \"\"\"\n # for each folder in the contact_metadata Database folder\n for folder in os.listdir(DATABASE_PATH + \"contact_metadata\\\\\"):\n # if the folder name matches the given name\n if folder == contact_name[0].title() + \"-\" + contact_name[1].title():\n\n # open the file, read the data as a single string\n file = open(DATABASE_PATH + \"contact_metadata\\\\\" + folder + \"\\\\metadata.txt\", \"r\")\n data = file.read()\n file.close()\n\n # turn the data into an actual dictionary type.\n try:\n data = ast.literal_eval(data)\n except SyntaxError:\n return -1\n\n # if a tag is provided\n if tag != \"\":\n\n # try catches cases where the tag does not exist\n try:\n # return the metadata tag\n return data[tag]\n except KeyError:\n # key not found, return -1\n return -1\n # no key provided, return entire dict\n return data\n # contact_metadata folder not found, return -1\n return -1\n","sub_path":"DatabaseManager/db_manager.py","file_name":"db_manager.py","file_ext":"py","file_size_in_byte":22260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211607202","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# datetime:2020/12/29 11:43\n# author\nimport ddt\nimport unittest\n# 测试数据\ntestdata =[{\"user\":\"zhangsan\",\"psw\":\"123\"},\n {\"user\":\"wangwu\",\"psw\":\"456\"}]\n\n@ddt.ddt()\n\nclass Test(unittest.TestCase):\n def setUp(self):\n\n print(\"start\")\n\n def tearDown(self):\n print(\"end\")\n\n @ddt.data(*testdata)\n\n def test_ddt(self,data):\n\n print(data)\n if __name__ == \"__main__\":\n unittest.main()\n\n","sub_path":"test_app/k2.py","file_name":"k2.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142708041","text":"import json\nfrom collections import OrderedDict\nfrom operator import itemgetter \n\ninput_file = 'unigrams2-es.txt'\nunigrams = open(input_file, 'r').read().split('\\n')\n\ntotal_count = 0\n\nletter_dict = {}\n\nfor line in unigrams:\n entry = line.split(' ')\n key = unicode(entry[0], 'utf-8')\n value = int(entry[1])\n total_count += value\n letter_dict[key] = value\n\nfor entry in letter_dict:\n letter_dict[entry] = round(letter_dict[entry] / float(total_count) * 100, 4)\n\nletter_dict = OrderedDict(sorted(letter_dict.items(), key = itemgetter(1), reverse = True))\n\njson_r = json.dumps(letter_dict).replace(',', ',\\n')\n\noutput_file = input_file + '.JSON'\noutput = open(output_file, 'w')\noutput.write(json_r)","sub_path":"lab2/parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494140352","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\ndef getICD3Tree(normal):\n icd3_dict = {}\n\n for name, icd6 in normal.iteritems():\n icd3 = icd6[:3]\n if icd3 not in icd3_dict.keys():\n icd3_dict[icd3] = [(name, icd6)]\n else:\n icd3_dict[icd3].append((name, icd6))\n return icd3_dict\n\n# 载入ICD层次结构,返回的字典,键是icd4位码,值是list,list中每元素为(ICD6_name, icd6)\ndef getICDTree(normal):\n icd4_dict = {}\n\n for name, icd6 in normal.iteritems():\n icd4 = icd6[:5]\n if icd4 not in icd4_dict.keys():\n icd4_dict[icd4] = [(name, icd6)]\n else:\n icd4_dict[icd4].append((name, icd6))\n return 
icd4_dict\n\n# 载入字典(缩写字典)\ndef loadDict(filename):\n file = open(filename)\n res = {}\n\n while 1:\n line = file.readline().strip()\n if not line:\n break\n\n n1 = line.split(\" \")[0]\n n2 = line.split(\" \")[1]\n res[n1.decode(\"utf-8\")] = n2.decode(\"utf-8\")\n res[n2.decode(\"utf-8\")] = n1.decode(\"utf-8\")\n return res\n\n# 载入icd6的关键词集合,已去重\ndef loadICD_Keywords_Dict():\n file = open(\"./Dict/ICD_Keywords_Dict.txt\")\n line = file.readline()\n res = {}\n\n while(line != \"\"):\n icd = line.split(\"-\")[0]\n words = line.split(\"-\")[1].split(\",\")\n key_words = []\n for str in words:\n key_words.append((str.split(\":\")[0], float(str.split(\":\")[1])))\n res[icd] = key_words\n line = file.readline()\n return res\n\ndef getNormalNames(values):\n normal = {} # 标准疾病名称字典(normalized_name, ICD-10)\n\n for row in values:\n if isinstance(row[0], unicode):\n normal[row[1].decode('utf-8')] = row[0].decode('utf-8')\n return normal\n\ndef write_List(file, names):\n file.write(\" | \".join(names) + \"\\n\")","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327947698","text":"# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n#\n# Copyright 2018, 8minutenergy / Kisensum.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Neither 8minutenergy nor Kisensum, nor any of their\n# employees, nor any jurisdiction or organization that has cooperated in the\n# development of these materials, makes any warranty, express or\n# implied, or assumes any legal liability or responsibility for the accuracy,\n# completeness, or usefulness or any information, apparatus, product,\n# software, or process disclosed, or represents that its use would not infringe\n# privately owned rights. 
Reference herein to any specific commercial product,\n# process, or service by trade name, trademark, manufacturer, or otherwise\n# does not necessarily constitute or imply its endorsement, recommendation, or\n# favoring by 8minutenergy or Kisensum.\n# }}}\n\nimport logging\nimport sys\nimport json\nimport requests\nfrom datetime import datetime, timezone\nfrom collections import defaultdict, OrderedDict\n\nimport django\ndjango.setup()\nfrom django.conf import settings\nfrom dnp3.points import PointDefinitions\nfrom dnp3.mesa.functions import FunctionDefinitions\n\nfrom master_cmd import MasterCmd\nfrom master_cmd import SERVER_IP, CLIENT_IP, PORT_NUMBER\nfrom dashboard.mesa_master_app import MesaMasterApplication\nfrom dashboard.models import FunctionTest, Point, ArrayPoint\nfrom dashboard.util import get_function_test_dict_from_db, clean_func_test\nfrom dashboard.master_client import MasterClient as MC\n\n\n_log = logging.getLogger(__name__)\n_log.setLevel(logging.DEBUG)\n\nDIRECTION_SENT = 'sent'\n\ncurrent_outstation_ip = '127.0.0.1'\ncurrent_outstation_port = 20000\ncurrent_server_ip = settings.SERVER_IP_ADDRESS\ncurrent_server_port = settings.SERVER_PORT\n\n\nclass MesaMasterCmd(MasterCmd):\n \"\"\"\n Instantiate an opendnp3 DNP3Manager that acts as the master in a DNP3 master/outstation interaction.\n\n Accept command-line input that sends simulated commands to the outstation,\n using the line-oriented command interpreter framework from the 'cmd' Python Standard Library.\n\n This subclass extends the basic DNP3 master Cmd behavior defined in MasterCmd,\n adding MESA-ESS functions.\n \"\"\"\n\n def __init__(self):\n super(MesaMasterCmd, self).__init__()\n self.point_definitions = PointDefinitions(settings.POINT_DEF_PATH)\n self.function_definitions = FunctionDefinitions(self.point_definitions,\n function_definitions_path=settings.FUNCTION_DEF_PATH)\n\n @staticmethod\n def create_application():\n \"\"\"Override this method to use your own MasterApplication subclass.\"\"\"\n return MesaMasterApplication(host_ip=SERVER_IP, local_ip=CLIENT_IP, port=PORT_NUMBER)\n\n def do_menu(self, line):\n \"\"\"Display a menu of command-line options. Command syntax is: menu\"\"\"\n print('\\tlist_functions\\t\\t\\tList names of functions.')\n print('\\tlist_function_tests \\tList function tests for specified function.')\n print('\\tsend\\t\\t\\t\\t\\tSend function test.')\n print('\\tdump\\t\\t\\t\\t\\tPrint function test.')\n print('\\tmenu\\t\\t\\t\\t\\tDisplay this menu.')\n print('\\tserver_settings\\t\\t\\tView server (master) connection settings.')\n print('\\tconfigure_server\\t\\tChange server connection settings.')\n print('\\toutstation_settings\\t\\tView outstation connection settings.')\n print('\\tconfigure_outstation\\tChange outstation connection settings.')\n print('\\tping_master\\t\\t\\t\\tTest connection to master.')\n print('\\thelp\\t\\t\\t\\t\\tDisplay command-line help.')\n print('\\tquit')\n\n def do_list_functions(self, line):\n \"\"\"List all function ID's. Command syntax is: list_functions\"\"\"\n func_ids = [func_def.function_id for func_id, func_def in self.function_definitions.functions_by_id().items()]\n print('\\tAvailable functions:')\n for func_id in func_ids:\n print('\\t\\t{}'.format(func_id))\n\n def do_list_function_tests(self, arg):\n \"\"\"List available function tests for given function. 
Command syntax is: list_function_tests <function_id>\"\"\"\n if not arg:\n print('\\tNo function ID given.\\n')\n print('\\tCorrect usage is: list_function_tests <function_id>')\n else:\n function_tests = FunctionTest.objects.filter(function_id=arg)\n if function_tests.count() == 0:\n print(\"\\tNo function tests for {}\".format(arg))\n else:\n print(\"\\tFunction tests for: {}\".format(arg))\n for function_test in function_tests:\n print(\"\\t\\t{}\".format(function_test.name))\n\n def do_send(self, arg):\n \"\"\"Send function test with specified name. Command syntax is: send <function test name>\"\"\"\n connection_error = None\n correct_id = None\n if not arg:\n print('\\tNo function test name given.\\n')\n print('\\tCorrect usage is: send <function test name>')\n else:\n try:\n function_tests = FunctionTest.objects.filter(name=arg)\n if function_tests.count() > 1:\n print(\"\\tThere is more than one function test with name {}\".format(arg))\n print(\"\\tWhich function does this function test belong to?\")\n for function_id in function_tests.values_list('function_id', flat=True):\n print('\\t\\t{}'.format(function_id))\n correct_id = input(\"Choose function ID: \")\n try:\n function_test = FunctionTest.objects.get(function_id=correct_id, name=arg)\n except FunctionTest.DoesNotExist:\n print('\\nCannot find a function test with ID: {} and name: {}'.format(correct_id,\n arg))\n return\n\n func_test_dict = get_function_test_dict_from_db(arg, func_id=correct_id)\n self.save_before_send(func_test_dict)\n\n try:\n statuses = MC.get_client().send_function_test(current_server_ip, current_server_port,\n clean_func_test(func_test_dict))\n if not statuses:\n print(\"Error sending function test\")\n return\n except requests.exceptions.ConnectionError:\n connection_error = 'Could not reach outstation.'\n if not connection_error:\n if statuses.get('errors', None) is not None:\n print(statuses['errors'])\n return\n else:\n self.update_statuses(statuses)\n print('\\tResults: \\n')\n print(json.dumps(self.format_output(statuses,\n func_test_dict.get(\"id\",\n func_test_dict.get(\"function_id\",\n None))), indent=4))\n else:\n print(\"\\tThere is a connection error\")\n except FunctionTest.DoesNotExist:\n print(\"\\tNo function test with name {} exists.\".format(arg))\n\n def save_before_send(self, func_test_dict):\n function_def = self.function_definitions.function_for_id(func_test_dict['id'])\n now = datetime.now(timezone.utc)\n for point in function_def.all_points():\n if point.is_array_head_point:\n head_point = Point.objects.create(name=point.name,\n timestamp=now,\n value='',\n status=DIRECTION_SENT)\n head_point.save()\n counter = 0\n for inner_array_point in func_test_dict[point.name]:\n for array_point in point.array_points:\n ArrayPoint.objects.create(name=array_point[\"name\"],\n point=head_point,\n timestamp=now,\n value=inner_array_point[array_point['name']],\n status=DIRECTION_SENT,\n index=counter).save()\n counter += 1 # one index per repeated group of array points\n\n else:\n Point.objects.create(name=point.name,\n timestamp=now,\n value=func_test_dict[point.name],\n status='').save()\n\n def update_statuses(self, statuses):\n \"\"\"\n Update sent points with statuses that have come back from outstation.\n\n :param statuses: dictionary where the keys are point indices and the values are dictionaries\n with point \"group\" and \"status\" values from outstation.\n :return:\n \"\"\"\n for k, v in statuses.items(): # k = index, v = dictionary containing 'group' and 'status'\n point_def = self.point_definitions.for_group_and_index(int(v['group']), int(k))\n if point_def.is_array_point:\n 
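# array points are tracked per-index in ArrayPoint rows; plain points are looked up by name\n 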
if point_def.is_array_head_point:\n\n # This is a special case because the 'head point' we receive is simultaneously\n # the first 'inner' array point and the marker of the beginning of the array.\n # We need to modify both a Point model object and an ArrayPoint model object\n point = Point.objects.filter(name=point_def.name)[0]\n point.status = v['status']\n point.direction = DIRECTION_SENT\n point.save()\n # Now handle the array point\n array_point = ArrayPoint.objects.filter(index=0).order_by('-timestamp').all()[0]\n array_point.status = v['status']\n array_point.direction = DIRECTION_SENT\n array_point.save()\n else:\n # The array point isn't a head array point\n starting_index = point_def.base_point_def.index\n lookup_index = int(k) - starting_index\n array_point = ArrayPoint.objects.filter(index=lookup_index).order_by('-timestamp').all()[0]\n array_point.status = v['status']\n array_point.direction = DIRECTION_SENT\n array_point.save()\n\n else:\n try:\n point = Point.objects.filter(name=point_def.name).order_by('-timestamp').all()[0]\n point.status = v['status']\n point.direction = DIRECTION_SENT\n point.save()\n except IndexError:\n pass\n\n def order_function_test_dict(self, ft_dict):\n \"\"\"\n Orders dictionary representing function test.\n :param ft_dict:\n :return:\n \"\"\"\n function_def = self.function_definitions.function_for_id(ft_dict['id'])\n result = OrderedDict()\n result['id'] = ft_dict['id']\n result['name'] = ft_dict['name']\n for point in function_def.all_points():\n try:\n result[point.name] = ft_dict[point.name]\n except KeyError:\n result[point.name] = ''\n return result\n\n def do_dump(self, arg):\n \"\"\"Print function test. Command syntax is: dump <function test name>\"\"\"\n correct_id = None\n if not arg:\n print('\\tNo function test name given.\\n')\n print('\\tCorrect usage is: dump <function test name>')\n else:\n try:\n function_tests = FunctionTest.objects.filter(name=arg)\n if function_tests.count() > 1:\n print(\"\\tThere is more than one function test with name {}\".format(arg))\n print(\"\\tWhich function does this function test belong to?\")\n for function_id in function_tests.values_list('function_id', flat=True):\n print('\\t\\t{}'.format(function_id))\n correct_id = input(\"Choose function ID: \")\n try:\n FunctionTest.objects.get(function_id=correct_id, name=arg)\n except FunctionTest.DoesNotExist:\n print('\\nCannot find a function test with ID: {} and name: {}'.format(correct_id, arg))\n return\n func_test_dict = get_function_test_dict_from_db(arg, func_id=correct_id)\n\n print(json.dumps(self.order_function_test_dict(func_test_dict), indent=2))\n except FunctionTest.DoesNotExist:\n print(\"\\tNo function test with name {} exists.\".format(arg))\n\n def format_output(self, statuses, function_id):\n \"\"\"\n Correctly assemble and order JSON representation of function test to print to the screen.\n :param statuses:\n :param function_id:\n :return:\n \"\"\"\n\n func_step_names = [step.point_def.name for step in self.function_definitions.function_for_id(function_id).steps]\n output = dict()\n array_points = defaultdict(list)\n array_head_points = []\n\n for k, v in statuses.items(): # k = index, v = dictionary containing 'group' and 'status'\n point_def = self.point_definitions.for_group_and_index(int(v['group']), int(k))\n if point_def.is_array and not point_def.is_array_head_point:\n inner_dict = dict()\n inner_dict['group'] = v['group']\n inner_dict['status'] = v['status']\n inner_dict['index'] = int(k)\n 
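# bucket each element's status under its element name; buckets are sorted by index below\n 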
array_points[point_def.array_element_name].append(inner_dict)\n if point_def.is_array and point_def.is_array_head_point:\n first_point = point_def.array_points[0]['name']\n inner_dict = dict()\n inner_dict['group'] = v['group']\n inner_dict['status'] = v['status']\n inner_dict['index'] = int(k)\n array_points[first_point].append(inner_dict)\n if point_def.is_array_head_point:\n array_head_points.append(point_def)\n\n output[point_def.name] = v\n\n for key in array_points.keys():\n array_points[key] = sorted(array_points[key], key=lambda point: point['index'])\n\n for array_head_point in array_head_points:\n array_point_names = [a['name'] for a in array_head_point.array_points]\n array_point_list = list()\n for i in range(0, array_head_point.array_times_repeated):\n inner_dict = OrderedDict()\n for array_point_name in array_point_names:\n try:\n inner_dict[array_point_name] = array_points[array_point_name][i]\n except IndexError:\n pass\n if inner_dict:\n array_point_list.append(inner_dict)\n output[array_head_point.name] = array_point_list\n\n # Order the steps\n output_result = OrderedDict()\n for step_name in func_step_names:\n output_result[step_name] = output[step_name]\n\n return output_result\n\n def do_server_settings(self, arg):\n \"\"\"View server connection settings. Command syntax is: server_settings\"\"\"\n print('\\tCurrent server IP address is: {}'.format(str(current_server_ip)))\n print('\\tCurrent server port is: {}'.format(str(current_server_port)))\n\n def do_configure_server(self, arg):\n \"\"\"Change server connection settings. Command syntax is: configure_server <ip_address> <port>\"\"\"\n global current_server_ip, current_server_port\n if not arg:\n print('\\tNo server connection settings given.\\n')\n print('\\tCorrect usage is: configure_server <ip_address> <port>')\n else:\n args = arg.split(' ')\n if len(args) < 2:\n print('\\tMust enter server ip_address and port.')\n print('\\tCorrect usage is: configure_server <ip_address> <port>')\n elif len(args) > 2:\n print('\\tToo many arguments given.')\n print('\\tCorrect usage is: configure_server <ip_address> <port>')\n else:\n current_server_ip, current_server_port = args[0], args[1]\n print('\\tNow using IP address {} and port {} to connect to master'.format(str(args[0]), str(args[1])))\n\n def do_outstation_settings(self, arg):\n \"\"\"View outstation connection settings. Command syntax is: outstation_settings\"\"\"\n print('\\tCurrent outstation IP address is: {}'.format(str(current_outstation_ip)))\n print('\\tCurrent outstation port is: {}'.format(str(current_outstation_port)))\n\n def do_configure_outstation(self, arg):\n \"\"\"Change outstation connection settings. Command syntax is: configure_outstation <ip_address> <port>\"\"\"\n global current_outstation_ip, current_outstation_port\n if not arg:\n print('\\tNo outstation connection settings given.\\n')\n print('\\tCorrect usage is: configure_outstation <ip_address> <port>')\n else:\n args = arg.split(' ')\n if len(args) < 2:\n print('\\tMust enter outstation ip_address and port.')\n print('\\tCorrect usage is: configure_outstation <ip_address> <port>')\n elif len(args) > 2:\n print('\\tToo many arguments given.')\n print('\\tCorrect usage is: configure_outstation <ip_address> <port>')\n else:\n current_outstation_ip, current_outstation_port = args[0], args[1]\n print('\\tNow using IP address {} and port {} to connect to outstation'.format(str(args[0]),\n str(args[1])))\n\n def do_ping_master(self, arg):\n \"\"\"Check connection to the DNP3 Master. 
Command syntax is: ping_master\"\"\"\n data = {'ip': current_server_ip,\n 'port': current_server_port}\n\n result = MC.get_client().get_master_conn_status(current_server_ip, current_server_port, data)\n\n if type(result) == bool:\n print(\"\\tSuccessfully reached master.\")\n else:\n print('\\tError: ' + result)\n\n def do_quit(self, line):\n \"\"\"Quit the command line interface. Command syntax is: quit\"\"\"\n MesaMasterApplication.get_app().shutdown()\n self.application.shutdown()\n exit()\n\n\ndef main():\n cmd_interface = MesaMasterCmd()\n _log.debug('Initialization complete. In command loop.\\n')\n cmd_interface.startup()\n _log.debug('Exiting.')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mesaserver/mesaserver/scripts/mesa_master_cmd.py","file_name":"mesa_master_cmd.py","file_ext":"py","file_size_in_byte":19661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"338077642","text":"#!/usr/bin/python -uB\n# -*- coding: utf-8 -*-\n\n# Classes of methods\nu_vers = ['Unstructured']\nbase_vers = ['TransE', 'ScalE']\nscaltrans_vers = ['ScalTransE']\nxi_vers = ['XiTransE', 'XiScalE']\nsemixi_vers = ['XiScalTransSE', 'XiTransScalSE']\nxiscaltrans_vers = ['XiScalTransE']\n\nsimple_method_set = base_vers + xi_vers\n\nsim_set = ['L1', 'L2', 'dot']\nu_sim_set = ['L2_sqr']\n\nmargin_set = [1, 2, 5, 10]\nndim_set = [20, 50, 100, 200, 300]\nnhid_set = [20, 50, 100, 200, 300]\n\nepochs = 100\nnbatches = 10\nlr = 0.1\nseed = 123\n\ntrain_path = 'data/fb15k/FB15k-train.pkl'\nvalid_path = 'data/fb15k/FB15k-valid.pkl'\ntest_path = 'data/fb15k/FB15k-test.pkl'\n\n# ADAGRAD\n# def adagrad(param, rate, epsilon, gradient, updates, param_squared_gradients):\nc, method = 0, 'ADAGRAD'\n\n# def adagrad(param, rate, epsilon, gradient, updates, param_squared_gradients):\ncmd_adagrad = ('./learn_parameters.py --seed=%d --strategy=%s --totepochs=%d --test_all=%d --lr=%f --name=fb15k/fb15k_%s_%d '\n ' --train=%s --valid=%s --test=%s --nbatches=%d --no_rescaling --filtered '\n ' --op=%s --sim=%s --ndim=%d --nhid=%d --margin=%d' # varying params\n ' > logs/fb15k/fb15k.%s_%s_%d_%d_%d_%d.log 2>&1')\n\n\nfor op in simple_method_set:\n for sim in sim_set:\n for ndim in ndim_set:\n nhid = ndim\n for margin in margin_set:\n print(cmd_adagrad % (seed, method, epochs, epochs, lr, op, c, train_path, valid_path, test_path, nbatches, op, sim, ndim, nhid, margin, op, sim, ndim, nhid, margin, c))\n c += 1\n\nfor op in u_vers:\n for sim in u_sim_set:\n for ndim in ndim_set:\n nhid = ndim\n for margin in margin_set:\n print(cmd_adagrad % (seed, method, epochs, epochs, lr, op, c, train_path, valid_path, test_path, nbatches, op, sim, ndim, nhid, margin, op, sim, ndim, nhid, margin, c))\n c += 1\n","sub_path":"scripts/fb15k/fb15k_v1.py","file_name":"fb15k_v1.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645462832","text":"from django.test import TestCase\nfrom datetime import datetime, date, time\nfrom api import serializers\nfrom core import models\n\n\nclass BillRecordSerializerTestCase(TestCase):\n\n def test_duration_representation(self):\n period = datetime(2019, 1, 1,)\n bill = models.Bill.objects.create(\n telephone='9999999999',\n period=period\n )\n record = models.BillRecord.objects.create(\n bill=bill,\n destination='99888888888',\n start_date=date(2019, 1, 1),\n start_time=time(0, 0),\n duration=60,\n price=1.75\n )\n serializer = 
serializers.BillRecordSerializer(record)\n\n self.assertEqual(serializer.data['duration'], '0h1m0s')\n\n record = models.BillRecord.objects.create(\n bill=bill,\n destination='99888888888',\n start_date=date(2019, 1, 1),\n start_time=time(0, 0),\n duration=4493,\n price=1.75\n )\n serializer = serializers.BillRecordSerializer(record)\n\n self.assertEqual(serializer.data['duration'], '1h14m53s')\n\n\nclass BillSerializerTestCase(TestCase):\n\n def test_period_representation(self):\n period = datetime(2019, 1, 1)\n self.bill = models.Bill.objects.create(\n telephone='9999999999',\n period=period.date()\n )\n serializer = serializers.BillSerializer(self.bill)\n self.assertEqual(serializer.data['period'], '2019-01')\n","sub_path":"src/api/tests/test_serializers.py","file_name":"test_serializers.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"550480215","text":"import ssl\nimport urllib.request\nimport pandas as pd\nimport csv\nfrom Constants import *\nfrom util_logging import *\nimport socket\n\ndef getImage(id, indexOf, totalData, fromIndex, fromTotal):\n\tis_image_created = False\n\tcontext = ssl._create_unverified_context()\n\tssl._create_default_https_context = ssl._create_unverified_context\n\tsocket.setdefaulttimeout(60)\n\tproxy = urllib.request.ProxyHandler({})\n\topener = urllib.request.build_opener(proxy)\n\topener.addheaders = [('User-Agent','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/603.1.30 (KHTML, like Gecko) Version/10.1 Safari/603.1.30')]\n\turllib.request.install_opener(opener)\n\n\tfull_path = BASE_HTML_PATH + \"caleg/images/\" + id + \".jpg\"\n\n\ttry:\n\t\tprint(URL_IMAGE_CALEG.format(id))\n\n\t\turllib.request.urlretrieve(URL_IMAGE_CALEG.format(id), full_path)\n\t\tmessage_string = \"–––– creating image Caleg DPRD PROV :: ({}/{}) from ({}/{}) - idCaleg={} created ––––\".format(indexOf, totalData, fromIndex, fromTotal, id)\n\t\tis_image_created = True\n\texcept Exception as e:\n\t\tmessage_string = \"ERROR !!! Requesting IMAGE Caleg DPRD PROV :: ({}/{}) from ({}/{}) - idCaleg={} –– {}\".format(indexOf, totalData, fromIndex, fromTotal, id, str(e))\n\t\tis_image_created = False\n\n\tprint(message_string)\n\tpecker(LOG_IMAGE_CALEG, message_string)\n\treturn is_image_created\n\ndef byIndexOf(indexOf):\n\ttotal_data_generated = 0\n\tdata_generated = 0\n\tdata_error = 0\n\n\ttry:\n\t\tINDEX_OF = int(indexOf)\n\texcept Exception as e:\n\t\treturn print(\"INVALID INPUT, SMARTASS! ––{}\".format(str(e)))\n\t\t\n\tmessage_string = \"===== START :: APPENDING IMAGE OF CALEG DPRD PROV ––BY START FROM INDEX OF={}=====\".format(INDEX_OF)\n\tprint(message_string)\n\tpecker(LOG_IMAGE_CALEG, message_string)\n\n\tcsv_file = 'data_caleg_prov.csv'\n\n\ttry:\n\t\tdf = pd.read_csv(csv_file)\n\t\tid_list = df['id'].tolist()\n\t\tfor i,data in enumerate(id_list):\n\t\t\tif ((i+1) >= INDEX_OF):\n\t\t\t\tif (getImage(str(data), str((i + 1) - INDEX_OF + 1), str(len(id_list) - INDEX_OF + 1), str(i + 1), str(len(id_list)))):\n\t\t\t\t\tdata_generated += 1\n\t\t\t\telse:\n\t\t\t\t\tdata_error += 1\n\t\ttotal_data_generated = data_generated + data_error\n\t\tmessage_string = \"===== END :: APPENDING IMAGE OF CALEG DPRD PROV ––BY START FROM INDEX OF={}:: generated={}, error={}, appended={} data appended from {} total, =====\".format(str(INDEX_OF), data_generated, data_error, total_data_generated, len(id_list))\n\texcept Exception as e:\n\t\tmessage_string = \"ERROR !!! 
failed generating Caleg DPRD PROV ––BY START FROM INDEX OF={} –– {}\".format(INDEX_OF, str(e))\n\n\tprint(message_string)\n\tpecker(LOG_IMAGE_CALEG, message_string)\n\nTYPE_APPEND = input(\"1 - Single id\\n 2 - From indexOf\\nEnter appending type : \")\n\nif(TYPE_APPEND == '1'):\n\ttotal_data_generated = 0\n\tdata_generated = 0\n\tdata_error = 0\n\n\tprint(\"APPENDING SINGLE ID\")\n\n\tID_CALEG = input(\"Enter idCaleg: \")\n\n\tmessage_string = \"===== START :: APPENDING IMAGE OF CALEG DPRD PROV ––BY SINGLE ID idCaleg={}=====\".format(ID_CALEG)\n\tprint(message_string)\n\tpecker(LOG_IMAGE_CALEG, message_string)\n\n\tif (getImage(ID_CALEG, '1', '1', 'SINGLE', 'SINGLE')):\n\t\tdata_generated += 1\n\telse:\n\t\tdata_error += 1\n\n\ttotal_data_generated = data_generated + data_error\n\tmessage_string = \"===== END :: APPENDING IMAGE OF CALEG DPRD PROV ––BY SINGLE ID idCaleg={}:: generated={}, error={}, total={} data appended, =====\".format(ID_CALEG, data_generated, data_error, total_data_generated)\n\tprint(message_string)\n\tpecker(LOG_IMAGE_CALEG, message_string)\nelif (TYPE_APPEND == '2'):\n\tprint (\"APPENDING BY INDEXOF\")\n\n\tINDEX_OF = input(\"Enter start from index of = \")\n\tbyIndexOf(INDEX_OF)\nelse:\n\tprint(\"WRONG INPUT, SMARTASS!\")\n\n\n\n","sub_path":"appending_image_caleg_prov.py","file_name":"appending_image_caleg_prov.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"444932039","text":"# command line example:\n# python ps_report.py --subject=R1056M --task=FR1 --workspace-dir=~/scratch/py_9 --matlab-path=~/eeg --matlab-path=~/matlab/beh_toolbox --matlab-path=~/RAM/RAM_reporting --matlab-path=~/RAM/RAM_sys2Biomarkers --python-path=~/RAM_UTILS_GIT\n\n# python ps_report.py --subject=R1056M --task=FR1 --workspace-dir=/data10/scratch/mswat/py_run_9 --matlab-path=~/eeg --matlab-path=~/matlab/beh_toolbox --matlab-path=~/RAM/RAM_reporting --matlab-path=~/RAM/RAM_sys2Biomarkers --python-path=~/RAM_UTILS_GIT\n\n# python ps_report.py --subject=R1086M --task=FR1 --workspace-dir=/data10/scratch/mswat/R1086M_2 --matlab-path=~/eeg --matlab-path=~/matlab/beh_toolbox --matlab-path=~/RAM/RAM_reporting --matlab-path=~/RAM/RAM_sys2Biomarkers --matlab-path=~/RAM_UTILS_GIT/tests/ps2_report/AuxiliaryMatlab --python-path=~/RAM_UTILS_GIT\nimport sys\nfrom os.path import *\n\n# sys.path.append(join(dirname(__file__),'..','..'))\n\nfrom ReportUtils import CMLParser,ReportPipeline\n\nimport numpy as np\n\n\ncml_parser = CMLParser(arg_count_threshold=1)\ncml_parser.arg('--subject','R1156D')\ncml_parser.arg('--task','RAM_FR1')\ncml_parser.arg('--workspace-dir','/scratch/busygin/FR1_ppc')\ncml_parser.arg('--mount-point','')\n#cml_parser.arg('--recompute-on-no-status')\n# cml_parser.arg('--exit-on-no-change')\n\n\nargs = cml_parser.parse()\n\n\nfrom FR1EventPreparation import FR1EventPreparation\n\nfrom MontagePreparation import MontagePreparation\n\nfrom ComputeFR1Wavelets import ComputeFR1Wavelets\n\nfrom ComputePPCFeatures import ComputePPCFeatures\n\nfrom ComputeOutsamplePPCFeatures import ComputeOutsamplePPCFeatures\n\nfrom ComputeTTest import ComputeTTest\n\nfrom ComputeClassifier import ComputeClassifier\n\n\n# turn it into command line options\n\nclass Params(object):\n def __init__(self):\n self.width = 5\n\n self.fr1_start_time = 0.0\n self.fr1_end_time = 1.366\n self.fr1_buf = 1.365\n\n self.filt_order = 4\n\n self.freqs = np.logspace(np.log10(3), np.log10(180), 8)\n #self.freqs = 
np.array([180.0])\n\n self.penalty_type = 'l2'\n self.C = 7.2e-4\n\n self.n_perm = 200\n\n\nparams = Params()\n\n\n\n# sets up processing pipeline\nreport_pipeline = ReportPipeline(subject=args.subject, task=args.task,experiment=args.task,\n workspace_dir=join(args.workspace_dir,args.subject), mount_point=args.mount_point, exit_on_no_change=args.exit_on_no_change,\n recompute_on_no_status=args.recompute_on_no_status)\n\n\nreport_pipeline.add_task(FR1EventPreparation(mark_as_completed=False))\n\nreport_pipeline.add_task(MontagePreparation(params, mark_as_completed=True))\n\nreport_pipeline.add_task(ComputeFR1Wavelets(params=params, mark_as_completed=True))\n\nreport_pipeline.add_task(ComputePPCFeatures(params=params, mark_as_completed=True))\n\nreport_pipeline.add_task(ComputeOutsamplePPCFeatures(params=params, mark_as_completed=True))\n\n#report_pipeline.add_task(ComputeTTest(params=params, mark_as_completed=False))\n\nreport_pipeline.add_task(ComputeClassifier(params=params, mark_as_completed=False))\n\n\n# starts processing pipeline\nreport_pipeline.execute_pipeline()\n","sub_path":"tests/ppc_classifier/ppc_classifier.py","file_name":"ppc_classifier.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18966492","text":"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport math\nimport torch\nimport gensim\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch import optim\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence\nfrom torch.autograd import Variable\n\n\nclass Hred(nn.Module):\n OPTIM_OPTS = {\n 'adadelta': optim.Adadelta,\n 'adagrad': optim.Adagrad,\n 'adam': optim.Adam,\n 'adamax': optim.Adamax,\n 'asgd': optim.ASGD,\n 'lbfgs': optim.LBFGS,\n 'rmsprop': optim.RMSprop,\n 'rprop': optim.Rprop,\n 'sgd': optim.SGD,\n }\n\n RNN_OPTS = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}\n\n def __init__(self, opt, num_features, start_idx, padding_idx=0, longest_label=1):\n super().__init__()\n self.opt = opt\n\n self.longest_label = longest_label\n\n self.register_buffer('START', torch.LongTensor(start_idx))\n # get index of null token from dictionary (probably 0)\n self.NULL_IDX = padding_idx\n self.END = 1\n\n # store important params directly\n hsz = opt['hiddensize']\n emb = opt['embeddingsize']\n self.hidden_size = hsz\n self.emb_size = emb\n self.num_layers = opt['numlayers']\n self.learning_rate = opt['learning_rate']\n self.truncate = opt['truncate']\n self.attention = opt['attention']\n self.dirs = 2 if opt['bi_encoder'] else 1\n if type(opt['gpu']) is str:\n self.gpu = [int(index) for index in opt['gpu'].split(',')]\n else:\n self.gpu = [opt['gpu']]\n\n # set up tensors\n self.zeros_decs = {}\n\n rnn_class = Hred.RNN_OPTS[opt['encoder']]\n self.encoder = rnn_class(opt['embeddingsize'], opt['hiddensize'],\n opt['numlayers'], bidirectional=opt['bi_encoder'],\n dropout=opt['dropout'])\n\n if opt['context'] != 'same':\n rnn_class = Hred.RNN_OPTS[opt['context']]\n\n dec_isz = opt['embeddingsize']\n\n self.context = rnn_class(\n opt['numlayers'] * opt['hiddensize'], opt['contexthiddensize'],\n opt['numlayers'], dropout=opt['dropout'])\n\n if opt['decoder'] != 
'same':\n rnn_class = Hred.RNN_OPTS[opt['decoder']]\n\n self.decoder = rnn_class(\n dec_isz, opt['hiddensize'], opt['numlayers'], dropout=opt['dropout'])\n\n self.lt = nn.Embedding(\n num_features, opt['embeddingsize'], padding_idx=self.NULL_IDX)\n if opt['embed'] is not None:\n self.load_pretrained()\n\n if 'psize' not in opt:\n opt['psize'] = opt['embeddingsize']\n\n if opt['hiddensize'] == opt['psize']:\n self.o2e = lambda x: x\n else:\n self.o2e = nn.Linear(opt['hiddensize'], opt['psize'])\n\n self.ch2h = nn.Linear(\n self.num_layers * opt['contexthiddensize'],\n self.num_layers * opt['hiddensize'])\n self.tanh = nn.Tanh()\n\n share_output = opt['lookuptable'] in ['dec_out', 'all'] and \\\n opt['psize'] == opt['embeddingsize']\n shared_weight = self.lt.weight if share_output else None\n self.e2s = Linear(opt['psize'], num_features,\n bias=False, shared_weight=shared_weight)\n self.dropout = nn.Dropout(opt['dropout'])\n\n self.use_attention = False\n\n self.episode_concat = opt['episode_concat']\n self.training = True\n self.generating = False\n self.local_human = False\n\n if opt.get('max_seq_len') is not None:\n self.max_seq_len = opt['max_seq_len']\n else:\n self.max_seq_len = opt['max_seq_len'] = 50\n\n def load_pretrained(self):\n model = gensim.models.word2vec.Word2Vec.load(self.opt['embed']).wv\n std = model.vectors.std().item()\n n_unk = 0\n for i in range(len(self.lt.weight)):\n if i == 0:\n self.lt.weight.data[i].zero_()\n else:\n word = self.opt['dict'].vec2txt([i])\n\n try:\n self.lt.weight.data[i] = torch.from_numpy(model[word])\n except KeyError:\n n_unk += 1\n self.lt.weight.data[i].normal_(0, std)\n print('unk_num: {}'.format(n_unk))\n\n def cuda(self):\n if len(self.gpu) > 1:\n self.START = self.START.cuda(self.gpu[0])\n self.lt.cuda(self.gpu[0])\n self.encoder.cuda(self.gpu[0])\n if len(self.gpu) == 4:\n self.context.cuda(self.gpu[1])\n self.ch2h.cuda(self.gpu[1])\n self.decoder.cuda(self.gpu[2])\n self.dropout.cuda(self.gpu[2])\n if type(self.o2e) is nn.Linear:\n self.o2e.cuda(self.gpu[2])\n self.e2s.cuda(self.gpu[3])\n else:\n self.context.cuda(self.gpu[0])\n self.ch2h.cuda(self.gpu[0])\n self.decoder.cuda(self.gpu[1])\n self.dropout.cuda(self.gpu[1])\n if type(self.o2e) is nn.Linear:\n self.o2e.cuda(self.gpu[1])\n self.e2s.cuda(self.gpu[-1])\n else:\n super().cuda()\n\n def zeros(self, device_id):\n # if device_id in self.zeros_decs:\n # ret = self.zeros_decs[device_id]\n # else:\n ret = torch.zeros(1, 1, 1).cuda(device_id)\n # self.zeros_decs[device_id] = ret\n\n return ret\n\n def _encode(self, xs, xlen, dropout=False):\n \"\"\"Call encoder and return output and hidden states.\"\"\"\n encoder_device = next(self.encoder.parameters()).get_device()\n batchsize = len(xs)\n\n # first encode context\n xes = self.lt(xs).transpose(0, 1)\n\n zeros = self.zeros(encoder_device)\n if list(zeros.size()) != [self.dirs * self.num_layers,\n batchsize, self.hidden_size]:\n zeros.resize_(self.dirs * self.num_layers,\n batchsize, self.hidden_size).fill_(0)\n hidden = Variable(zeros, requires_grad=False)\n\n xlen, idx = xlen.sort(descending=True)\n zero_len = (xlen == -1).nonzero()\n hidden = hidden.index_select(1, idx)\n if len(zero_len):\n first_zero_idx = zero_len[0].item()\n xes = xes.index_select(1, idx[:first_zero_idx])\n xes = pack_padded_sequence(\n xes, (xlen[:first_zero_idx] + 1).data.cpu().numpy())\n hidden, hidden_left = hidden.split(\n [first_zero_idx, batchsize - first_zero_idx], 1)\n else:\n xes = xes.index_select(1, idx)\n xes = pack_padded_sequence(xes, (xlen + 
1).data.cpu().numpy())\n\n # self.encoder.flatten_parameters()\n _, hidden = self.encoder(xes, hidden.contiguous())\n\n hidden = hidden.view(\n -1, self.dirs,\n batchsize - len(zero_len), self.hidden_size).max(1)[0]\n\n if len(zero_len):\n hidden = torch.cat((hidden, hidden_left), 1)\n\n undo_idx = idx.clone()\n for i in range(len(idx)):\n undo_idx[idx[i]] = i\n\n hidden = hidden.index_select(1, undo_idx)\n\n return hidden\n\n def _context(self, hidden, context_hidden):\n batchsize = hidden.size(1)\n context_device = next(self.context.parameters()).get_device()\n\n hidden = hidden.transpose(0, 1).contiguous().view(1, batchsize, -1)\n\n if len(self.gpu) > 1:\n hidden = hidden.cuda(context_device)\n\n if context_hidden is None:\n zeros = self.zeros(context_device)\n if list(zeros.size()) != [self.dirs * self.num_layers, batchsize,\n self.opt['contexthiddensize']]:\n zeros.resize_(self.dirs * self.num_layers,\n batchsize, self.opt['contexthiddensize']).fill_(0)\n context_hidden = Variable(zeros, requires_grad=False)\n\n _, context_hidden = self.context(hidden, context_hidden)\n hidden = context_hidden.transpose(0, 1).contiguous().view(batchsize, -1)\n\n hidden = self.tanh(self.ch2h(hidden).view(\n batchsize, self.num_layers, -1).transpose(0, 1))\n\n return hidden, context_hidden\n\n def _decode(self, batchsize, output, ys, hidden):\n decoder_device = next(self.decoder.parameters()).get_device()\n lt_device = next(self.lt.parameters()).get_device()\n # update the model based on the labels\n scores = []\n if len(self.gpu) > 1:\n output = output.cuda(decoder_device)\n hidden = hidden.cuda(decoder_device)\n\n preds = []\n if ys is None:\n done = [False] * batchsize\n total_done = 0\n max_len = 0\n while total_done < batchsize and max_len < self.longest_label:\n # keep producing tokens until we hit END or max length for each\n output, hidden = self.decoder(output, hidden)\n pred, score = self.hidden_to_idx(output, dropout=self.training)\n preds.append(pred)\n scores.append(score)\n\n if len(self.gpu) > 1:\n pred = pred.cuda(lt_device)\n\n output = self.lt(pred).unsqueeze(0)\n\n if len(self.gpu) > 1:\n output = output.cuda(decoder_device)\n\n max_len += 1\n for b in range(batchsize):\n if not done[b]:\n # only add more tokens for examples that aren't done yet\n if pred.data[b] == self.END:\n # if we produced END, we're done\n done[b] = True\n total_done += 1\n else:\n # keep track of longest label we've ever seen\n self.longest_label = max(self.longest_label, ys.size(1))\n\n for i in range(ys.size(1)):\n output, hidden = self.decoder(output, hidden)\n pred, score = self.hidden_to_idx(output, dropout=self.training)\n preds.append(pred)\n scores.append(score)\n y = ys.select(1, i)\n if len(self.gpu) > 1:\n y = y.cuda(lt_device)\n\n output = self.lt(y).unsqueeze(0)\n\n if len(self.gpu) > 1:\n output = output.cuda(decoder_device)\n preds = torch.stack(preds, 1)\n\n return scores, preds\n\n def hidden_to_idx(self, hidden, dropout=False):\n \"\"\"Convert hidden state vectors into indices into the dictionary.\"\"\"\n e2s_device = next(self.e2s.parameters()).get_device()\n if hidden.size(0) > 1:\n raise RuntimeError('bad dimensions of tensor:', hidden)\n hidden = hidden.squeeze(0)\n if dropout:\n hidden = self.dropout(hidden) # dropout over the last hidden\n scores = self.o2e(hidden)\n if len(self.gpu) > 2:\n scores = scores.cuda(e2s_device)\n scores = self.e2s(scores)\n scores = F.log_softmax(scores, 1)\n _max_score, idx = scores.max(1)\n return idx, scores\n\n def forward(self, xses, dropout, xlen_ts, 
ys):\n batchsize = len(xses[0])\n\n context_hidden = None\n for idx in range(0, len(xses)):\n hidden = self._encode(xses[idx], xlen_ts[idx], dropout)\n hidden, context_hidden = self._context(hidden, context_hidden)\n\n x = Variable(self.START, requires_grad=False)\n xe = self.lt(x).unsqueeze(1)\n dec_xes = xe.expand(xe.size(0), batchsize, xe.size(2))\n\n scores, preds = self._decode(batchsize, dec_xes, ys, hidden)\n\n return scores, preds\n\n\nclass Linear(nn.Module):\n \"\"\"Custom Linear layer which allows for sharing weights (e.g. with an\n nn.Embedding layer).\n \"\"\"\n\n def __init__(self, in_features, out_features, bias=True,\n shared_weight=None):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.shared = shared_weight is not None\n\n # init weight\n if not self.shared:\n self.weight = Parameter(torch.Tensor(out_features, in_features))\n else:\n if (shared_weight.size(0) != out_features or\n shared_weight.size(1) != in_features):\n raise RuntimeError('wrong dimensions for shared weights')\n self.weight = shared_weight\n\n # init bias\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.weight.size(1))\n if not self.shared:\n # weight is shared so don't overwrite it\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, input):\n weight = self.weight\n if self.shared:\n # detach weight to prevent gradients from changing weight\n # (but need to detach every time so weights are up to date)\n weight = weight.detach()\n return F.linear(input, weight, self.bias)\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ')'\n","sub_path":"parlai/agents/hred/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":12274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"371506417","text":"# Work with libraries like csv, xlwt, xlrd, JSON.\r\n\r\nfrom xlrd import open_workbook\r\n\r\nif __name__ == '__main__':\r\n\r\n\tworkbook = open_workbook('sample_xlsx.xlsx')\r\n\r\n\tfor sheet in workbook.sheets():\r\n\t\tprint()\r\n\t\tprint(sheet.name,\" contents :\")\r\n\t\tfor row in range(sheet.nrows):\r\n\t\t\trow_vals = []\r\n\t\t\tfor col in range(sheet.ncols):\r\n\t\t\t\trow_vals.append(sheet.cell(row,col).value)\r\n\t\t\tprint(\"Row\", row+1, \"contents :\", row_vals)","sub_path":"July_25/Task_read_xlsx.py","file_name":"Task_read_xlsx.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"622811736","text":"#!/usr/bin/python3 -O\n# -*- coding: utf-8 -*-\nimport time\nimport sys\nimport os\nfrom multiprocessing import Process, freeze_support\n\ndef info( title ):\n if hasattr( os, 'getppid' ): # only available on Unix\n print( '{0}:\\tPID={1} PPID={2}'.format( title, os.getpid(), os.getppid() ) )\n else:\n print( '{0}:\\tPID={1}'.format( title, os.getpid() ) )\n\ndef fun( name ):\n info( 'child process' )\n print( 'process {0} is running the function with argument {1}'.format( os.getpid(), name ) )\n time.sleep( 0.5 )\n \nif __name__ == '__main__':\n freeze_support()\n nproc = len( sys.argv ) > 1 and int( sys.argv[ 1 ] ) or 3\n print( 'number of child processes ', nproc )\n info( 'parent process' )\n 
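# create all the workers first, then start them, then wait for each one to finish\n 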
procs = []\n for i in range( nproc ):\n procs.append( Process( target = fun, args = ( i, ) ) )\n for i in range( nproc ):\n procs[ i ].start()\n for i in range( nproc ):\n procs[ i ].join()\n print( 'the parent process is exiting' )\n","sub_path":"mult.py","file_name":"mult.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"508586703","text":"import pandas as pd\nimport numpy as np\nimport re\n\nfrom shapely import geometry\nimport time\n\nfrom datasetup.utils import *\nimport itertools\n\n\ndef clean(filename):\n \"\"\" Cleans dataset\n\n Parameters\n -----------\n filename: str\n The input file name\n\n Returns\n -----------\n data: dataframe\n Cleaned data\n \"\"\"\n\n checkFileType(filename)\n data = openFile(filename)\n validate(data)\n\n print(data.shape)\n\n time_steps = data['ADV'].values\n regx = re.compile(r'[a-zA-Z]$')\n time_steps = [re.sub(regx, '', str(time_step)) for time_step in time_steps]\n data.loc[:, 'ADV'] = time_steps\n\n TCID = data['TCID'].unique()\n data = data.groupby('TCID', sort = False, group_keys = False).apply(\n lambda x: x.drop_duplicates('ADV', keep = 'last'))\n\n status = data['STAT'].values\n status = [stat.replace(' ', '') for stat in status]\n\n print('Successfully cleaned file')\n\n return data\n\n\ndef filterPAR(data):\n \"\"\" Filters data points that passed through PAR\n\n Parameters\n -----------\n data: dataframe\n The dataframe which contains the data\n\n Returns\n -----------\n data: dataframe\n Dataframe which only contains data points that passed through PAR\n \"\"\"\n\n TCID_par = []\n par_poly = geometry.Polygon([(120, 25), (135, 25), (135, 5), (115, 5), (115, 15), (120, 21), (120, 25)])\n start_time = time.time()\n\n for TCID, val in data.groupby('TCID', sort = False):\n trajectory = geometry.MultiPoint(val[['LONGITUDE', 'LATITUDE']].values)\n\n if trajectory.intersects(par_poly):\n TCID_par.append(TCID)\n\n print(\"Run Time: %s seconds\" % (time.time() - start_time))\n\n data = data.loc[data['TCID'].isin(TCID_par)].reset_index(drop = True)\n\n print('Successfully filtered data that passed through PAR \\n')\n\n return data\n\n\ndef getPoints(data, mode):\n \"\"\" Get points based on mode\n\n Parameters\n -----------\n data: dataframe\n The dataframe which contains the data\n mode: str\n Basis of point extraction (if ORIGIN/ENDPOINT)\n\n Returns\n -----------\n data: dataframe\n Dataframe which only contains points based on specified mode\n \"\"\"\n\n start_time = time.time()\n TCID = data.TCID.unique()\n\n if mode == 'ORIGIN':\n data = data.drop(data[data['ADV'] != 1].index)\n elif mode == 'ENDPOINT':\n data = data.groupby(['TCID'], sort=False).last()\n\n print('Run Time: %s seconds' % (time.time() - start_time))\n print('Successfully extracted', mode, 'points')\n\n return data\n\n\ndef normalize(data):\n \"\"\" Normalizes latitude and longitude data points\n\n Parameters\n -----------\n data: dataframe\n The dataframe which contains the data\n\n Returns\n -----------\n data: dataframe\n Dataframe which contains NORMALIZED_LATITUDE and NORMALIZED_LONGITUDE fields\n \"\"\"\n\n TCIDs = data['TCID'].unique()\n origin_array = []\n\n origin_array = data.groupby(['TCID'], sort=False)['LATITUDE', 'LONGITUDE'].first().values\n \n origin_dict = dict(zip(TCIDs, origin_array))\n\n for TCID, origin in origin_dict.items():\n if origin.shape[0] == 0:\n continue\n\n 
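# shift each track so its first recorded fix becomes the (0, 0) origin\n 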
latitude = data.loc[data['TCID'] == TCID, 'LATITUDE'].values\n longitude = data.loc[data['TCID'] == TCID, 'LONGITUDE'].values\n \n norm_lat = [np.round(lat - origin[0,0], 2) for lat in latitude]\n norm_long = [np.round(lon - origin[0,1], 2) for lon in longitude]\n \n data.loc[data['TCID'] == TCID, 'NORMALIZED_LATITUDE'] = norm_lat\n data.loc[data['TCID'] == TCID, 'NORMALIZED_LONGITUDE'] = norm_long\n\n return data\n\n\ndef checkTimeConsistency(data):\n \"\"\" Checks if time step follows 6-hr interval\n\n Parameters\n -----------\n data: dataframe\n The dataframe which contains the data\n\n Returns\n -----------\n \"\"\"\n\n times = data['TIME'].values\n times = [t.split('/')[2] for t in times]\n times = [re.sub('[a-zA-Z]$', '', t) for t in times]\n\n data.loc[:, 'TIME_INTERVAL'] = times\n\n TCID = data['TCID'].unique()\n counter = 0\n print(len(data['TCID'].unique()))\n\n for TC in TCID:\n time_interval = data.loc[data['TCID'] == TC, 'TIME_INTERVAL'].values\n time_interval = time_interval.astype(float)\n origin = time_interval[0]\n temp = origin\n\n for t in time_interval:\n flag = 0\n if temp > 23.:\n temp = 0.\n if t != temp:\n flag = 1\n break\n\n temp = temp + 6\n\n if flag == 1:\n print(flag, TC)\n data = data.drop(data[data['TCID'] == TC].index)\n\n data.to_csv('imported/Full_Dataset.csv', index = False)\n return\n","sub_path":"datasetup/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":4708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"221592093","text":"from flask import Flask, Blueprint, request, Response\nfrom .models import Courier, CourierType, Region, db, courier_region, IntervalTime, Order, ReadinessStatus, GroupOrder\nfrom .DateTimeHelper import DateTimeHelper\nimport datetime\nfrom .Validation import Validation\nfrom .functions import *\n\nmodule = Blueprint('delivery', __name__)\n\nvalidator = Validation()\n\n\n@module.route('/', methods=['GET'])\ndef index():\n return \"DASHA, PRIVET \"\n\n\n@module.route('/couriers', methods=['POST'])\ndef add_couriers():\n error = validator.is_valid_json_add_courier(request.json)\n if (type(error) == bool and not error) or len(error) != 0:\n return {\n \"validation_error\": {\n \"couriers\": error\n }\n }, 400\n\n added_couriers = []\n for item in request.json[\"data\"]:\n # if this courier was already added, skip it\n helper = db.session.query(Courier).get(item[\"courier_id\"])\n if helper is not None:\n continue\n\n added_couriers.append({\"id\": item[\"courier_id\"]})\n intervals: [IntervalTime] = DateTimeHelper.get_interval_time_list(item[\"working_hours\"])\n courier = Courier(\n courier_id=item[\"courier_id\"],\n courier_type=get_courier_type(item[\"courier_type\"]),\n interval=intervals\n )\n db.session.add(courier)\n db.session.commit()\n\n regions = get_regions_list(item[\"regions\"])\n for region in regions:\n helper = courier_region.insert().values(courier_id=courier.courier_id, region_id=region.region_id)\n db.session.execute(helper)\n db.session.commit()\n\n return {\"couriers\": added_couriers}, 201\n\n\n@module.route('/couriers/<courier_id>', methods=[\"PATCH\"])\ndef edit_courier(courier_id: str):\n if not courier_id:\n return Response(status=400)\n courier = db.session.query(Courier).get(courier_id)\n if courier is None or not validator.is_valid_json_edit_info_courier(request.json):\n return Response(status=400)\n\n json_ = request.json\n for key in json_:\n if key == \"courier_type\":\n courier.courier_type = get_courier_type(json_[key])\n if key == \"working_hours\":\n 
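# replace the courier's schedule with the freshly parsed intervals\n 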
courier.interval = DateTimeHelper.get_interval_time_list(json_[key])\n if key == \"regions\":\n courier.regions = get_regions_list(json_[key])\n db.session.add(courier)\n db.session.commit()\n\n group = get_group_in_working(courier.group_order)\n\n if group is not None:\n orders = group.orders\n print(orders)\n # new_orders = findSuitableOrderInGroup(courier)\n\n\n\n return get_info_courier(courier), 200\n\n\n@module.route('/orders', methods=[\"POST\"])\ndef add_orders():\n error = validator.is_valid_json_add_orders(request.json)\n if type(error) == bool and not error:\n return {\"validation_error\": {}}, 400\n if len(error) != 0:\n return {\n \"validation_error\": {\n \"orders\": error\n }\n }, 400\n\n added_orders = []\n for item in request.json[\"data\"]:\n # if this order was already added, skip it\n helper = db.session.query(Order).get(item[\"order_id\"])\n if helper is not None:\n continue\n\n added_orders.append({\"id\": item[\"order_id\"]})\n intervals: [IntervalTime] = DateTimeHelper.get_interval_time_list(item[\"delivery_hours\"])\n region = get_region(item[\"region\"])\n order = Order(\n order_id=item[\"order_id\"],\n weight=item[\"weight\"],\n region=region,\n interval=intervals,\n status=ReadinessStatus.new\n )\n db.session.add(order)\n db.session.commit()\n return {\"orders\": added_orders}, 201\n\n\n@module.route('/orders/assign', methods=[\"POST\"])\ndef orders_assign():\n if not validator.is_valid_json_orders_assign(request.json):\n return {}, 400\n\n courier = db.session.query(Courier).get(request.json[\"courier_id\"])\n if courier is None:\n return {}, 400\n\n # unfinished selection of the courier's in-progress group\n group = db.session.query(GroupOrder).filter(GroupOrder.courier == courier.courier_id).\\\n filter(GroupOrder.status == ReadinessStatus.in_working).all()\n\n if len(group) == 0:\n orders = findSuitableOrders(courier)\n assign_time = datetime.datetime.now()\n ans = {\n \"orders\": list_order_to_list_id(orders),\n \"assign_time\": assign_time\n }\n for order in orders:\n order.status = ReadinessStatus.in_working\n group_order = GroupOrder(\n assign_time=assign_time,\n status=ReadinessStatus.in_working,\n courier=courier.courier_id,\n orders=orders\n )\n db.session.add(group_order)\n db.session.commit()\n else:\n orders = db.session.query(Order).filter(Order.group == group[0].id).\\\n filter(Order.status == ReadinessStatus.in_working).all()\n ans = {\n \"orders\": list_order_to_list_id(orders),\n \"assign_time\": group[0].assign_time\n }\n\n return ans, 200\n\n\n@module.route('/orders/complete', methods=[\"POST\"])\ndef order_complete():\n\n data = request.json\n\n if not validator.is_valid_json_order_complete(request.json):\n return {}, 400\n\n order = db.session.query(Order).get(data[\"order_id\"])\n # if the order was not found or was never assigned\n if order is None or order.group is None:\n return {}, 400\n\n group = db.session.query(GroupOrder).get(order.group)\n\n # if the order was assigned to a different courier\n if group.courier != data[\"courier_id\"]:\n return {}, 400\n\n order.status = ReadinessStatus.ready\n order.finish_time = DateTimeHelper.get_datetime_by_iso_str(data[\"complete_time\"])\n\n db.session.add(order)\n db.session.commit()\n\n if check_is_ready_group_order(group) is True:\n group.status = ReadinessStatus.ready\n\n db.session.add(order)\n db.session.commit()\n\n return {\n \"order_id\": order.order_id\n }, 200\n\n\n# @module.route('/couriers/<courier_id>', methods=[\"GET\"])\n# def get_info_courier(courier_id):\n# return \"GET INFO COURIER 
#\"\n","sub_path":"app/delivery/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"195298601","text":"from gzip import GzipFile\n\nimport click\nfrom dagster import DagsterInstance\nfrom dagster.core.debug import DebugRunPayload\nfrom dagster.core.storage.pipeline_run import PipelineRunStatus, PipelineRunsFilter\n\n\ndef _recent_failed_runs_text(instance):\n lines = []\n runs = instance.get_runs(\n limit=5,\n filters=PipelineRunsFilter(\n statuses=[PipelineRunStatus.FAILURE, PipelineRunStatus.CANCELED]\n ),\n )\n if len(runs) <= 0:\n return \"\"\n for run in runs:\n lines.append(\"{:<50}{:<50}{:<20}\".format(run.run_id, run.pipeline_name, run.status))\n return \"Recently failed runs:\\n{}\".format(\"\\n\".join(lines))\n\n\ndef export_run(instance, run, output_file):\n debug_payload = DebugRunPayload.build(instance, run)\n with GzipFile(output_file, \"wb\") as file:\n click.echo(\"Exporting run_id '{}' to gzip output file {}.\".format(run.run_id, output_file))\n debug_payload.write(file)\n\n\n@click.group(name=\"debug\")\ndef debug_cli():\n \"\"\"\n Commands for debugging Dagster pipeline runs.\n \"\"\"\n\n\n@debug_cli.command(\n name=\"export\", help=\"Export the relevant artifacts for a pipeline run to a file.\"\n)\n@click.argument(\"run_id\", type=str)\n@click.argument(\"output_file\", type=click.Path())\ndef export_command(run_id, output_file):\n\n with DagsterInstance.get() as instance:\n run = instance.get_run_by_id(run_id)\n if run is None:\n raise click.UsageError(\n \"Could not find run with run_id '{}'.\\n{}\".format(\n run_id, _recent_failed_runs_text(instance)\n )\n )\n\n export_run(instance, run, output_file)\n","sub_path":"python_modules/dagster/dagster/cli/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545274638","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom sys import argv\n\nCPI_INDEX = 0\nDCACHE_MISSRATE_INDEX = 1\nICACHE_MISSRATE_INDEX = 2\nL2_MISSRATE_INDEX = 3\n\ndef read_file(file):\n with open(file) as f:\n input_data = f.readlines()[1:]\n raw_data = [i.strip().split() for i in input_data]\n keys, raw_values = [j[0] for j in raw_data], [j[1:] for j in raw_data]\n \n values = [np.array([float(m) for m in k]) for k in raw_values]\n\n parsed_data = {k:v for k,v in zip(keys, values)}\n f.close()\n return parsed_data\n\n\ndef create_plot(title, x_label, data_dict, data_point_index, boundary_line, file_name):\n\n labels = [\"64k L1i size\", \"4-way L1i assoc.\", \"8-way L1i assoc.\", \"32k L1d size\", \\\n \"128k L1d size\", \"4-way L1d assoc.\", \"8-way L1d assoc.\", \"1MB L2 size\", \"4MB L2 size\", \\\n \"2-way L2 assoc.\", \"4-way L2 assoc.\", \"32b line size\", \"128b line size\", \"256b line size\",\n \"256k L1d size\", \"Optimal CPI case\"]\n \n figure, axes = plt.subplots()\n axes.axvline(boundary_line, color='r')\n axes.set_title(title)\n\n axes.spines[\"right\"].set_visible(False)\n axes.spines[\"top\"].set_visible(False)\n \n axes.set_xlabel(x_label)\n results = []\n for key in list(data_dict.keys())[1:]:\n results.append(data_dict[key][data_point_index])\n\n \n bar_positions = np.arange(len(results))\n rects = axes.barh(bar_positions, results)\n axes.set_yticks(bar_positions)\n axes.set_yticklabels(labels)\n\n bound = (max(results) - min(results)) * 0.05\n clamped_min_x = 
max(0, min(results) - bound)\n axes.set_xlim(clamped_min_x, max(results) + bound)\n\n for rect in rects:\n length = rect.get_width()\n axes.annotate('{}'.format(length),\n xy=(length, rect.get_y() + (rect.get_height() / 2)),\n va='center')\n\n plt.savefig(file_name, bbox_inches='tight')\n axes.clear()\n\nif __name__ == \"__main__\":\n data = read_file(argv[1])\n create_plot(argv[2].capitalize() + \" CPI\", \"CPI (lower is better)\", data, CPI_INDEX, data[argv[2]][CPI_INDEX], argv[2] + \"-cpi.png\")\n create_plot(argv[2].capitalize() + \" L1 data cache overall miss rate\", \"Miss rate (lower is better)\",data, DCACHE_MISSRATE_INDEX, data[argv[2]][DCACHE_MISSRATE_INDEX], argv[2] + \"-dcmiss.png\")\n create_plot(argv[2].capitalize() + \" L1 instr. cache overall miss rate\", \"Miss rate (lower is better)\", data, ICACHE_MISSRATE_INDEX, data[argv[2]][ICACHE_MISSRATE_INDEX], argv[2] + \"-icmiss.png\")\n create_plot(argv[2].capitalize() + \" L2 cache overall miss rate\", \"Miss rate (lower is better)\", data, L2_MISSRATE_INDEX, data[argv[2]][L2_MISSRATE_INDEX], argv[2] + \"-l2miss.png\")\n ","sub_path":"Lab2/SPEC2006-design-exploration/graphs/make_graphs.py","file_name":"make_graphs.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"268626547","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom os.path import abspath, dirname, join\n\nfrom preggy import expect\nfrom mock import patch, Mock, call\nfrom octopus import TornadoOctopus\n\nfrom colorama import Fore, Style\nfrom holmes.worker import HolmesWorker\nfrom holmes.config import Config\nfrom tests.unit.base import ApiTestCase\n\n\nclass MockResponse(object):\n def __init__(self, status_code=200, text=''):\n self.status_code = status_code\n self.text = text\n\n\nclass WorkerTestCase(ApiTestCase):\n root_path = abspath(join(dirname(__file__), '..', '..'))\n\n @patch('uuid.UUID')\n def test_initialize(self, uuid):\n uuid.return_value = Mock(hex='my-uuid4')\n\n worker = HolmesWorker(['-c', join(self.root_path, 'tests/unit/test_worker.conf'), '--concurrency=10'])\n worker.initialize()\n\n expect(worker.uuid).to_equal('my-uuid4')\n\n expect(worker.facters).to_length(1)\n expect(worker.validators).to_length(1)\n\n expect(worker.otto).to_be_instance_of(TornadoOctopus)\n\n def test_config_parser(self):\n worker = HolmesWorker(['-c', join(self.root_path, 'tests/unit/test_worker.conf')])\n\n parser_mock = Mock()\n\n worker.config_parser(parser_mock)\n\n expect(parser_mock.add_argument.call_args_list).to_include(\n call(\n '--concurrency',\n '-t',\n type=int,\n default=10,\n help='Number of threads (or async http requests) to use for '\n 'Octopus (doing GETs concurrently)'\n ))\n\n expect(parser_mock.add_argument.call_args_list).to_include(\n call(\n '--cache',\n default=False,\n action='store_true',\n help='Whether http requests should be cached by Octopus.'\n ))\n\n def test_description(self):\n worker = HolmesWorker(['-c', join(self.root_path, 'tests/unit/test_worker.conf')])\n\n expected = \"%s%sholmes-worker-%s%s\" % (\n Fore.BLUE,\n Style.BRIGHT,\n '',\n Style.RESET_ALL,\n )\n\n expect(worker.get_description()).to_be_like(expected)\n\n def test_config_class(self):\n worker = HolmesWorker(['-c', join(self.root_path, 'tests/unit/test_worker.conf')])\n\n 
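# the worker should report Config as the class used to parse its settings\n 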
expect(worker.get_config_class()).to_equal(Config)\n","sub_path":"tests/unit/test_worker.py","file_name":"test_worker.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"588702770","text":"import sys\nimport os\nimport time\n\nfrom tensorflow.keras.models import load_model as keras_load_model\nimport cv2\nimport numpy as np\nimport matplotlib; matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\n\nimport data\nfrom constants import *\nimport train\nimport model\n\n\ndef softmax(x):\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum()\n\n\ndef single_frame(model_loc):\n single_frame_model = model.single_frame_model()\n single_frame_model.load_weights(model_loc, by_name=True)\n cap = cv2.VideoCapture(0)\n image_size = (IMAGE_WIDTH, IMAGE_HEIGHT)\n while True:\n ret, frame = cap.read()\n frame = cv2.resize(frame, image_size)\n frame = frame.astype(np.float32) / 255.0\n pred = single_frame_model.predict(frame[None, ...])[0]\n \n encoded_frames = list()\n for i in range(40, 45):\n f = pred[..., i]\n encoded_frames.append(f)\n \n encoded_frames = np.array(encoded_frames)\n encoded_frames -= encoded_frames.min()\n encoded_frames /= encoded_frames.max()\n \n for i, f in enumerate(encoded_frames):\n cv2.imshow(f'frame_{i}', f)\n\n if cv2.waitKey(1) == ord('q'):\n break\n \n\ndef main(model_loc):\n fig, ax = plt.subplots()\n ax.set_ylim(0, 1)\n ax.figure.set_size_inches(10, 10)\n bars = ax.bar(data.labels, np.zeros(NUM_CLASSES, dtype=np.float32))\n plt.xticks(rotation=90)\n plt.tight_layout()\n\n num_frames = 7\n frame_time = 1/MIN_FPS * 1000\n \n single_frame_model = model.single_frame_model()\n multi_frame_model = model.multi_frame_model(num_frames=num_frames)\n full_model = model.full_model(single_frame_model, multi_frame_model, num_frames=num_frames + 1)\n full_model.load_weights(model_loc)\n\n cap = cv2.VideoCapture(0)\n\n image_size = (IMAGE_WIDTH, IMAGE_HEIGHT)\n prev = np.zeros((4*6*2048), dtype=np.float32)\n model_input = np.zeros((1, num_frames, 4*6*2048), dtype=np.float32)\n\n def animate(i):\n start = time.time()\n ret, frame = cap.read()\n \n if not ret:\n print(\"Couldn't read input\")\n return\n \n frame = cv2.resize(frame, image_size)\n frame = frame.astype(np.float32) / 255.0\n \n frame_encoded = single_frame_model.predict(frame[None, ...])[0]\n frame_encoded = frame_encoded.reshape(4*6*2048)\n frame_diff = frame_encoded - prev\n prev[:] = frame_encoded\n \n model_input[0, :-1] = model_input[0, 1:]\n model_input[0, -1] = frame_diff\n \n pred = multi_frame_model.predict(model_input)[0]\n \n pred = np.max(pred, axis=0)\n pred = softmax(pred)\n\n for bar, p in zip(bars, pred):\n bar.set_height(p)\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n if cv2.waitKey(max(1, int((time.time() - start)*1000 - frame_time))) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n exit(0)\n\n if pred.max() > 0.5:\n predictions = pred.argsort()\n print(data.labels[predictions[-1]])\n print(data.labels[predictions[-2]])\n return bars\n\n anim = animation.FuncAnimation(fig, animate, frames=None, interval=1, blit=True) # keep a reference so the animation is not garbage-collected\n plt.show()\n\n\nif __name__ == '__main__':\n if 'single' in sys.argv:\n single_frame(sys.argv[1])\n else:\n main(sys.argv[1])\n","sub_path":"live_test.py","file_name":"live_test.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"39414516","text":"#!/usr/bin/env python\n\"\"\"\nopy_main.py\n\"\"\"\nfrom __future__ import print_function\n\nimport cStringIO\nimport hashlib\nimport optparse\nimport os\nimport sys\nimport marshal\n\nfrom . import pytree\nfrom . import skeleton\n\nfrom .pgen2 import driver, parse, pgen, grammar\nfrom .pgen2 import token\nfrom .pgen2 import tokenize\n\nfrom .compiler2 import dis_tool\nfrom .compiler2 import misc\nfrom .compiler2 import transformer\n\n# Disabled for now because byterun imports 'six', and that breaks the build.\nfrom .byterun import execfile\n\nfrom core import args\nfrom core import util\nlog = util.log\n\n\n# From lib2to3/pygram.py. This takes the place of the 'symbol' module.\n# compiler/transformer module needs this.\n\nclass Symbols(object):\n\n def __init__(self, gr):\n \"\"\"\n Creates an attribute for each grammar symbol (nonterminal), whose value is\n the symbol's type (an int >= 256).\n \"\"\"\n for name, symbol in gr.symbol2number.items():\n setattr(self, name, symbol)\n #log('%s -> %d' % (name, symbol))\n # For transformer to use\n self.number2symbol = gr.number2symbol\n\n\ndef HostStdlibNames():\n import symbol\n import token\n names = {}\n for k, v in symbol.sym_name.items():\n names[k] = v\n for k, v in token.tok_name.items():\n names[k] = v\n return names\n\n\ndef WriteGrammar(grammar_path, pickle_path):\n log(\"Generating grammar tables from %s\", grammar_path)\n g = pgen.generate_grammar(grammar_path)\n log(\"Writing grammar tables to %s\", pickle_path)\n try:\n # calls pickle.dump on self.__dict__ after making it deterministic\n g.dump(pickle_path)\n except OSError as e:\n log(\"Writing failed: %s\", e)\n\n\ndef CountTupleTree(tu):\n \"\"\"Count the nodes in a tuple parse tree.\"\"\"\n if isinstance(tu, tuple):\n s = 0\n for entry in tu:\n s += CountTupleTree(entry)\n return s\n elif isinstance(tu, int):\n return 1\n elif isinstance(tu, str):\n return 1\n else:\n raise AssertionError(tu)\n\n\nclass TupleTreePrinter(object):\n def __init__(self, names):\n self._names = names\n\n def Print(self, tu, f=sys.stdout, indent=0):\n ind = ' ' * indent\n f.write(ind)\n if isinstance(tu, tuple):\n f.write(self._names[tu[0]])\n f.write('\\n')\n for entry in tu[1:]:\n self.Print(entry, f, indent=indent+1)\n elif isinstance(tu, int):\n f.write(str(tu))\n f.write('\\n')\n elif isinstance(tu, str):\n f.write(str(tu))\n f.write('\\n')\n else:\n raise AssertionError(tu)\n\n\ndef Options():\n \"\"\"Returns an option parser instance.\"\"\"\n p = optparse.OptionParser()\n\n # NOTE: default command is None because empty string is valid.\n\n # NOTE: In 'opy run oil.pyc -c', -c is an arg to opy, and not a flag.\n\n p.add_option(\n '-c', dest='command', default=None,\n help='Python command to run')\n return p\n\n\n# TODO: more actions:\n# - lex, parse, ast, cfg, compile/eval/repl\n\n# Made by the Makefile.\nPICKLE_REL_PATH = '_build/opy/py27.grammar.pickle'\n\ndef OpyCommandMain(argv):\n \"\"\"Dispatch to the right action.\"\"\"\n\n # TODO: Use core/args.\n #opts, argv = Options().parse_args(argv)\n\n try:\n action = argv[0]\n except IndexError:\n raise args.UsageError('opy: Missing required subcommand.')\n\n if action in ('parse', 'compile', 'eval', 'repl', 'run'):\n loader = util.GetResourceLoader()\n f = loader.open(PICKLE_REL_PATH)\n gr = grammar.Grammar()\n gr.load(f)\n f.close()\n\n # In Python 2 code, always use from __future__ import print_function.\n try:\n del gr.keywords[\"print\"]\n except KeyError:\n pass\n\n symbols = Symbols(gr)\n pytree.Init(symbols) 
# for type_repr() pretty printing\n    transformer.Init(symbols)  # for _names and other dicts\n    tr = transformer.Transformer()\n  else:\n    # e.g. pgen2 doesn't use any of these.  Maybe we should make a different\n    # tool.\n    gr = None\n    symbols = None\n    tr = None\n\n  #\n  # Actions\n  #\n\n  if action == 'pgen2':\n    grammar_path = argv[1]\n    pickle_path = argv[2]\n    WriteGrammar(grammar_path, pickle_path)\n\n  elif action == 'stdlib-parse':\n    # This is what the compiler/ package was written against.\n    import parser\n\n    py_path = argv[1]\n    with open(py_path) as f:\n      st = parser.suite(f.read())\n\n    tree = st.totuple()\n\n    printer = TupleTreePrinter(HostStdlibNames())\n    printer.Print(tree)\n    n = CountTupleTree(tree)\n    log('COUNT %d', n)\n\n  elif action == 'lex':\n    py_path = argv[1]\n    with open(py_path) as f:\n      tokens = tokenize.generate_tokens(f.readline)\n      for typ, val, start, end, unused_line in tokens:\n        print('%10s %10s %-10s %r' % (start, end, token.tok_name[typ], val))\n\n  elif action == 'parse':\n    py_path = argv[1]\n    with open(py_path) as f:\n      tokens = tokenize.generate_tokens(f.readline)\n      p = parse.Parser(gr, convert=skeleton.py2st)\n      parse_tree = driver.PushTokens(p, tokens, gr.symbol2number['file_input'])\n\n    if isinstance(parse_tree, tuple):\n      n = CountTupleTree(parse_tree)\n      log('COUNT %d', n)\n\n      printer = TupleTreePrinter(transformer._names)\n      printer.Print(parse_tree)\n    else:\n      parse_tree.PrettyPrint(sys.stdout)\n      print('\tChildren: %d' % len(parse_tree.children), file=sys.stderr)\n\n  elif action == 'compile':  # 'opyc compile' is pgen2 + compiler2\n    py_path = argv[1]\n    out_path = argv[2]\n\n    with open(py_path) as f:\n      co = skeleton.Compile(f, py_path, gr, 'file_input', 'exec')\n\n    log(\"Compiled to %d bytes of bytecode\", len(co.co_code))\n\n    # Write the .pyc file\n    with open(out_path, 'wb') as out_f:\n      h = misc.getPycHeader(py_path)\n      out_f.write(h)\n      marshal.dump(co, out_f)\n\n
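  # Note: a CPython 2.7 .pyc is just a 4-byte magic number, a 4-byte mtime,\n  # and then the marshalled code object; the 'run' action below relies on that\n  # layout when it seeks past the 8-byte header.\n\n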
  elif action == 'eval':  # Like compile, but parses to a code object and prints it\n    py_expr = argv[1]\n    f = cStringIO.StringIO(py_expr)\n    co = skeleton.Compile(f, '', gr, 'eval_input', 'eval')\n\n    v = dis_tool.Visitor()\n    v.show_code(co)\n    print()\n    print('RESULT:')\n    print(eval(co))\n\n  elif action == 'repl':  # Like eval in a loop\n    while True:\n      py_expr = raw_input('opy> ')\n      f = cStringIO.StringIO(py_expr)\n\n      # TODO: change this to 'single input'?  Why doesn't this work?\n      co = skeleton.Compile(f, '', gr, 'eval_input', 'eval')\n\n      v = dis_tool.Visitor()\n      v.show_code(co)\n      print(eval(co))\n\n  elif action == 'dis':\n    pyc_path = argv[1]\n    try:\n      report_path = argv[2]\n      report_f = open(report_path, 'w')\n    except IndexError:\n      report_f = sys.stdout\n\n    with open(pyc_path, 'rb') as f:\n      # TODO: Make this a flag.\n      #v = dis_tool.Visitor(dis_bytecode=False)\n      v = dis_tool.Visitor()\n      #v = dis_tool.Visitor(co_name='_parse')\n      v.Visit(f)\n\n    v.Report(report_f)\n\n  elif action == 'dis-md5':\n    pyc_paths = argv[1:]\n    if not pyc_paths:\n      raise args.UsageError('dis-md5: At least one .pyc path is required.')\n\n    for path in pyc_paths:\n      h = hashlib.md5()\n      with open(path, 'rb') as f:\n        magic = f.read(4)\n        h.update(magic)\n        ignored_timestamp = f.read(4)\n        while True:\n          b = f.read(64 * 1024)\n          if not b:\n            break\n          h.update(b)\n      print('%6d %s %s' % (os.path.getsize(path), h.hexdigest(), path))\n\n  elif action == 'run':\n    # TODO: Add an option like -v in __main__\n\n    #level = logging.DEBUG if args.verbose else logging.WARNING\n    #logging.basicConfig(level=level)\n    #logging.basicConfig(level=logging.DEBUG)\n\n    # Compile and run, without writing pyc file\n    py_path = argv[1]\n    opy_argv = argv[1:]\n\n    if py_path.endswith('.py'):\n      with open(py_path) as f:\n        co = skeleton.Compile(f, py_path, gr, 'file_input', 'exec')\n      execfile.run_code_object(co, opy_argv)\n\n    elif py_path.endswith('.pyc') or py_path.endswith('.opyc'):\n      with open(py_path, 'rb') as f:\n        f.seek(8)  # past header.  TODO: validate it!\n        co = marshal.load(f)\n      execfile.run_code_object(co, opy_argv)\n\n    else:\n      raise args.UsageError('Invalid path %r' % py_path)\n\n  else:\n    raise args.UsageError('Invalid action %r' % action)\n","sub_path":"opy/opy_main.py","file_name":"opy_main.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"280776030","text":"from django.core.management.base import BaseCommand\nfrom works.models import WorkMaterial\nfrom works.models import WorksModel\nfrom works.models import WorkSize\nfrom workcategory.models import WorksCategoryModel\n\n\nclass Command(BaseCommand):\n\n    help = \"This command seeds the works table with a set of initial artwork records\"\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            \"--number\", help=\"How many records to create (currently unused by this seeder)\")\n\n    def handle(self, *args, **options):\n        arr = [[\"1\", \"Dreamer\", \"oil on canvas\", \"116.8cmX91cm\", \"2016\", \"dreamer.jpg\"],\n               [\"1\", \"Poor pure\", \"oil on canvas\",\n                   \"116.8cmX91cm\", \"2016\", \"poorpure.jpg\"],\n               [\"1\", \"사랑은 어디에 있는가\", \"oil on canvas\",\n                   \"116.8cmX91cm\", \"2016\", \"사랑은어디에있는가.jpg\"],\n               [\"1\", \"인생은아름다워\", \"oil on canvas\",\n                   \"116.8cmX91cm\", \"2016\", \"인생은아름다워.jpg\"],\n               [\"1\", \"20대를 기념하기위해 그린 그림\", \"oil on canvas\",\n                   \"90.9cmX72.7cm\", \"2017\", \"20대를기념하기위해그린그림.JPG\"],\n               [\"1\", \"adulthood\", \"oil on canvas\",\n                   \"91cmX116.8cm\", \"2016\", \"adulthood.jpg\"],\n               [\"1\", \"가치있는 빈곤\", \"oil on canvas\",\n                   \"116.8cmX91cm\", \"2017\", \"가치있는빈곤.JPG\"],\n               [\"1\", \"수평의기준\", \"oil on canvas\", \"106cmX41.3cm\", \"2018\", \"수평의기준.jpg\"],\n               [\"1\", \"시시한 이유에서의 환희\", \"oil on canvas\",\n                   \"162.2cmX130.3cm\", \"2018\", \"이유에서의환희.jpg\"]\n               ]\n\n        for item in arr:\n            print(item[2])\n            category = WorksCategoryModel.objects.get(pk=item[0])\n            material = WorkMaterial.objects.get(name=item[2])\n            size = WorkSize.objects.get(name=item[3])\n            WorksModel.objects.create(\n                year=item[4], category=category, material=material, size=size, 
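\n                # each row of arr is [category pk, title, material, size, year,\n                # image filename]; note that item[5], the image filename, is\n                # never passed to create() below\n                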
worksTitle=item[1])\n self.stdout.write(self.style.SUCCESS(\"Works created!\"))\n","sub_path":"works/management/commands/seed_positive.py","file_name":"seed_positive.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617743542","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\ndef readData(dir):\n data = np.loadtxt(dir, dtype=np.float32, skiprows=1)\n return data\n\ndef dataStats(data, label):\n #plt.figure()\n plt.plot(data, color=\"blue\",label=label, linewidth=0.5)\n plt.legend(loc='upper right')\n plt.title('Pound/Dollar daily exchange rates')\n print('The mean of the ' +label + ' is: ' + str(np.mean(data)))\n print('The stdev of the: ' +label + ' is: ' + str(np.std(data)))\n print('The variance of the: ' +label + ' is: ' + str(np.var(data)))\n plt.show()\n\n# Read the data\nsv_dat = readData(\"Assignment 3/sv.dat\")\n\n# a)\n#Data descriptions: plot/mean/var/etacross\ndataStats(sv_dat, 'returns')\n\n# b)\nxt = np.log((sv_dat - (np.mean(sv_dat)))**2)\ndataStats(xt, 'Log squared demeaned returns')\n\n# c)\n","sub_path":"Assignment 3/3.1.py","file_name":"3.1.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127231570","text":"from exp_base import *\n\n############## choose an experiment ##############\n\ncurrent = 'pwc_builder'\ncurrent = 'pwc_eval'\n\nmod = '\"test0\"'\nmod = '\"t00\"' # compute and show depth\nmod = '\"t01\"' # compute and show gt egoflow\nmod = '\"t02\"' # \nmod = '\"t03\"' # backwarp\nmod = '\"t04\"' # compute and vis the valid mask\nmod = '\"t05\"' # compute and vis stabflow_e\nmod = '\"t06\"' # also show flow_e-stabflow_g\nmod = '\"t07\"' # compute 3d stabilized flow*occ\nmod = '\"t08\"' # show both 3d flows\nmod = '\"t09\"' # backwarp the stabdepth to get a 3d displacement field and vis \nmod = '\"t10\"' # transpose \nmod = '\"t11\"' # *valid\nmod = '\"t12\"' # also alt version\nmod = '\"t13\"' # mask with inbound0 for the 2d vis\nmod = '\"t14\"' # clean v0 to v3\nmod = '\"t15\"' # discover and vis, with v0\nmod = '\"t16\"' # do things at half res, because the coeffs are tuned for that\nmod = '\"t17\"' # do not vis; do compute cumu maps\nmod = '\"t18\"' # do not vis; do compute cumu maps\nmod = '\"t19\"' # compute all cumu maps\nmod = '\"t20\"' # compute all cumu maps; 1k\nmod = '\"t21\"' # include v5, which should be better than v3\n\n\n############## define experiments ##############\n\nexps['pwc_builder'] = [\n 'carla_pwc', # mode\n 'carla_moving_data', # dataset\n # 'kitti_pwc_data', # dataset\n '10_iters',\n 'lr4',\n 'B1',\n 'no_shuf',\n 'no_backprop',\n 'fastest_logging',\n 'include_summs',\n]\nexps['pwc_eval'] = [\n 'carla_pwc', # mode\n 'carla_moving_data', # dataset\n '1k_iters',\n 'B1',\n 'no_shuf',\n 'no_backprop',\n 'fastest_logging',\n 'eval_map',\n]\n\n############## net configs ##############\n\n\n############## datasets ##############\n\n# DHW for mem stuff\nSIZE = 32\nZ = SIZE*4\nY = SIZE*1\nX = SIZE*4\n\nK = 2 # how many objects to consider\n\nS = 2\nH = 128\nW = 384\n# H and W for proj stuff\nPH = int(H/2.0)\nPW = int(W/2.0)\n\ngroups['carla_moving_data'] = [\n 'dataset_name = \"carla\"',\n 'H = %d' % H,\n 'W = %d' % W,\n # 'trainset = \"cabs16i3c0o1t\"',\n 'valset = \"cabs16i3c0o1v\"',\n 'dataset_list_dir = \"/data/carla/npzs\"',\n 'dataset_location = \"/data/carla/npzs\"',\n 'dataset_format = 
\"npz\"',\n]\ngroups['kitti_pwc_data'] = ['dataset_name = \"kitti\"',\n 'H = %d' % H,\n 'W = %d' % W,\n 'trainset = \"caas2i1c0o1t\"',\n 'dataset_list_dir = \"/projects/katefgroup/datasets/multistage_dyno/kitti/npzs\"',\n 'dataset_location = \"/projects/katefgroup/datasets/multistage_dyno/kitti/npzs\"',\n 'dataset_format = \"npz\"',\n]\n\n############## verify and execute ##############\n\ndef _verify_(s):\n varname, eq, val = s.split(' ')\n assert varname in globals()\n assert eq == '='\n assert type(s) is type('')\n\nprint(current)\nassert current in exps\nfor group in exps[current]:\n print(\" \" + group)\n assert group in groups\n for s in groups[group]:\n print(\" \" + s)\n _verify_(s)\n exec(s) \n\ns = \"mod = \" + mod\n_verify_(s)\n\nexec(s)\n","sub_path":"pytorch_disco_recovery/exp_carla_pwc.py","file_name":"exp_carla_pwc.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419495308","text":"from unittest import TestCase\n\nfrom cnfencoder import Lexer, Token, TokenType\n\n\nclass TestCNF(TestCase):\n def test_valid_formula(self):\n formula = \"(X1 ∧ X22 ∧ X4) → (¬(X3 ↔ ¬ X5) ⊕ X2)\"\n expected_types = [\n TokenType.PARENTHESES_OPEN,\n TokenType.LITERAL,\n TokenType.AND,\n TokenType.LITERAL,\n TokenType.AND,\n TokenType.LITERAL,\n TokenType.PARENTHESES_CLOSE,\n TokenType.IMPLICATION,\n TokenType.PARENTHESES_OPEN,\n TokenType.NEGATION,\n TokenType.PARENTHESES_OPEN,\n TokenType.LITERAL,\n TokenType.EQUIVALENCE,\n TokenType.LITERAL,\n TokenType.PARENTHESES_CLOSE,\n TokenType.XOR,\n TokenType.LITERAL,\n TokenType.PARENTHESES_CLOSE\n ]\n\n lexer = Lexer(formula)\n\n tokens = []\n token = lexer.getToken()\n while token.type != TokenType.EOF:\n tokens.append(token)\n token = lexer.getToken()\n\n self.assertListEqual(expected_types, [t.type for t in tokens])\n\n self.assertEqual(tokens[1].text, 'X1')\n self.assertEqual(tokens[3].text, 'X22')\n self.assertEqual(tokens[13].text, '¬X5')\n\n def test_invalid_character(self):\n formula = '(X1 + X2)'\n\n lexer = Lexer(formula)\n\n try:\n tokens = []\n token = lexer.getToken()\n while token.type != TokenType.EOF:\n tokens.append(token)\n token = lexer.getToken()\n self.assertTrue(False)\n except:\n self.assertTrue(True)\n","sub_path":"cnfencoder/test/test_lexer.py","file_name":"test_lexer.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74569898","text":"# coding: utf-8\n\nglobal data\ndata = []\nglobal states\nstates = []\nglobal symbols\nsymbols = []\nglobal listValues\nlistValues = []\nglobal transitions\ntransitions = []\nglobal finalStates\nfinalStates = []\n\ndef automaton(word, current, response=None):\n if len(word)>0:\n for char in word:\n if not char in symbols:\n return None\n\n for state in states[current][symbols.index(word[0])]:\n current = state\n if len(word) == 1:\n if current in finalStates:\n response = True\n else:\n continue\n else:\n response = automaton(word[1:], current, response) \n return response\n\n#Fix please\ndef reorder():\n state = 0\n while state <= finalStates[0]:\n a = []\n for symbol in symbols:\n b = []\n for transition in transitions:\n if symbol == transition[1] and state == transition[0]:\n b.append(transition[2])\n a.append(b)\n states.append(a)\n state += 1\n\ndef getInitial(v):\n if (v.isdigit()):\n return int(v)\n else:\n return None\n\ndef getSymbol(v, index):\n i = 0\n if(v.isdigit()):\n for n in 
listValues:\n            if(i == index):\n                return n\n            i += 1\n    return None\n\ndef setData(values):\n    inital_state = values[0:1]\n    end_state = values[2: len(values)]\n    index = 1\n    for v in end_state:\n        index += 1\n        data_state = []\n        if len(v) > 0:\n            data_state.append(int(inital_state))\n            if(getSymbol(v, index)):\n                data_state.append(getSymbol(v, index))\n            data_state.append(getInitial(v))\n        if(len(data_state) == 3):\n            data.append(data_state)\n\ndef setValues(line):\n    for idy, char in enumerate(line):\n        listValues.append(char)\n        if idy > 1 and idy % 2 == 0 :\n            symbols.append(char)\n\ndef getTransitionTable(path):\n    csv = None\n    try:\n        csv = readFile(path)\n        for idx, line in enumerate(csv):\n            if len(line) > 1:\n                if idx == 0 :\n                    setValues(line)\n                else:\n                    setData(line)\n        for item in data:\n            if item not in transitions:\n                transitions.append(item)\n\n        with open(path) as myfile:\n            finalStates.append(int(list(myfile)[-1][0]))\n\n        reorder()\n    except Exception as inst:\n        raise Exception(inst)\n    finally:\n        if csv:\n            csv.close()\n\ndef readFile(name):\n    try:\n        return open(name, 'r')\n    except IOError:\n        raise IOError('Something went wrong when reading the file')\n\ndef isNum(n):\n    try:\n        getTransitionTable('num.csv')\n\n        return automaton(n, 0)\n\n    except Exception as e:\n        print(e)","sub_path":"num.py","file_name":"num.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"592493816","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\nimport numpy as np\nimport timeit\n\ndef proximal_gradient_algorithm(F , f_grad , g_prox , x0 , step , PREC , ITE_MAX , PRINT ):\n    x = np.copy(x0)\n    x_tab = np.copy(x)\n    if PRINT:\n        print(\"------------------------------------\\n Proximal gradient algorithm\\n------------------------------------\\nSTART -- stepsize = {:0}\".format(step))\n    t_s = timeit.default_timer()\n    for k in range(ITE_MAX):\n        g = f_grad(x)\n        x = g_prox(x - step*g , step) ####### ITERATION\n\n        x_tab = np.vstack((x_tab,x))\n\n\n    t_e = timeit.default_timer()\n    if PRINT:\n        print(\"FINISHED -- {:d} iterations / {:.6f}s -- final value: {:f}\\n\\n\".format(k,t_e-t_s,F(x)))\n    return x, x_tab\n\n","sub_path":"Lab7_StochasticMethods/algoProx.py","file_name":"algoProx.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"235748805","text":"# import re\n\n\n# def main():\n#     username = input('name: ')\n#     qq = input('qq: ')\n#     m1 = re.match(r'^[0-9a-zA-Z_]{6,20}', username)\n#     if not m1:\n#         print('please input username')\n#     m2 = re.match(r'^[1-9]\\\d{4,11}$', qq)\n#     if not m2:\n#         print('please input qq')\n#     if m1 and m2:\n#         print('ok')\n\n\n# if __name__ == '__main__':\n#     main()\n\n\n# import re\n\n# def main():\n#     pattern = re.compile(r'(?<=\\\D)1[34578]\\\d{9}(?=\\\D)')\n#     sentence = '重要的事情说8130123456789遍,我的手机号是13512346789这个靓号,不是15600998765,也是110或119,王大锤的手机号才是15600998765。'\n#     my_list = re.findall(pattern, sentence)\n#     print(my_list)\n#     for temp in pattern.finditer(sentence):\n#         print(temp.group())\n#     m = pattern.search(sentence)\n#     while m:\n#         print(m.group())\n#         m = pattern.search(sentence, m.end())\n\n\n# if __name__ == '__main__':\n#     main()\n\n\nimport re\n\n\ndef main():\n    pome = '窗前明月光,疑是地上霜。举头望明月,低头思故乡。'\n    sentence_list = re.split(r'[,。,。]', pome)\n    print(sentence_list)\n    while '' in sentence_list:\n        sentence_list.remove('')\n    print(sentence_list)\n\n\nif __name__ == '__main__':\n    
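# illustrative: main() first prints the raw split, which ends with a '' left\n    # over from the trailing 。 delimiter, then prints it again after the while\n    # loop has removed the empty strings\n    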
main()\n","sub_path":"11-15/reg.py","file_name":"reg.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526807162","text":"import sys\nimport os\nsys.path.append(\"..\")\nimport numpy as np \nimport pandas as pd \nimport matplotlib.pyplot as plt\nfrom tofu.modules.linear_model import LinearRegression\nfrom tofu.preprocessing import train_test_split\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\ndf = pd.read_csv('dataset/temperature_data.csv')\n\nprint(df.head())\n\nstd_scaler = StandardScaler()\nmin_max_scaler = MinMaxScaler()\n\nX = std_scaler.fit_transform(df['humidity'].values.reshape(-1,1))\ny = min_max_scaler.fit_transform(df['temp'].values.reshape(-1, 1))\n\nX_train, X_test, y_train, y_test = train_test_split(X=X, y=y, test_size=0.3, random_seed=101)\n\nplt.scatter(X_train, y_train)\nplt.xlabel('Humidity')\nplt.ylabel('Temperature')\nplt.show()\n\nmodel = LinearRegression()\n\nmodel.fit(X_train, y_train, learning_rate=0.01, epochs=200)\n\nplt.plot(model.losses)\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.title('Loss Metrics')\nplt.show()\n\nline = model.predict(X_train)\n\nplt.scatter(X_train, y_train)\nplt.plot(X_train, line, c='r')\nplt.xlabel('Humidity')\nplt.ylabel('Temperature')\nplt.title('Line of Best Fit')\nplt.show()\n\ntest_pred = model.predict(X_test)\n\nloss = (np.square(test_pred - y_test)).mean(axis=None)\n\nprint(f\"\\nTest Loss MSE: {loss:.4f}\")","sub_path":"examples/lin_reg_temp.py","file_name":"lin_reg_temp.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"58010725","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib import admin\nfrom datetime import datetime\nfrom django.utils.text import slugify\nfrom django.utils import timezone\n\nfrom PIL import Image as PILImage\nfrom PIL import ImageOps\n\nfrom io import BytesIO\nimport base64\nfrom django.core.files.base import ContentFile, File\n\nclass MaintenanceMode(models.Model):\n #message = models.CharField(max_length = 100000, default = \"Site is temporarily down for maintenance. Please give us a call with any inquiries. In the event of an emergency, please call 911. Thank you!\")\n is_on = models.BooleanField(default=1)\n def is_active(self):\n return bool(self.is_on)\n def __str__(self):\n if self.is_on:\n return \"Maintenance mode is on. Turn off?\"\n else:\n return \"Maintenance mode is off. 
Turn on?\"\n\n\nclass GeneralInfo(models.Model):\n site_title = models.CharField(max_length=50, default=\"Default Title\")\n phone_number = models.CharField(max_length=20,default=\"(802) 434-5090\")\n fax_number = models.CharField(max_length=20,default=\"(802) 329-2144\")\n address_first_line = models.CharField(max_length=5000,default=\"12 Burnett Ct\")\n address_second_line = models.CharField(max_length=5000,default=\"Richmond, VT 05477\")\n site_description = models.TextField(max_length = 40000, null = True, blank = True)\n location_blurb = models.TextField(max_length=4000, default=\"\", null=True, blank=True )\n monday_hours = models.CharField(max_length=5000,default=\"8:00am - 5:00pm\")\n tuesday_hours = models.CharField(max_length=5000,default=\"8:00am - 5:00pm\")\n wednesday_hours = models.CharField(max_length=5000,default=\"8:00am - 5:00pm\")\n thursday_hours = models.CharField(max_length=5000,default=\"8:00am - 5:00pm\")\n friday_hours = models.CharField(max_length=5000,default=\"8:00am - 5:00pm\")\n weekend_hours_policy = models.TextField(max_length=4000, default=\"\", null=True, blank=True )\n special_hours_policy = models.TextField(max_length=4000, default=\"\", null=True, blank=True )\n after_hours_care = models.TextField(max_length=4000, default=\"\", null=True, blank=True )\n google_map_link = models.URLField(max_length=500000,default=\"https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d2850.0895318208995!2d-72.99429824929942!3d44.41080847900011!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x4cb586a16b5ff405%3A0xb9f077d9bc8ac2ef!2sRichmond+Pediatrics+%26+Adolescent+Medicine%3A+Parker+Paul+J+MD!5e0!3m2!1sen!2sus!4v1485475889424\")\n def __str__(self):\n return \"General Info\"\n\nclass Page(models.Model):\n title = models.CharField(max_length=50)\n description = models.TextField(blank=True, null=True, max_length=1000)\n page_link = models.CharField(max_length=30000, blank = True, null = True)\n display_priority = models.IntegerField(default = 1, null = True, blank = True)\n def __str__(self):\n return self.title\n\nclass Icon(models.Model):\n img_icon = models.ImageField(blank = True, null = True)\n svg_icon = models.FileField(blank = True, null = True)\n path = models.CharField(max_length=50, blank = True, null = True)\n def __str__(self):\n return self.path\n\nclass ImageSize(models.Model):\n size = models.TextField(max_length = 500)\n def __str__(self):\n return self.size\n\nclass ResourceType(models.Model):\n description = models.TextField(max_length=2000, default=\"\", blank=True, null=True)\n icon = models.ForeignKey(Icon, blank = True, null = True, on_delete = models.SET_NULL)\n icon_description = models.CharField(blank=True, null=True, max_length=1000)\n image = models.ImageField(default=\"\")\n image_description = models.CharField(blank=True, null=True, max_length=1000)\n image_data_uri = models.CharField(null=True, blank=True, default=\"\", max_length=100000000)\n title = models.CharField(max_length=65, default=\"New Item\")\n page = models.ForeignKey(Page, blank = True, null = True, on_delete = models.SET_NULL)\n display_priority = models.IntegerField(default = 1, null = True, blank = True)\n link = models.SlugField(blank = True, null = True, unique = True)\n last_updated = models.DateTimeField(blank = True, null = True)\n @property\n def image_url(self):\n if self.image and hasattr(self.image, 'url'):\n return self.image.url\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n\n def __str__(self):\n if 
(self.title):\n return self.title\n else:\n return str(self.id)\n def _get_unique_slug(self):\n print(\"HI I AM GETTING UNIQUE SLUG\")\n link = slugify(self.title)\n unique_slug = link\n num = 1\n while ResourceType.objects.filter(link=unique_slug).exists():\n print(\"hi that slug already exists\")\n unique_slug = '{}-{}'.format(link, num)\n num += 1\n return unique_slug\n def save(self, *args, **kwargs):\n previous_image = self.image\n super(ResourceType, self).save(*args, **kwargs)\n if not self.link:\n self.link = self._get_unique_slug()\n super().save()\n\n #make smaller size of the image\n if self.image and previous_image.name != self.image.name:\n \n image = PILImage.open(self.image.path)\n if self.image.width > 400 or self.image.height > 400:\n if (image.width <= image.height):\n ratio = 400.0 / image.width\n elif (image.height <= image.width):\n ratio = 400.0 / image.height\n new_width = int(image.width * ratio)\n new_height = int(image.height * ratio)\n image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n tn_buffer = BytesIO()\n tn_buffer.seek(0)\n image.save(fp=tn_buffer, format='PNG')\n self.image.save(self.image.name,\n ContentFile(tn_buffer.getvalue()), save=False)\n\n #make data uri\n if (image.width <= image.height):\n ratio = 50.0 / image.width\n elif (image.height <= image.width):\n ratio = 50.0 / image.height\n new_width = int(image.width * ratio)\n new_height = int(image.height * ratio)\n image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n buffered = BytesIO()\n image.save(fp=buffered, format='PNG')\n im_data = buffered.getvalue()\n self.image_data_uri = 'data:image/png;base64,' + base64.b64encode(im_data).decode(encoding=\"utf-8\", errors=\"strict\")\n image.close()\n super(ResourceType, self).save(*args, **kwargs)\n\nclass SpecialAnnouncement(models.Model):\n label = models.CharField(max_length = 2000)\n starts_showing = models.DateTimeField(auto_now_add=True, blank = True)\n stops_showing = models.DateTimeField(auto_now_add=True, blank = True)\n announcement = models.TextField(max_length = 500000)\n display_priority = models.IntegerField(default = 1, null = True, blank = True)\n internal_link = models.CharField(blank=True, null=True, max_length=600)\n external_link = models.URLField(blank=True, null=True)\n resource_type = models.ForeignKey(ResourceType, blank=True, null=True, on_delete=models.CASCADE)\n def __str__(self):\n return self.label\n\nclass Resource(models.Model):\n WIDTH_OPTIONS = ((\"half-width\", \"half-width\"), (\"full-width\", \"full-width\"))\n display_priority = models.IntegerField(default = 1, null = True, blank = True)\n title = models.CharField(max_length=30000, blank=True, null=True)\n text = models.TextField(max_length=500000, blank=True, null=True)\n paragraph_one = models.TextField(max_length=500000, blank=True, null=True)\n paragraph_two = models.TextField(max_length=500000, blank=True, null=True)\n paragraph_three = models.TextField(max_length=500000, blank=True, null=True)\n paragraph_four = models.TextField(max_length=500000, blank=True, null=True)\n pdf_document = models.FileField(upload_to=\"\", blank= True, null = True)\n external_link_url = models.URLField(max_length = 400000, blank = True, null = True)\n image = models.ImageField(blank = True, null = True, default=\"\")\n image_description = models.CharField(blank=True, null=True, max_length=1000)\n image_data_uri = models.CharField(null=True, blank=True, default=\"\", max_length=100000000)\n icon = models.ImageField(blank = True, null = True, 
default=\"\")\n icon_description = models.CharField(blank=True, null=True, max_length=1000)\n image_size = models.ForeignKey(ImageSize, blank = True, null = True, on_delete = models.CASCADE)\n resource_type = models.ForeignKey(ResourceType, null=True, blank=True, on_delete=models.SET_NULL)\n google_map_url = models.URLField(max_length = 400000, blank = True, null = True)\n link = models.SlugField(blank = True, null = True, unique = True)\n search_keywords = models.TextField(max_length = 500000, blank = True, null = True)\n width = models.TextField(max_length=20, choices=WIDTH_OPTIONS, default=\"full-width\")\n @property\n def image_url(self):\n if self.image and hasattr(self.image, 'url'):\n return self.image.url\n @property\n def icon_url(self):\n if self.icon and hasattr(self.icon, 'url'):\n return self.icon.url\n @property\n def document_url(self):\n if self.pdf_document and hasattr(self.pdf_document, 'url'):\n return self.pdf_document.url\n def __str__(self):\n if (self.title):\n return self.title\n else:\n return str(self.id)\n def _get_unique_slug(self):\n link = slugify(self.title)\n unique_slug = link\n num = 1\n while Resource.objects.filter(link=unique_slug).exists():\n unique_slug = '{}-{}'.format(link, num)\n num += 1\n return unique_slug\n def save(self, *args, **kwargs):\n previous_image = self.image\n super(Resource, self).save(*args, **kwargs)\n if self.resource_type:\n self.resource_type.last_updated = timezone.now()\n\n if not self.link:\n self.link = self._get_unique_slug()\n super().save()\n\n #make smaller size of the image\n if self.image and previous_image.name != self.image.name:\n image = PILImage.open(self.image.path)\n if image.width > 1000 or image.height > 1000:\n if (image.width <= image.height):\n ratio = 1000.0 / image.width\n elif (image.height <= image.width):\n ratio = 1000.0 / image.height\n new_width = int(image.width * ratio)\n new_height = int(image.height * ratio)\n image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n tn_buffer = BytesIO()\n tn_buffer.seek(0)\n image.save(fp=tn_buffer, format='PNG')\n self.image.save(self.image.name,\n ContentFile(tn_buffer.getvalue()), save=False)\n\n #make data uri\n if (image.width <= image.height):\n ratio = 50.0 / image.width\n elif (image.height <= image.width):\n ratio = 50.0 / image.height\n new_width = int(image.width * ratio)\n new_height = int(image.height * ratio)\n image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n buffered = BytesIO()\n image.save(fp=buffered, format='PNG')\n im_data = buffered.getvalue()\n self.image_data_uri = 'data:image/png;base64,' + base64.b64encode(im_data).decode(encoding=\"utf-8\", errors=\"strict\")\n image.close()\n super(Resource, self).save(*args, **kwargs)\n\nclass SlideShowImage(models.Model):\n image = models.ImageField(upload_to=\"images\")\n data_uri = models.CharField(null=True, blank=True, default=\"\", max_length=100000000)\n author = models.CharField(null=True, blank=True, max_length=400)\n image_description = models.CharField(blank=True, null=True, max_length=1000)\n thumbnail = models.ImageField(upload_to=\"images/thumbnails\", null=True, blank=True)\n def __str__ (self):\n if self.description:\n return str(self.description)\n else:\n return str(self.id)\n def save(self, *args, **kwargs):\n super(SlideShowImage, self).save(*args, **kwargs)\n #make thumbnails\n if self.image:\n image = PILImage.open(self.image.path)\n\n #if image is too big, resize it\n if image.width > 1300 or image.height > 1300:\n if (image.width <= 
image.height):\n                    ratio = 1300.0 / image.width\n                elif (image.height <= image.width):\n                    ratio = 1300.0 / image.height\n                new_width = int(image.width * ratio)\n                new_height = int(image.height * ratio)\n                image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n                tn_buffer = BytesIO()\n                tn_buffer.seek(0)\n                image.save(fp=tn_buffer, format='PNG')\n                self.image.save(self.image.name,\n                    ContentFile(tn_buffer.getvalue()), save=False)\n            #make thumbnail\n            if (image.width <= image.height):\n                ratio = 150.0 / image.width\n            elif (image.height <= image.width):\n                ratio = 150.0 / image.height\n            new_width = int(image.width * ratio)\n            new_height = int(image.height * ratio)\n            image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n            tn_buffer = BytesIO()\n            tn_buffer.seek(0)\n            image.save(fp=tn_buffer, format='PNG')\n            self.thumbnail.save(self.image.name,\n                ContentFile(tn_buffer.getvalue()), save=False)\n            #make data uri\n            if (image.width <= image.height):\n                ratio = 50.0 / image.width\n            elif (image.height <= image.width):\n                ratio = 50.0 / image.height\n            new_width = int(image.width * ratio)\n            new_height = int(image.height * ratio)\n            image = image.resize((new_width,new_height), PILImage.ANTIALIAS)\n            buffered = BytesIO()\n            image.save(fp=buffered, format='PNG')\n            im_data = buffered.getvalue()\n            self.data_uri = 'data:image/png;base64,' + base64.b64encode(im_data).decode(encoding=\"utf-8\", errors=\"strict\")\n            image.close()\n        super(SlideShowImage, self).save(*args, **kwargs)\n    @property\n    def image_url(self):\n        if self.image and hasattr(self.image, 'url'):\n            return self.image.url\n    @property\n    def thumbnail_url(self):\n        if self.thumbnail and hasattr(self.thumbnail, 'url'):\n            return self.thumbnail.url\n\n\n\n\n\n# Create your models here.\n#\n","sub_path":"website/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"195069675","text":"from rest_framework import status\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom rest_framework_jwt.settings import api_settings\nfrom rest_framework.views import APIView\nfrom .serializers import QQ_TokenViewSerializer\nfrom .utils import OauthQQ\nfrom .models import OauthQQUser\n# Create your views here.\n\n\n# view that builds the QQ OAuth login URL\nclass QQ_OauthURLView(APIView):\n    \"\"\"\n    Build the QQ OAuth login URL\n    GET /oauth/qq/statues/\n    \"\"\"\n\n    def get(self, request):\n\n        # build the auth_url\n        # https://graph.qq.com/oauth2.0/authorize\n        # the request must carry the following parameters:\n        # response_type  required  grant type, fixed to 'code'.\n        # client_id  required  the appid assigned to the application once QQ login is approved.\n        # redirect_uri  required  callback address after a successful authorization; it must live under the main domain registered for the appid, typically the site home page or the user center, and it has to be URL-encoded.\n        # state  required  client-side state value used by third-party apps against CSRF attacks; it is echoed back unchanged in the callback, so strictly verify the binding between the user and the state parameter.\n        # scope  optional  scope=get_user_info\n\n        # fetch the state\n        # state = 'tate'\n        state = request.query_params.get('state')\n        # set the base_url, remember the trailing ?\n        # base_url = 'https://graph.qq.com/oauth2.0/authorize?'\n        # assemble the parameters\n        # params = {\n        #     'response_type':'code',\n        #     'client_id':settings.QQ_APP_ID,\n        #     'redirect_uri':settings.QQ_REDIRECT_URL,\n        #     'state': state,\n        #     'scope':'get_user_info',\n        # }\n        # http://api.meiduo.site:8000/oauth/qq/statues/?state=/\n        # urlencode the parameters, then join them onto the url\n\n        qq = OauthQQ()\n\n        auth_url = qq.get_oauth_url(state)\n\n        # return the response\n        return Response({'auth_url': auth_url})\n\n\n\n# exchange the authorization code for an access_token\nclass QQ_TokenView(GenericAPIView):\n    \"\"\"\n    Fetch the access_token\n    GET /oauth/qq/users/?code=xxx\n    \"\"\"\n\n    serializer_class = QQ_TokenViewSerializer\n
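\n    # flow: the client arrives with ?code=xxx; the code is exchanged for an\n    # access_token, and the access_token for an openid -- the stable per-app QQ\n    # user id that OauthQQUser records are keyed on\n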
\n    def get(self, request):\n\n        # 1. fetch the code and validate it\n        code = request.query_params.get('code')\n\n        if code is None:\n            return Response({'message': '缺少参数'}, status=status.HTTP_400_BAD_REQUEST)\n\n        # 2. fetch the token\n        qq = OauthQQ()\n        # anything can go wrong when calling an external resource, so it is best\n        # to catch exceptions up front\n        try:\n            # exchange the code for an access_token\n            access_token = qq.get_access_token(code=code)\n            openid = qq.get_openid(access_token)\n        except Exception:\n            return Response({'message': '服务异常'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)\n\n        # return response\n\n\n        # branch on the openid:\n        # if the openid already exists in the database, return a login token directly\n        # if it does not, this is the user's first binding, so redirect to the binding page\n        try:\n            qq_user = OauthQQUser.objects.get(openid=openid)\n        except OauthQQUser.DoesNotExist:\n            # first binding: redirect to the binding page\n            # the openid has to be passed along as a parameter\n            # return Response({'openid': openid})\n            # the openid is sensitive, so it is signed before being handed out\n            access_token = OauthQQUser.generate_open_id_token(openid)\n\n            return Response({'access_token': access_token})\n        else:\n            user = qq_user.user\n            # the openid is already in the database, i.e. the account is already\n            # bound, so return a login token directly\n            jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n            jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n            payload = jwt_payload_handler(user)\n            token = jwt_encode_handler(payload)\n\n            response = Response({\n                'token': token,\n                'user_id': user.id,\n                'username': user.username,\n            })\n\n            return response\n\n\n    def post(self, request):\n        \"\"\"\n        When binding a user:\n        1. the client submits the phone number, password, sms code and the access_token (openid)\n        2. the phone number and the access_token are validated\n        3. the user record is bound to the openid\n        \"\"\"\n\n        # create the serializer\n        serializer = QQ_TokenViewSerializer(data=request.data)\n        # validate\n        serializer.is_valid(raise_exception=True)\n        # save\n        user = serializer.save()\n\n        # generate a logged-in token\n        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\n        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n        payload = jwt_payload_handler(user)\n        token = jwt_encode_handler(payload)\n\n        response = Response({\n            'token': token,\n            'user_id': user.id,\n            'username': user.username,\n        })\n\n        # return the login token\n        return response\n\n\n\n\n\n# from itsdangerous import TimedJSONWebSignatureSerializer as S\n# # s = S(scret_key = second)\n# # scret_key: the string used for signing\n# # second: how long the signature stays valid\n# # initialize the serializer\n# s = S('日寒月暖煎人寿', 3600)\n#\n# # sensitive data can be handed to the serializer, which runs it through its\n# # algorithm and produces a signed string\n# dict = {\n#     'openid': '123456789'\n# }\n# data = s.dumps(dict)\n# token.decode()\n# data\n#\n# # verifying the data\n# # b\n# # 'eyJhbGciOiJIUzI1NiIsImlhdCI6MTUzOTMyMzUxMiwiZXhwIjoxNTM5MzI3MTEyfQ\n# # .eyJvcGVuaWQiOiIxMjM0NTY3ODkifQ\n# # .e3ALSGzdMct0lW0ijfcJ2IKG0CcpINHdk5eadqD60Ck'\n\n# s.loads('eyJhbGciOiJIUzI1NiIsImlhdCI6MTUzOTMyMzUxMiwiZXhwIjoxNTM5MzI3MTEyfQ.eyJvcGVuaWQiOiIxMjM0NTY3ODkifQ.e3ALSGzdMct0lW0ijfcJ2IKG0CcpINHdk5eadqD60Ck'\n# )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# view that builds the Weixin (WeChat) OAuth login URL\nclass Weixin_OauthURLView(APIView):\n    \"\"\"Build the Weixin (WeChat) OAuth login URL\"\"\"\n\n    # GET /oauth/qweixin/statues/\n\n\n    def get(self, request):\n        state = request.query_params.get('state')\n\n        weixin = OauthQQ()\n\n        auth_url = weixin.get_oauth_url(state)\n\n        # return the response\n        return Response({'auth_url': auth_url})\n\n\n","sub_path":"mall/apps/oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"26046330","text":"\"\"\" \n@Author: huuuuusy\n@GitHub: https://github.com/huuuuusy\nOS: Ubuntu 18.04\nIDE: VS Code 1.37\nTools: python == 3.7.3\n\"\"\"\n\n\"\"\"\nApproach:\n    Defensive programming:\n        The straightforward solution plants a flower wherever three consecutive 0s appear in the middle, but both ends need special-case handling.\n        The defensive trick pads one 0 onto each end, so the ends behave exactly like the middle and need no separate discussion: in every case, a flower can be planted wherever three consecutive 0s appear.\n
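    Example (illustrative): [1,0,0,0,1] is padded to [0,1,0,0,0,1,0]; index 3 sees three consecutive 0s and receives the single flower, so n=1 succeeds.\n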
Result:\n    Runtime: 196 ms, faster than 81.87% of Python3 submissions\n    Memory: 13.9 MB, less than 10.67% of Python3 submissions\n\"\"\"\n\nclass Solution:\n    def canPlaceFlowers(self, flowerbed, n):\n        new_flowers = [0]+flowerbed+[0]\n        for i in range(1, len(new_flowers)-1): # i ranges over the real flowerbed, skipping the padding zeros at both ends\n            if new_flowers[i-1] == new_flowers[i] == new_flowers[i+1] == 0:\n                new_flowers[i] = 1 # plant a flower here\n                n -= 1\n        return n <= 0 # True means all n flowers could be planted\n\nif __name__ == \"__main__\":\n    flowerbed = [1,0,0,0,1]\n    n = 1\n    answer = Solution().canPlaceFlowers(flowerbed, n)\n    print(answer)","sub_path":"LeetCode/python-R1/0605-种花问题D/V1.py","file_name":"V1.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"226378484","text":"from PIL import Image\r\nfrom keras.applications.vgg16 import preprocess_input\r\nimport base64\r\nfrom io import BytesIO\r\nimport json\r\nimport random\r\nimport cv2\r\nfrom keras.models import load_model\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\n\r\nmodel=load_model('facefeatures_model2.h5')\r\n\r\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\ncapture=cv2.VideoCapture(0)\r\nwhile True:\r\n\tret,frame=capture.read()\r\n\tfaces=face_cascade.detectMultiScale(frame,1.3,5)\r\n\tfor(x,y,w,h) in faces:\r\n\t\tROI=frame[y:y+h,x:x+w]\r\n\t\tfor f in faces:\r\n\t\t\tnew_array=cv2.resize(ROI,(224,224))\r\n\t\t\tim=Image.fromarray(new_array,'RGB')\r\n\t\t\timg_array=np.array(im)\r\n\t\t\timg_array=np.expand_dims(img_array,axis=0)\r\n\r\n\t\t\tpred=model.predict(img_array)\r\n\t\t\tprint(pred)\r\n\r\n\t\t\tcv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n\r\n\t\t\tif pred[0][0]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Abhinav',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][1]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Anukriti',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][2]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Geeta',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\t\r\n\t\t\telif pred[0][3]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Kanish',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][4]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Lalit',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][5]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Prashant',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][6]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Sushil',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][7]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Swati',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\t\t\telif pred[0][8]>0.9:\r\n\t\t\t\tcv2.putText(frame,'Vandana',(x,y),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\tcv2.imshow(\"Frame\",frame)\r\n\tif cv2.waitKey(1) & 0xFF==ord(\"q\"):\r\n\t\tbreak\r\ncapture.release()\r\ncv2.destroyAllWindows()\r\n\r\n# def face_extractor(img):\r\n# \tfaces=face_cascade.detectMultiScale(img,1.3,5)\r\n\r\n# \tif faces is ():\r\n# \t\treturn None\r\n\t\r\n# \tfor (x,y,w,h) in faces:\r\n# \t\tcv2.rectangle(img,(x,y),(x+w,y+h),(0,255,255),2)\r\n# \t\tcropped_face=img[y:y+h,x:x+w]\r\n\r\n# \treturn cropped_face\r\n\r\n# video_capture=cv2.VideoCapture(0)\r\n# while True:\r\n# \t_, frame=video_capture.read()\r\n\r\n# \tface=face_extractor(frame)\r\n# \tif type(face) is np.ndarray:\r\n# \t\tface=cv2.resize(face,(224,224))\r\n# \t\tim=Image.fromarray(face,'RGB')\r\n# \t\timg_array=np.array(im)\r\n# \t\timg_array=np.expand_dims(img_array,axis=0)\r\n# \t\tpred=model.predict(img_array)\r\n# \t\tprint(pred)\r\n\r\n# 
\t\tname=\"None Matching\"\r\n\r\n# \t\tif pred[0][0]>0.5:\r\n# \t\t\tname='Abhinav'\r\n# \t\telif pred[0][1]>0.5:\r\n# \t\t\tname='Geeta'\r\n# \t\telif pred[0][2]>0.5:\r\n# \t\t\tname='Swati'\r\n\r\n# \t\tcv2.putText(frame,name,(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n# \telse:\r\n# \t\tcv2.putText(frame,\"No face found\",(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)\r\n\r\n# \tcv2.imshow('Video',frame)\r\n# \tif cv2.Waitkey(1) & 0xFF==ord('q'):\r\n# \t\tbreak\r\n# video_capture.release()\r\n# cv2.destroyAllWindows()","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63042999","text":"from swiftstory.Game import Game\nfrom swiftstory.Cards import Cards\n\nimport os\n\nclass GameManager():\n def __init__(self):\n self.langs = {}\n\n for filename in next(os.walk('usr/share/swiftstory/lang'))[1]:\n self.langs[filename] = {}\n\n for lang in self.langs:\n self.langs[lang]['black_cards'] = Cards.get_black_cards(lang)\n self.langs[lang]['white_cards'] = Cards.get_white_cards(lang)\n\n self.langs[lang]['games'] = {}\n\n def join_game(self, game_name, lang):\n if self.langs.get(lang) is None:\n return None\n\n games = self.langs[lang]['games']\n black_cards = self.langs[lang]['black_cards']\n white_cards = self.langs[lang]['white_cards']\n\n game = games.get(game_name)\n\n if game is None:\n print('Starting new game')\n\n game = games[game_name] = Game(white_cards, black_cards)\n\n return game\n","sub_path":"swiftstory/GameManager.py","file_name":"GameManager.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"421760953","text":"'''\n\n Online Python Compiler.\n Code, Compile, Run and Debug python program online.\nWrite your code in this editor and press \"Run\" button to execute it.\n\n'''\n\n# Function to print sum \ndef returnSum(myDict): \n \n sum = 0\n for i in myDict: \n sum = sum + myDict[i] \n \n return sum\n \n# Driver Function \ndict = {'a': 100, 'b':200, 'c':300} \nprint(\"Dictionary\", dict)\nprint(\"Sum :\", returnSum(dict)) ","sub_path":"Assignment_3/Q_3.py","file_name":"Q_3.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237719535","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import Group\nfrom django.utils.translation import ugettext as _\n\nfrom .models import Driver, MyUser\nfrom .forms import UserChangeForm, UserCreationForm\n\n# Register your models here.\n\n\nclass MyUserAdmin(UserAdmin):\n form = UserChangeForm\n add_form = UserCreationForm\n\n list_display = ('id', '__unicode__', 'is_superuser', 'is_admin',\n 'is_confirmed', 'date_joined',)\n list_filter = ('is_active', 'is_admin', 'is_confirmed',)\n readonly_fields = ('date_joined', 'last_login',\n 'modified',)\n fieldsets = (\n (None,\n {'fields': ('email', 'password',)}),\n ('Basic information',\n {'fields': ('full_name', 'phone_number', 'profile_picture',\n 'university',)}),\n ('Permissions',\n {'fields': ('is_active', 'is_confirmed', 'is_admin',\n 'is_superuser', 'user_permissions')}),\n (_('Dates'),\n {'fields': ('date_joined', 'last_login', 'modified',)}),\n )\n\n add_fieldsets = (\n (None,\n {'classes': ('wide',),\n 'fields': ('email', 'password1', 'password2',)}),\n )\n search_fields = 
('email', 'full_name',)\n    ordering = ('email',)\n    filter_horizontal = ('user_permissions',)\n    actions = ('activate', 'disable',)\n\n    def activate(self, request, queryset):\n        \"\"\"\n        Updates is_active to be True.\n        \"\"\"\n        queryset.update(is_active=True)\n    activate.short_description = _(\"Activate selected users\")\n\n    def disable(self, request, queryset):\n        \"\"\"\n        Updates is_active to be False.\n        \"\"\"\n        queryset.update(is_active=False)\n    disable.short_description = _(\"Disable selected users\")\n\nadmin.site.register(MyUser, MyUserAdmin)\n\n\nclass DriverAdmin(admin.ModelAdmin):\n    list_display = ('id', '__unicode__', 'is_active',\n                    'average_rating',)\n    list_filter = ('is_active', 'trips_completed',)\n    fields = ('user', 'car_status', 'is_active', 'rating',\n              'trips_completed', 'created', 'modified',)\n    readonly_fields = ('created', 'modified',)\n    actions = ('activate', 'disable',)\n\n    class Meta:\n        model = Driver\n\n    def activate(self, request, queryset):\n        \"\"\"\n        Updates is_active to be True.\n        \"\"\"\n        queryset.update(is_active=True)\n    activate.short_description = _(\"Activate selected drivers\")\n\n    def disable(self, request, queryset):\n        \"\"\"\n        Updates is_active to be False.\n        \"\"\"\n        queryset.update(is_active=False)\n    disable.short_description = _(\"Disable selected drivers\")\n\n# admin.site.register(Driver, DriverAdmin)\nadmin.site.unregister(Group)\n","sub_path":"accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"454547197","text":"DEFAULT = {\n    'CellCallMaxIter': 100,\n    'CellCallTolerance': 0.02,\n    'Inefficiency': 0.2,\n    'InsideCellBonus': 2,\n    'MisreadDensity': 0.00001,\n    'SpotReg': 0.1,\n    'nNeighbors': 3,\n    'rGene': 20,\n    'rSpot': 2,\n    'label_image': '../data/default/CellMap.mat',\n    'roi': {\"x0\": 6150, \"x1\": 13751, \"y0\": 12987, \"y1\": 18457},\n    'geneset': '../data/GeneSet.mat',\n}\n","sub_path":"source/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"338988610","text":"import settings as env # loads api keys\nfrom newsapi import NewsApiClient # queries articles\nimport newspaper # scrapes article contents\nfrom datetime import datetime, timedelta\nimport sys\nimport db\n\n# The source ids for The News Api (https://newsapi.org/docs/endpoints/sources)\nsource_ids = [\n    'cnn',\n    'fox-news',\n    'abc-news',\n    'cbc-news',\n    'the-hill',\n    'the-new-york-times',\n    'politico',\n    'associated-press',\n    'msnbc',\n    'the-washington-post',\n    'reuters',\n    'breitbart-news'\n    # 'the-huffington-post',\n    # 'vice-news',\n    # 'daily-mail',\n    # 'national-review',\n    # 'new-york-magazine',\n    # 'bbc-news',\n]\n\n# Set up the news querying client (https://newsapi.org/docs)\nclient = NewsApiClient(api_key = env.NEWS_API_KEY)\n\n# the minimum number of characters an article can have\n# to perform sentiment analysis. 
Some articles are just\n# videos and don't have enough substantial text to get\n# a valid, reasonable value\nmin_char_length = 100\n\n# Formats a datetime into the YYYY-MM-DD, the format used by newsapi\ndef formatDate(date):\n # add leading 0s to month and day if needed\n day = f'0{date.day}' if date.day < 10 else f'{date.day}'\n month = f'0{date.month}' if date.month < 10 else f'{date.month}'\n return f'{date.year}-{month}-{day}'\n\n\n# returns a list of strings containing the contents of articles\n# from the given source on the given date pertaining to the given keyword\ndef getArticles(keyword, source, date):\n # construct date strings to use for newsapi. The start date is the date\n # given and the end date is the next day.\n start_date = formatDate(date)\n end_date = formatDate(date + timedelta(days=1))\n\n article_data = [] # stores article data (ie urls, sources, authors)\n\n # iterate through all pages of the query results. News API is limited to\n # only 100 results, so the client throws an error before the last page\n # usually\n page = 1\n while(True):\n try:\n # get the next page of article data and add them to the article list\n query = client.get_everything(q = keyword, page=page, sources=source,\n from_param=start_date, to=end_date)\n article_data.extend(query['articles'])\n # increment page number before looping\n page += 1\n except:\n # either the last page was found or results were capped by API\n break\n\n articles = []\n # Iterate through articles and use their urls to scrape contents\n for data in article_data:\n # Get article contents using the scraper library (https://github.com/mattlisiv/newsapi-python)\n article = newspaper.Article(data['url'])\n try:\n # try downloading the article. The scraper library can handle\n # a lot of sources, but not everything\n article.download()\n article.parse()\n except:\n # scraper was unable to download and parse the article. 
Try the next article\n continue\n\n # make sure the article is long enough to be reasonably analyzed\n if(len(article.text) > min_char_length):\n articles.append(article.text)\n\n print(f'Got {len(articles)} articles from {source} on {date}')\n\n return articles\n\n\nif __name__ == '__main__':\n keyword = 'trump'\n\n end_date = datetime.now()\n date = end_date - timedelta(days=10) # start date\n while date < end_date:\n for source in source_ids:\n articles = getArticles(keyword, source, date)\n if len(articles) != 0:\n db.store_articles(articles, source, date)\n date += timedelta(days=1)\n","sub_path":"articles.py","file_name":"articles.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353232057","text":"\"\"\"\n CONFIG ENV\n\"\"\"\nimport logging\n\nfrom datetime import timedelta\n\nSECRET_KEY = \"Pl@yV0XfdsjkEERkjkW1jk$5Jk%#12dsfdsadjk32\"\n\nWTF_CSRF_ENABLED = True\nCSRF_ENABLED = True\n\n# Avoid 404 response default message\nERROR_404_HELP = False\n\n# JWT CONSTANTS\nJWT_AUTH_URL_RULE = \"/service/auth\"\nJWT_EXPIRATION_DELTA = timedelta(days=1)\n\nlogging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S')\n","sub_path":"default_config.py","file_name":"default_config.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"266192099","text":"import pandas as pd\nimport time,datetime\nimport numpy as np\nfrom example.informs import portfolio\nimport cmath\n\n#timeseries data\nall_data=pd.read_csv('/TimeSeriesInputData.csv')\ndate_list=list(set(list(all_data['DATE'])))\nfor i in range(len(date_list)):\n ak=date_list[i]\n date_list[i]=ak[6:]+str('-')+ak[:2]+'-'+ak[3:5]\ndate_list.sort()\nall_data['DATE']=pd.to_datetime(all_data['DATE'])\nall_data=all_data.rename(columns={'NAME':'name','DATE':'date','SEDOL':'sedol','SECTOR':'sector','BETA':'beta','ALPHA_SCORE':'as','BENCH_WEIGHT':'bw',\"MCAP_Q\":'mq'})\nsedol_list=all_data['sedol'].unique().tolist()\n\n\n\ndic_data = {k: v for k, v in all_data.groupby('date')}\n\nusing_dic = dic_data[pd.to_datetime(date_list[0])]\n\n# print(dic_data[pd.to_datetime(date_list[1])])\n# print(using_dic.index)\n\nasset_list = []\nfor i in using_dic[\"sedol\"]:\n asset_list.append(i)\n\n\n\n\n\ndic_sedol_as = {using_dic[\"sedol\"][i] : 10000*using_dic[\"as\"][i] for i in using_dic.index}\n\ndic_bench = {using_dic[\"sedol\"][i] : using_dic[\"bw\"][i] for i in using_dic.index}\n\ndic_beta = {using_dic[\"sedol\"][i] : using_dic[\"beta\"][i] for i in using_dic.index}\n\ndic_sector = {using_dic[\"sector\"][i] : [] for i in using_dic.index }\ndic_MCAP = {using_dic[\"mq\"][i] : [] for i in using_dic.index}\n\nfor i in using_dic.index:\n dic_sector[using_dic[\"sector\"][i]].append(using_dic[\"sedol\"][i])\n dic_MCAP[using_dic[\"mq\"][i]].append(using_dic[\"sedol\"][i])\n\n\n\n\n\n\n\n\n#risk:cov_mat\ndef risk(date_str):\n risk_data=pd.read_csv('/Riskmodels2/cov_mat_%s.csv'%(date_str))\n risk_sedol=risk_data['ROW_INDEX'].unique().tolist()\n risk_mat = np.zeros((len(risk_sedol),len(risk_sedol)))\n risk_mat[np.triu_indices(len(risk_sedol), 0)] = list(risk_data['VALUE'])\n irows,icols = np.triu_indices(len(risk_sedol),0)\n risk_mat[icols,irows]=risk_mat[irows,icols]\n return risk_data,risk_sedol,risk_mat\nrisk_data,risk_sedol,risk_mat=risk(date_list[0])\n\n\n\nsedol_var_list =[]\n\nfor i in risk_sedol:\n 
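# one decision-variable name per asset in the risk model; the 'd' prefix\n    # presumably distinguishes these from the commented-out 'y' and 'c' variants below\n    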
sedol_var_list.append(\"d\"+str(i))\n\n\n\n# for i in risk_sedol:\n# sedol_var_list.append(\"y\"+str(i))\n#\n# for j in risk_sedol:\n# for i in range(2):\n# sedol_var_list.append(\"c\"+str(j)+str(i))\n\nsedol_var_list.append(\"assum\")\nalpha = []\n\n\nfor i in risk_sedol:\n alpha.append(-1*dic_sedol_as[i])\n\n# asset_list.append(risk_sedol[0])\n\n# dic_bench.update({risk_sedol[0]:0})\n# dic_beta.update({risk_sedol[0]:0})\n\nqmat=[]\n\n\n\n\n\nfor i in range(len(risk_mat)):\n qmat_1=[]\n qmat_1.append(sedol_var_list)\n new_risk_mat=[]\n for j in risk_mat[i]:\n new_risk_mat.append(j)\n new_risk_mat.append(0)\n # for j in risk_mat[i]:\n # new_risk_mat.append(0)\n # for j in risk_mat[i]:\n # for k in range(2):\n # new_risk_mat.append(0)\n # qmat_1.append(list(risk_mat[i]))\n qmat_1.append(new_risk_mat)\n qmat.append(qmat_1)\n\n\nfor i in range(1):\n qmat_1=[]\n qmat_1.append(sedol_var_list)\n new_risk_mat=[]\n for j in risk_mat[i]:\n new_risk_mat.append(0)\n new_risk_mat.append(0)\n # for j in risk_mat[i]:\n # new_risk_mat.append(0)\n # for j in risk_mat[i]:\n # for k in range(2):\n # new_risk_mat.append(0)\n qmat_1.append(new_risk_mat)\n qmat.append(qmat_1)\n\n\n# for i in range(len(risk_mat)):\n# qmat_1=[]\n# qmat_1.append(sedol_var_list)\n# new_risk_mat=[]\n# for j in risk_mat[i]:\n# new_risk_mat.append(0)\n# for j in risk_mat[i]:\n# new_risk_mat.append(0)\n# for j in risk_mat[i]:\n# new_risk_mat.append(0)\n# for j in risk_mat[i]:\n# for k in range(2):\n# new_risk_mat.append(0)\n# qmat_1.append(new_risk_mat)\n# qmat.append(qmat_1)\n#\n# for k in range(len(risk_mat)):\n# for i in range(2):\n# qmat_1=[]\n# qmat_1.append(sedol_var_list)\n# new_risk_mat=[]\n# for j in range(len(risk_mat[i])):\n# new_risk_mat.append(0)\n# for j in range(len(risk_mat[i])):\n# new_risk_mat.append(0)\n# for j in range(len(risk_mat[i])):\n# new_risk_mat.append(0)\n# for j in risk_mat[i]:\n# for k in range(2):\n# new_risk_mat.append(0)\n# qmat_1.append(new_risk_mat)\n# qmat.append(qmat_1)\n# 픽스\n\n\n\n\nq_con1 = []\nq_con2 = []\nq_val = []\n\n\nfor i in range(len((risk_mat[0]))):\n for j in range(len(risk_mat[0])):\n if j >= i:\n q_con1.append(i)\n q_con2.append(j)\n if i == j:\n ex_list = list(risk_mat[i])\n q_val.append(ex_list[j])\n else:\n ex_list = list(risk_mat[i])\n q_val.append(2*ex_list[j])\n\n\n\nQ_con = []\nQ_con.append(q_con1)\nQ_con.append(q_con2)\nQ_con.append(q_val)\n\n\nf0 = open('d:/pic/dic_sector.txt', 'wb')\nf1 = open('d:/pic/dic_bench.txt', 'wb')\nf2= open('d:/pic/risk_sedol.txt', 'wb')\nf3= open('d:/pic/dic_MCAP.txt', 'wb')\nf4= open('d:/pic/dic_beta.txt', 'wb')\nf5 = open('d:/pic/alpha.txt', 'wb')\nf6 = open('d:/pic/qmat.txt', 'wb')\nf7 = open('d:/pic/Q_con.txt', 'wb')\nf8 = open('d:/pic/risk_mat.txt', 'wb')\nimport pickle\npickle.dump(dic_sector, f0)\npickle.dump(dic_bench, f1)\npickle.dump(risk_sedol, f2)\npickle.dump(dic_MCAP, f3)\npickle.dump(dic_beta, f4)\npickle.dump(alpha, f5)\npickle.dump(qmat, f6)\npickle.dump(Q_con, f7)\npickle.dump(risk_mat, f8)\n\nf0.close()\nf1.close()\nf2.close()\nf3.close()\nf4.close()\nf5.close()\nf6.close()\nf7.close()\nf8.close()\n\n\n\n\n# dic_result = portfolio(sector=dic_sector,bench=dic_bench,asset=risk_sedol,MCAPQ=dic_MCAP,beta=dic_beta,alpha=alpha,qmat=qmat, Q_con=Q_con)\n# dic_result = portfolio(sector=dic_sector,bench=dic_bench,asset=risk_sedol,MCAPQ=dic_MCAP,beta=dic_beta,alpha=alpha,qmat=qmat, Q_con=Q_con)\n\n# w_dic = {}\n#\n#\n# for i in dic_result.keys():\n# w_dic.update({str(i):dic_bench[i]+dic_result[i]})\n#\n# pre_list = 
set(list(w_dic.keys()))\n#\n# rsrs = 0\n#\n# for i in risk_sedol:\n# rsrs += dic_result[i]*dic_sedol_as[i]\n#\n# print(rsrs)\n\n######################################################################################################################################################\n#\n# using_dic = dic_data[pd.to_datetime(date_list[1])]\n#\n# asset_list = []\n# for i in using_dic[\"sedol\"]:\n# asset_list.append(i)\n#\n# dic_sedol_as = {using_dic[\"sedol\"][i] : using_dic[\"as\"][i] for i in using_dic.index}\n#\n# dic_bench = {using_dic[\"sedol\"][i] : using_dic[\"bw\"][i] for i in using_dic.index}\n#\n# dic_beta = {using_dic[\"sedol\"][i] : using_dic[\"beta\"][i] for i in using_dic.index}\n#\n# dic_sector = {using_dic[\"sector\"][i] : [] for i in using_dic.index }\n# dic_MCAP = {using_dic[\"mq\"][i] : [] for i in using_dic.index}\n#\n# for i in using_dic.index:\n# dic_sector[using_dic[\"sector\"][i]].append(using_dic[\"sedol\"][i])\n# dic_MCAP[using_dic[\"mq\"][i]].append(using_dic[\"sedol\"][i])\n#\n#\n# risk_data,risk_sedol,risk_mat=risk(date_list[1])\n#\n#\n#\n# sedol_var_list =[]\n#\n# for i in risk_sedol:\n# sedol_var_list.append(\"d\"+str(i))\n#\n#\n# alpha = []\n#\n#\n# for i in risk_sedol:\n# alpha.append(-1*dic_sedol_as[i])\n#\n#\n# qmat=[]\n#\n#\n#\n#\n#\n# for i in range(len(risk_mat)):\n# qmat_1=[]\n# qmat_1.append(sedol_var_list)\n# new_risk_mat=[]\n# for j in risk_mat[i]:\n# new_risk_mat.append(j)\n# qmat_1.append(new_risk_mat)\n# qmat.append(qmat_1)\n#\n#\n#\n# dic_result2 = portfolio(sector=dic_sector,bench=dic_bench,asset=risk_sedol,MCAPQ=dic_MCAP,beta=dic_beta,alpha=alpha,qmat=qmat)\n#\n#\n# w_dic2 = {}\n#\n# for i in dic_result2.keys():\n# w_dic2.update({str(i):dic_bench[i]+dic_result2[i]})\n#\n# pre_list2 = set(list(w_dic2.keys()))\n#\n# turnover = 0\n#\n# add_key_1 = pre_list-pre_list2\n# add_key_2 = pre_list2-pre_list\n#\n#\n# for i in add_key_2:\n# w_dic.update({i:0})\n#\n# for i in add_key_1:\n# w_dic2.update({i:0})\n#\n# pre_list.union(pre_list2)\n#\n# for i in pre_list:\n# turnover += abs(w_dic2[i]-w_dic[i])\n#\n#\n#\n# rOTP = 0\n#\n# for i in dic_sedol_as.keys():\n# rOTP += w_dic2[i]*dic_sedol_as[i]\n#\n# r_Txadj_t = rOTP - turnover*0.5\n#\n# print(\"turnover : \" + str(turnover))\n# print(\"r_OTP : \" + str(rOTP))\n# print(\"r_Txadj_t : \" + str(r_Txadj_t))","sub_path":"CPLEX/pickle_make.py","file_name":"pickle_make.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144734823","text":"\"\"\"Helpers for interacting with Jenkins server.\"\"\"\nfrom collections import namedtuple\nimport requests\n\n\nJenkinsInfo = namedtuple(\n 'JenkinsInfo',\n ['url', 'job', 'token'])\n\n\nclass JenkinsError(Exception):\n pass\n\n\ndef generate_build_url(number, config):\n url = config.url.format(config.job)\n url = url + '/{0}'.format(number)\n return url\n\n\ndef generate_job_url(config):\n url = config.url\n return url.format(config.job)\n\n\ndef generate_job_build_url(config):\n url = config.url + '/buildWithParameters'\n return url.format(config.job)\n\n\ndef kick_jenkins_merge(pr_id, git_sha, jenkins_info):\n \"\"\"Trigger a merge build for the pull request specified\"\"\"\n url = generate_job_build_url(jenkins_info)\n request_data = {\n 'pr': pr_id,\n 'sha1': git_sha,\n 'token': jenkins_info.token,\n }\n\n resp = requests.post(url, request_data)\n if resp.status_code != 200:\n raise 
JenkinsError(resp.content)\n","sub_path":"src/jenkinsgithublander/jenkins.py","file_name":"jenkins.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"12614385","text":"from functools import partial\nimport numpy as np\nimport tensorflow as tf2\ntf = tf2.compat.v1\ntf.disable_v2_behavior()\n\n\n# It turns out that the position of batch normalization layer matters in\n# neural networks, see discussions in:\n# https://stackoverflow.com/questions/39691902/ordering-of-batch-normalization-and-dropout\n# https://www.zhihu.com/question/283715823\n# Also according to the discussions, it is generally NOT recommended to use\n# batch normalization and dropout simultaneously.\ndef dense_nn(net, hidden_units, activation=tf.nn.elu, use_bn=True,\n bn_after_activation=True, dropout_rate=None, is_training=True,\n name=\"mlp\"):\n hidden_length = len(hidden_units)\n if activation is None:\n activation = tf.identity\n\n with tf.variable_scope(name):\n if use_bn:\n net = tf.layers.batch_normalization(net, training=is_training)\n for i, units in enumerate(hidden_units, start=1):\n # if i < hidden_length:\n net = tf.layers.dense(inputs=net,\n units=units,\n activation=None,\n name=name+\"_layer\"+str(i),\n reuse=tf.AUTO_REUSE)\n\n if use_bn and bn_after_activation:\n net = activation(net)\n net = tf.layers.batch_normalization(net, training=is_training)\n elif use_bn and not bn_after_activation:\n net = tf.layers.batch_normalization(net, training=is_training)\n net = activation(net)\n else:\n net = activation(net)\n\n if dropout_rate:\n net = tf.layers.dropout(net, dropout_rate,\n training=is_training)\n\n # else:\n # net = tf.layers.dense(inputs=net,\n # units=units,\n # activation=activation)\n\n return net\n\n\ndef var_list_by_name(names):\n assert isinstance(names, (list, tuple)), \"names must be list or tuple\"\n var_dict = dict()\n for name in names:\n matched_vars = [\n var for var in tf.trainable_variables() if name in var.name\n ]\n var_dict[name] = matched_vars\n return var_dict\n\n\ndef reg_config(reg):\n if not reg:\n return None\n elif isinstance(reg, float) and reg > 0.0:\n return tf.keras.regularizers.l2(reg)\n else:\n raise ValueError(\"reg must be float and positive...\")\n\n\ndef dropout_config(dropout_rate):\n if not dropout_rate:\n return 0.0\n elif dropout_rate <= 0.0 or dropout_rate >= 1.0:\n raise ValueError(\"dropout_rate must be in (0.0, 1.0)\")\n else:\n return dropout_rate\n\n\ndef lr_decay_config(initial_lr, default_decay_steps, **kwargs):\n decay_steps = kwargs.get(\"decay_steps\", default_decay_steps)\n decay_rate = kwargs.get(\"decay_rate\", 0.96)\n global_steps = tf.Variable(0, trainable=False, name=\"global_steps\")\n learning_rate = tf.train.exponential_decay(initial_lr, global_steps,\n decay_steps, decay_rate,\n staircase=True)\n\n return learning_rate, global_steps\n\n\ndef sparse_tensor_interaction(data, recent_num=None, random_sample_rate=None):\n sparse_data = data.sparse_interaction.tocoo()\n row = sparse_data.row.reshape(-1, 1)\n indices = np.concatenate([row, np.zeros_like(row)], axis=1)\n values = sparse_data.col\n\n# user_interacted_num = np.diff(data.sparse_interaction.indptr)\n if recent_num is not None:\n indices, values = user_recent_interact(recent_num, indices, values)\n elif random_sample_rate is not None:\n indices, values = random_sample(random_sample_rate, indices, values)\n\n sparse_tensor = tf.SparseTensor(\n indices=indices, values=values, 
dense_shape=sparse_data.shape)\n return sparse_tensor\n\n\ndef random_sample(sample_rate, indices, values):\n assert 0.0 < sample_rate < 1.0, \"sample_rate must be in (0.0, 1.0)\"\n total_length = len(values)\n sample_num = int(total_length * sample_rate)\n sampled_indices = np.random.choice(\n range(total_length), size=sample_num, replace=False)\n indices = indices[sampled_indices]\n values = values[sampled_indices]\n return indices, values\n\n\ndef user_recent_interact(num, indices, values):\n assert isinstance(num, int), \"recent_interact_num must be int\"\n (users,\n user_position,\n user_counts) = np.unique(indices[:, 0],\n return_inverse=True,\n return_counts=True)\n\n user_split_indices = np.split(\n np.argsort(user_position, kind=\"mergesort\"),\n np.cumsum(user_counts)[:-1]\n )\n\n n_users = len(users)\n recent_indices = list()\n for u in range(n_users):\n # assume user interactions have already been sorted by time.\n u_data = user_split_indices[u][-num:]\n recent_indices.extend(u_data)\n indices = indices[recent_indices]\n values = values[recent_indices]\n return indices, values\n\n\ndef conv_nn(tf_version, filters, kernel_size, strides, padding, activation,\n dilation_rate=1):\n if tf_version >= \"2.0.0\":\n net = tf.keras.layers.Conv1D(\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n activation=activation,\n dilation_rate=dilation_rate\n )\n else:\n net = partial(\n tf.layers.conv1d,\n filters=filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n activation=activation\n )\n return net\n\n\ndef max_pool(tf_version, pool_size, strides, padding):\n if tf_version >= \"2.0.0\":\n net = tf.keras.layers.MaxPool1D(\n pool_size=pool_size,\n strides=strides,\n padding=padding\n )\n else:\n net = partial(\n tf.layers.max_pooling1d,\n pool_size=pool_size,\n strides=strides,\n padding=padding\n )\n return net\n\n\ndef match_adam(v_tf, v_model):\n return (v_tf.name.startswith(v_model + \"/Adam:0\") or\n v_tf.name.startswith(v_model + \"/Adam_1:0\"))\n\n\ndef modify_variable_names(model, trainable):\n user_var, item_var, sparse_var, dense_var = None, None, None, None\n manual_var = None\n if trainable:\n if hasattr(model, \"user_variables\"):\n user_var = [v+\":0\" for v in model.user_variables]\n if hasattr(model, \"item_variables\"):\n item_var = [v+\":0\" for v in model.item_variables]\n if hasattr(model, \"sparse_variables\"):\n sparse_var = [v+\":0\" for v in model.sparse_variables]\n if hasattr(model, \"dense_variables\"):\n dense_var = [v+\":0\" for v in model.dense_variables]\n\n manual_var = []\n if user_var is not None:\n manual_var.extend(user_var)\n if item_var is not None:\n manual_var.extend(item_var)\n if sparse_var is not None:\n manual_var.extend(sparse_var)\n if dense_var is not None:\n manual_var.extend(dense_var)\n\n else:\n if hasattr(model, \"user_variables\"):\n user_var = []\n for v in model.user_variables:\n user_var.append(v + \"/Adam:0\")\n user_var.append(v + \"/Adam_1:0\")\n user_var.append(v + \"/Ftrl:0\")\n user_var.append(v + \"/Ftrl_1:0\")\n if hasattr(model, \"item_variables\"):\n item_var = []\n for v in model.item_variables:\n item_var.append(v + \"/Adam:0\")\n item_var.append(v + \"/Adam_1:0\")\n item_var.append(v + \"/Ftrl:0\")\n item_var.append(v + \"/Ftrl_1:0\")\n if hasattr(model, \"sparse_variables\"):\n sparse_var = []\n for v in model.sparse_variables:\n sparse_var.append(v + \"/Adam:0\")\n sparse_var.append(v + \"/Adam_1:0\")\n sparse_var.append(v + \"/Ftrl:0\")\n sparse_var.append(v + 
\"/Ftrl_1:0\")\n if hasattr(model, \"dense_variables\"):\n dense_var = []\n for v in model.dense_variables:\n dense_var.append(v + \"/Adam:0\")\n dense_var.append(v + \"/Adam_1:0\")\n dense_var.append(v + \"/Ftrl:0\")\n dense_var.append(v + \"/Ftrl_1:0\")\n\n return user_var, item_var, sparse_var, dense_var, manual_var\n\n\ndef multi_sparse_combine_embedding(data_info, variables, all_sparse_indices,\n combiner, embed_size):\n field_offsets = data_info.multi_sparse_combine_info.field_offset\n field_lens = data_info.multi_sparse_combine_info.field_len\n feat_oovs = data_info.multi_sparse_combine_info.feat_oov\n sparse_end = field_offsets[0]\n\n # only one multi_sparse feature and no sparse features\n if sparse_end == 0 and len(field_offsets) == 1:\n result = multi_sparse_alone(variables, all_sparse_indices,\n combiner, embed_size, field_offsets[0],\n field_lens[0], feat_oovs[0])\n else:\n if sparse_end > 0:\n sparse_indices = all_sparse_indices[:, :sparse_end]\n sparse_embedding = tf.nn.embedding_lookup(variables, sparse_indices)\n result = [sparse_embedding]\n else:\n result = []\n\n for offset, length, oov in zip(field_offsets, field_lens, feat_oovs):\n result.append(\n multi_sparse_alone(variables, all_sparse_indices, combiner,\n embed_size, offset, length, oov)\n )\n result = tf.concat(result, axis=1)\n return result\n\n\ndef multi_sparse_alone(variables, all_sparse_indices, combiner,\n embed_size, offset, length, oov):\n variable_dim = len(variables.get_shape().as_list())\n # oov feats are padded to 0-vector\n oov_indices = [oov] if variable_dim == 1 else oov\n zero_padding_op = tf.scatter_update(\n variables, oov_indices, tf.zeros([embed_size], dtype=tf.float32)\n )\n multi_sparse_indices = all_sparse_indices[:, offset: offset + length]\n\n with tf.control_dependencies([zero_padding_op]):\n multi_sparse_embed = tf.nn.embedding_lookup(variables, multi_sparse_indices)\n\n res_embed = tf.reduce_sum(multi_sparse_embed, axis=1, keepdims=True)\n if combiner in (\"mean\", \"sqrtn\"):\n multi_sparse_lens = tf.reduce_sum(\n tf.cast(\n tf.not_equal(multi_sparse_indices, oov), tf.float32\n ), axis=1, keepdims=True\n )\n if combiner == \"sqrtn\":\n multi_sparse_lens = tf.sqrt(multi_sparse_lens)\n if variable_dim == 2:\n multi_sparse_lens = tf.expand_dims(multi_sparse_lens, axis=1)\n\n res_embed = tf.div_no_nan(res_embed, multi_sparse_lens)\n\n return res_embed\n","sub_path":"libreco/utils/tf_ops.py","file_name":"tf_ops.py","file_ext":"py","file_size_in_byte":10997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627796868","text":"import torch\nimport numpy as np\nfrom pathlib import Path\n\nclass LidarDatasets(torch.utils.data.Dataset):\n\n def __init__(self, filenames, maxLen=1.0):\n self.data = np.concatenate([np.load(filename) for filename in filenames])\n self.data = torch.tensor(self.data).float()\n\n self.datanum = self.data.shape[0]\n self.datasize = self.data.shape[-1]\n\n def limit(self, end):\n self.data = self.data[:, :end]\n\n self.datanum = self.data.shape[0]\n self.datasize = self.data.shape[-1]\n\n def __len__(self):\n return self.datanum\n\n def __getitem__(self, idx):\n return self.data[idx]\n\nif __name__ == '__main__':\n\n train_filenames = ['./data/vaernnEnv0/id-{}.npy'.format(id) for id in range(10)]\n test_filenames = ['./data/vaernnEnv0/id-{}.npy'.format(id) for id in range(10, 12)]\n\n lidarTrainDatasets = LidarDatasets(train_filenames)\n lidarTrainDatasets.limit(1080)\n\n lidarTestDatasets = 
LidarDatasets(test_filenames)\n    lidarTestDatasets.limit(1080)","sub_path":"Lidar.py","file_name":"Lidar.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"618635760","text":"import sys\nimport urllib\nimport requests\nfrom datetime import datetime\nimport time\nimport random\nimport re\nfrom urllib.parse import urljoin\n\nrandom.seed(datetime.now().second)\n\nfrom bs4 import BeautifulSoup\n\nclass YandexCatalogRequestException(Exception):\n    pass\n\ndef parse_yaca_page(page):\n    soup = BeautifulSoup(page, \"html.parser\")\n    ol_result = soup.find(\"ol\", class_=\"b-result\")\n    start = ol_result[\"start\"]\n    results = soup.find_all(\"li\", class_=\"b-result__item\")\n    if not results:\n        return None\n\n    pages = list()\n    num = int(start)\n    for res in results:\n        result_name_tag = res.find(\"a\", class_=\"b-result__name\")\n        name_str = result_name_tag.text\n        name_match = re.match(r'\\\"(.+?)\\\"', str(name_str))\n        if name_match:\n            name = name_match.group(1)\n        else:\n            name = str(name_str)\n\n        ic_span = res.find(\"span\", class_=\"b-result__quote\")\n        ic = None\n        if ic_span:\n            ic_str = ic_span.string\n            ic_match = re.search(r\"\\d+\", str(ic_str))\n            ic = ic_match.group(0)\n\n        ref = result_name_tag[\"href\"]\n\n        yaca_text = res.text\n\n        pages.append((num, name, ref, ic, yaca_text))\n        num += 1\n    return pages\n\ndef request_yaca(qurl, page_num):\n    page_str = \"%d.html\" % (page_num - 1) if page_num > 1 else \"\"\n\n    qparts = qurl.split(\"?\", 1)\n    old_base = qparts[0]\n    if old_base.endswith(\"html\"):\n        old_base = old_base.rsplit(\"/\", 1)[0]\n\n    url_base = old_base + \"/\" + page_str\n    url = \"{0}?{1}\".format(url_base, qparts[1])\n\n    r = requests.get(url)\n    if r.status_code == 200:\n        page = r.text\n        return page\n    else:\n        raise YandexCatalogRequestException(r.status_code)\n\nclass YandexCatalogReader(object):\n    def __init__(self, query, fpage=1, lpage=100):\n        self.query = query\n        self.fpage = fpage\n        self.lpage = lpage\n        self.last_request = datetime.min\n\n    def results(self):\n        for page_num in range(self.fpage, self.lpage+1):\n            dtime = datetime.now() - self.last_request\n            time_to_sleep = 20 - dtime.seconds\n            if time_to_sleep > 0:\n                tts = random.random() * time_to_sleep\n                time.sleep(int(tts))\n\n            page = request_yaca(self.query, page_num)\n            self.last_request = datetime.now()\n            results = parse_yaca_page(page)\n            if not results:\n                break\n            for result in results:\n                yield result\n\nif __name__ == \"__main__\":\n    text = sys.argv[1]\n    ycr = YandexCatalogReader(text)\n    for res in ycr.results():\n        print(res)\n","sub_path":"YandexCatalogReader.py","file_name":"YandexCatalogReader.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"101516914","text":"import torch\n\n\nclass DICELoss(torch.nn.Module):\n    def __init__(self):\n        super(DICELoss, self).__init__()\n\n    def forward(self, target, input):\n        smooth = 1.\n\n        iflat = input.contiguous().view(-1)\n        tflat = target.contiguous().view(-1)\n        intersection = (iflat * tflat).sum()\n\n        return 2 - ((2. 
* intersection + smooth) / (iflat.sum() + tflat.sum() + smooth))\n\n\ndef train_batch(model, single, optimizer):\n    image = single['image']\n    label = single['label']\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    images = image.to(device=device, dtype=torch.float32)\n    labels = label.to(device=device, dtype=torch.float32)\n\n    # Forward pass\n    outputs = model(images)\n    outputs = outputs.to(device)\n\n    criterion = DICELoss().to(device)\n    loss = criterion(outputs, labels)\n\n    # Backward and optimize\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    return loss.item()\n\n\ndef test_batch(model, single):\n    vimage = single['image']\n    vlabel = single['label']\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n    vimages = vimage.to(device=device, dtype=torch.float32)\n    vlabels = vlabel.to(device=device, dtype=torch.float32)\n\n    voutputs = model(vimages)\n    voutputs = voutputs.to(device)\n\n    criterion = DICELoss().to(device)\n    viloss = criterion(voutputs, vlabels)\n\n    return viloss.item(), voutputs, vlabels\n\n","sub_path":"Ass3/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"126222999","text":"#!/usr/bin/python3\n# encoding: utf-8\n'''\n@author: 刘家兴\n@contact: ljx0ml@163.com\n@file: model.py\n@time: 2021/8/3 12:02\n@desc:\n'''\n\nimport numpy as np\nimport cv2\nIMAGE_SIZE_YOLOV3 = 416\n\ndef pre_process(inp_img):\n    gray = inp_img[:, :, 0]\n    gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n    image = cv2.resize(gray, (IMAGE_SIZE_YOLOV3, IMAGE_SIZE_YOLOV3), interpolation=cv2.INTER_LINEAR)\n\n    img = np.half(image)\n    img /= 255.0\n    if img.shape[-1] == 3:\n        img = np.expand_dims(img, 0)\n        img = np.transpose(img, (0, 3, 1, 2)).astype(np.float32)\n\n    return img.astype(np.float32)","sub_path":"src/utils/process/yolov3/preprocess_yolov3.py","file_name":"preprocess_yolov3.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226190107","text":"import MySQLdb\nimport sys\nimport inspect\nfrom student import student\nfrom student import teacher\nfrom student import stu_group\n\n\ndef insert(input_obj):\n\tif isinstance(input_obj, student):\n\t\tinput_sql = \"INSERT INTO Student(name, score, class_id) VALUES ('{}', {}, {});\".format(input_obj.name, input_obj.score, input_obj.class_id)\n\t\tprint(input_obj.name, \"is now added into list.\")\n\telif isinstance(input_obj, teacher):\n\t\tinput_sql = \"INSERT INTO Teacher(name, age, class_id) VALUES ('{}', {}, {});\".format(input_obj.name, input_obj.age, input_obj.class_id)\n\t\tprint(input_obj.name, \"is now added into list.\")\n\telif isinstance(input_obj, stu_group):\n\t\tinput_sql = \"INSERT INTO Class(name, grade) VALUES ('{}', '{}');\".format(input_obj.name, input_obj.grade)\n\t\tprint(input_obj.name, \"is now added into list.\")\n\tcursor.execute(input_sql)\n\ndef delete(table, name):\n\tif (table == 'student'):\n\t\tsql = \"SELECT name FROM Student\"\n\telif (table == 'teacher'):\n\t\tsql = \"SELECT name FROM Teacher\"\n\telif (table == 'class'):\n\t\tsql = \"SELECT name FROM Class\"\n\tcursor.execute(sql)\n\tname_list = [row[0] for row in cursor.fetchall()] #fetchall() format: ((student1-col1,col2,...), (student2-col1,col2,...), ....)\n\tif name in name_list:\n\t\tif (table == 'student'):\n\t\t\tsql = \"DELETE FROM Student WHERE name = '{}'\".format(name)\n\t\telif (table == 'teacher'):\n\t\t\tsql = \"DELETE FROM Teacher WHERE name = 
'{}'\".format(name)\n\t\telif (table == 'class'):\n\t\t\tsql = \"DELETE FROM Class WHERE name = '{}'\".format(name)\n\n\t\tcursor.execute(sql)\n\t\tprint(name, \"is now deleted from\", table)\n\telse:\n\t\tprint(name, \"doesn't exist in\", table)\n\ndef list(table):\n\tif (table == 'student'):\n\t\tsql = \"SELECT name FROM Student\"\n\telif (table == 'teacher'):\n\t\tsql = \"SELECT name FROM Teacher\"\n\telif (table == 'class'):\n\t\tsql = \"SELECT name FROM Class\"\n\tcursor.execute(sql)\n\tname_list = [row[0] for row in cursor.fetchall()] #fetchall() format: ((student1-col1,col2,...), (student2-col1,col2,...), ....)\n\tname_list.sort()\n\tprint(name_list)\n\ndef list_contains(table, contain_component):\n\tif (table == 'student'):\n\t\tsql = \"SELECT name FROM Student\"\n\telif (table == 'teacher'):\n\t\tsql = \"SELECT name FROM Teacher\"\n\telif (table == 'class'):\n\t\tsql = \"SELECT name FROM Class\"\n\tcursor.execute(sql)\n\tname_list = [row[0] for row in cursor.fetchall()] #fetchall() format: ((student1-col1,col2,...), (student2-col1,col2,...), ....)\n\tname_list.sort()\n\tmatch = [_ for _ in name_list if contain_component in _]\n\tprint(match)\n\n\n\nif __name__ == '__main__':\n\n\tdb = MySQLdb.connect(host=\"localhost\", user=\"Patina\", passwd=\"1234\", db=\"MyClass\")\n\tcursor = db.cursor()\n\n\targv = sys.argv\n\ttable = argv[1]\n\tcommand = argv[2]\n\n\tif (command == 'insert'):\n\t\tif(table == 'student' and len(argv) == 6):\n\t\t\tinput_obj = student(argv[3], argv[4], argv[5])\n\t\t\tinsert(input_obj)\n\t\telif(table == 'teacher' and len(argv) == 6):\n\t\t\tinput_obj = teacher(argv[3], argv[4], argv[5])\n\t\t\tinsert(input_obj)\n\t\telif(table == 'class' and len(argv) == 5):\n\t\t\tinput_obj = stu_group(argv[3], argv[4])\n\t\t\tinsert(input_obj)\n\t\telif(((table == 'student' or table == 'teacher') and len(argv) < 6) or (table == 'class' and len(argv) < 5)):\n\t\t\tprint(\"Some parameter required might be missing, please check.\")\n\t\telse:\n\t\t\tprint(\"There might be redundant parameter, please check.\")\n\n\telif (command == 'delete'):\n\t\tif(len(argv) == 4):\n\t\t\tname = argv[3]\n\t\t\tdelete(table, name)\n\t\telif(len(argv) < 4):\n\t\t\tprint(\"Some parameter required might be missing, please check.\")\n\t\telse:\n\t\t\tprint(\"There might be redundant parameter, please check.\")\n\n\telif (command == 'list'):\n\t\tif (len(argv) == 3):\n\t\t\tlist(table)\n\t\telif (len(argv) == 4):\n\t\t\tcontain_component = argv[3]\n\t\t\tlist_contains(table, contain_component)\n\t\telif(len(argv) < 3):\n\t\t\tprint(\"Some parameter required might be missing, please check.\")\n\t\telif(len(argv) > 4):\n\t\t\tprint(\"There might be redundant parameter, please check.\")\n\n\n\tdb.commit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341441738","text":"from uniback.models.general import BackupSet, BackupObject\nfrom uniback.tools.local_session import LocalSession\nfrom uniback.dictionary.uniback_constants import BackupSetList, BackupSetTypes\nimport json\n\n\ndef delete_backup_set(id):\n with LocalSession() as session:\n session.query(BackupSet).filter_by(id=id).delete()\n session.commit()\n\n\ndef delete_backup_sets(ids):\n with LocalSession() as session:\n for id in ids:\n session.query(BackupSet).filter_by(id=id).delete()\n session.query(BackupObject).filter_by(backup_set_id=id).delete()\n session.commit()\n\n\ndef get_backup_sets():\n 
with LocalSession() as session:\n backup_sets = session.query(BackupSet)\n return_list = []\n for backup_set in backup_sets:\n return_list.append(\n dict(\n id=backup_set.id,\n name=backup_set.name,\n type=BackupSetList.BACKUP_SETS[backup_set.type]\n )\n )\n return return_list\n\n# used for getting a tuple of values to be added to a select field\n# on a form\ndef get_backup_sets_tuple():\n with LocalSession() as session:\n backup_sets = session.query(BackupSet)\n return_list = []\n for backup_set in backup_sets:\n return_list.append(\n (backup_set.id, backup_set.name)\n )\n return return_list\n\n\ndef add_backup_set(data):\n with LocalSession() as session:\n if data['type'] == BackupSetTypes.BS_TYPE_FILESFOLDERS:\n json_object = json.loads(data['backup_object_data']['file_list'])\n backup_object_list = json_object['file_list']\n display_state = json.dumps(json_object['state'])\n else: \n raise Exception(f\"Unsupported backup set {data['type']}\")\n backup_set = (\n BackupSet(\n name=data['name'],\n type=data['type'],\n source=data['source'],\n data=display_state\n )\n )\n print(display_state)\n session.add(backup_set)\n session.commit()\n for backup_object in backup_object_list:\n new_backup_object = BackupObject(\n data=backup_object,\n backup_set_id=backup_set.id)\n session.add(new_backup_object)\n session.commit()\n\n\ndef get_backup_set_info(id):\n with LocalSession() as session:\n backup_set = session.query(BackupSet).filter_by(id=id).first()\n set_item_list = session.query(BackupObject).filter_by(backup_set_id=id)\n set_item_list_data = []\n for item in set_item_list:\n set_item_list_data.append(item.data)\n if backup_set:\n info_dict = dict(\n id=backup_set.id,\n name=backup_set.name,\n source=backup_set.source,\n type_name=BackupSetList.BACKUP_SETS[backup_set.type],\n data=backup_set.data,\n type=backup_set.type,\n time_added=backup_set.time_added\n )\n else:\n info_dict = dict(\n id=\"UNDEFINED\",\n name=\"UNDEFINED\",\n source=\"UNDEFINED\",\n type_name=\"UNDEFINED\",\n type=\"UNDEFINED\",\n time_added=\"UNDEFINED\"\n )\n return info_dict, set_item_list_data\n","sub_path":"uniback/db_interfaces/backup_sets.py","file_name":"backup_sets.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"374819896","text":"# -*- coding: utf-8 -*-\r\nimport requests\r\nimport os\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nsdata = pd.read_stata(os.path.abspath('/Users/XXX/Desktop/get_psu_from_nbs/tmpfiles/post_need.dta'), encoding='utf8')\r\nsdata['postcode'] = u''\r\nnum = sdata.shape[0]\r\n\r\nshead = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586'}\r\n\r\nfor i in range(num):\r\n try:\r\n r = requests.session()\r\n r.headers = shead\r\n addstr = sdata.pstr[i].encode('gbk')\r\n add_url = urllib.quote(addstr)\r\n r0 = r.get('http://opendata.baidu.com/post/s?wd=' + add_url)\r\n doc0 = BeautifulSoup(r0.content, 'html5lib', from_encoding=\"gbk\")\r\n t1 = doc0.find('ul')\r\n t2 = t1.li.a\r\n postcode = t2.get_text()\r\n except:\r\n postcode = u''\r\n sdata.postcode[i] = postcode\r\nsdata.to_excel(os.path.abspath('/Users/XXX/Desktop/get_psu_from_nbs/outfiles/post_python.xlsx'), index=False)\r\n","sub_path":"postcode.py","file_name":"postcode.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"478389779","text":"# =============================================================================\n# TASK SETTINGS DEFINITION (should appear on GUI)\n# =============================================================================\n# Stuff that should come from PyBpod\nPROJECT = 'IBL'\nTASK = 'advancedChoiceWorld'\nBOX = 'Box0'\nMOUSE_NAME = 'aCW_test_mouse'\nEXPERIMENTER = 'Nico'\nROTARY_ENCODER_PORT = 'COM3'\nROTARY_ENCODER_SERIAL_PORT_NUM = 1\nOSC_CLIENT_PORT = 7110\nOSC_CLIENT_IP = '127.0.0.1'\n# ROOT_DATA_FOLDER = None -> relative path in ../pybpod_projects/IBL/data/\nROOT_DATA_FOLDER = None\n# TASK\nNTRIALS = 10 # Number of trials for the current session\nUSE_VISUAL_STIMULUS = True # Run the visual stim in bonsai\nREPEAT_ON_ERROR = True\nREPEAT_STIMS = [1., 0.5]\n# REWARDS\nVALVE_TIME = 0.2016 # calibrated on 2018-01-19\n# STATE TIMERS\nRESPONSE_WINDOW = 0 # 0 for inf timer of state set to 0 means non existant\nINTERACTIVE_DELAY = 0.5 # how long after stim onset the CL starts\nITI = 0.5 # Inter trial delay happens every trial\nITI_CORRECT = 1 # how long the stim should stay visible after CORRECT choice\nITI_ERROR = 2 # # how long the stim should stay visible after ERROR choice\n# VISUAL STIM\nSTIM_POSITIONS = [-90, 90] # All possible positions for this session\nSTIM_CONTRASTS = [1., 0.5, 0.25, 0.125, 0.0625, 0.] # All possible contrasts\nSTIM_FREQ = 0.2 # Probably constant\nSTIM_ANGLE = 0. # Vertical orientation of Gabor patch (0=vertical)\nSTIM_SIGMA = 20. # Size of the gabor patch gaussian window\n# Not adaptive on a trial be trial basis\nSTIM_GAIN = 5. # Gain of the RE to stimulus movement\n# SOUNDS\nSOUND_SAMPLE_FREQ = 44100 # 192000 # depends on the sound card. 96000 ?\nWHITE_NOISE_DURATION = ITI_ERROR # Length of noise burst\nWHITE_NOISE_AMPLITUDE = 0.05\nGO_TONE_DURATION = 0.1 # Length of tone\nGO_TONE_FREQUENCY = 10000 # 10KHz\nGO_TONE_AMPLITUDE = 1 # [0->1]\n","sub_path":"IBL/tasks/advancedChoiceWorld/task_settings.py","file_name":"task_settings.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199377985","text":"import json\nimport os\nimport glob\nimport io\n\nfrom slackviewer.message import Message\n\nclass Reader(object):\n \"\"\"\n Reader object will read all of the archives' data from the json files\n \"\"\"\n\n def __init__(self, PATH):\n self._DIRS = {'channel': 'channels',\n 'group': 'private_channels',\n 'dm': 'direct_messages',\n 'mpim': 'private_channels'}\n self._PATH = PATH\n # TODO: Make sure this works\n self.__USER_DATA = {}\n with io.open(os.path.join(self._PATH, \"metadata.json\")) as f:\n metadata = json.load(f)\n for id,name in metadata['users'].items():\n self.__USER_DATA[id] = {'name': name}\n\n #with io.open(os.path.join(self._PATH, \"users.json\"), encoding=\"utf8\") as f:\n # self.__USER_DATA = {u[\"id\"]: u for u in json.load(f)}\n\n\n ##################\n # Public Methods #\n ##################\n\n def compile_channels(self):\n channel_names = os.listdir(os.path.join(self._PATH, 'channels'))\n channel_names = [os.path.splitext(x)[0] for x in channel_names]\n \n #channel_data = self._read_from_json(\"channels.json\")\n #channel_names = [c[\"name\"] for c in channel_data.values()]\n\n return self._create_messages('channel', channel_names, {}) #channel_data)\n\n def compile_groups(self):\n group_names = os.listdir(os.path.join(self._PATH, 'private_channels'))\n group_names = [x for x in group_names if not x.startswith('mpdm-')]\n 
group_names = [os.path.splitext(x)[0] for x in group_names]\n\n #group_data = self._read_from_json(\"groups.json\")\n #group_names = [c[\"name\"] for c in group_data.values()]\n\n return self._create_messages('group', group_names, {}) #group_data)\n\n def compile_dm_messages(self):\n dm_names = os.listdir(os.path.join(self._PATH, 'direct_messages'))\n dm_names = [os.path.splitext(x)[0] for x in dm_names]\n return self._create_messages('dm', dm_names, {})\n\n ## Gets list of dm objects with dm ID and array of members ids\n #dm_data = self._read_from_json(\"dms.json\")\n #dm_ids = [c[\"id\"] for c in dm_data.values()]\n\n ## True is passed here to let the create messages function know that\n ## it is dm data being passed to it\n #return self._create_messages(dm_ids, dm_data, True)\n\n def compile_dm_users(self):\n \"\"\"\n Gets the info for the members within the dm\n\n Returns a list of all dms with the members that have ever existed\n\n :rtype: [object]\n {\n id: \n users: []\n }\n\n \"\"\"\n dm_names = os.listdir(os.path.join(self._PATH, 'direct_messages'))\n dm_names = [os.path.splitext(x)[0] for x in dm_names]\n all_dm_users = []\n for name in dm_names:\n with open(os.path.join(self._PATH, 'direct_messages', name + '.json')) as f:\n channel_info = json.load(f)['channel_info']\n members = [self.__USER_DATA[m] for m in channel_info[\"members\"]]\n all_dm_users.append({'id': name, 'users': members})\n return all_dm_users\n\n #dm_data = self._read_from_json(\"dms.json\")\n #dms = dm_data.values()\n #all_dms_users = []\n\n #for dm in dms:\n # # checks if messages actually exsist\n # if dm[\"id\"] not in self._EMPTY_DMS:\n # dm_members = {\"id\": dm[\"id\"], \"users\": [self.__USER_DATA[m] for m in dm[\"members\"]]}\n # all_dms_users.append(dm_members)\n\n #return all_dms_users\n\n\n def compile_mpim_messages(self):\n mpim_names = os.listdir(os.path.join(self._PATH, 'private_channels'))\n mpim_names = [x for x in mpim_names if x.startswith('mpdm-')]\n mpim_names = [os.path.splitext(x)[0] for x in mpim_names]\n\n #mpim_data = self._read_from_json(\"mpims.json\")\n #mpim_names = [c[\"name\"] for c in mpim_data.values()]\n\n return self._create_messages('mpim', mpim_names, {}) #mpim_data)\n\n def compile_mpim_users(self):\n \"\"\"\n Gets the info for the members within the multiple person instant message\n\n Returns a list of all dms with the members that have ever existed\n\n :rtype: [object]\n {\n name: \n users: []\n }\n\n \"\"\"\n mpim_names = os.listdir(os.path.join(self._PATH, 'private_channels'))\n mpim_names = [x for x in mpim_names if x.startswith('mpdm-')]\n mpim_names = [os.path.splitext(x)[0] for x in mpim_names]\n all_mpim_users = []\n for name in mpim_names:\n with open(os.path.join(self._PATH, 'private_channels', name + '.json')) as f:\n channel_info = json.load(f)['channel_info']\n all_mpim_users.append({'name': channel_info['name'], 'users': [self.__USER_DATA[m] for m in channel_info[\"members\"]]})\n\n #mpim_data = self._read_from_json(\"mpims.json\")\n #mpims = [c for c in mpim_data.values()]\n #all_mpim_users = []\n\n #for mpim in mpims:\n # mpim_members = {\"name\": mpim[\"name\"], \"users\": [self.__USER_DATA[m] for m in mpim[\"members\"]]}\n # all_mpim_users.append(mpim_members)\n\n return all_mpim_users\n\n\n ###################\n # Private Methods #\n ###################\n\n def _create_messages(self, type, channels, data):\n \"\"\"\n Creates object of arrays of messages from each json file specified by the names or ids\n\n :param [str] channels: names of each group of 
messages\n\n :param [object] data: array of objects detailing where to get the messages from in\n the directory structure\n\n :param bool isDms: boolean value used to tell if the data is dm data so the function can\n collect the empty dm directories and store them in memory only\n\n :return: object of arrays of messages\n\n :rtype: object\n \"\"\"\n\n dir = self._DIRS[type]\n chats = {}\n empty_dms = []\n\n for channel in channels:\n messages = []\n\n with io.open(os.path.join(self._PATH, dir, channel + '.json')) as f: #, encoding=\"utf8\") as f:\n # loads all messages\n channel_messages = json.load(f)\n messages.extend([Message(self.__USER_DATA, data, type, channel, d) for d in channel_messages['messages']])\n\n chats[channel] = messages\n\n if type == 'dm':\n self._EMPTY_DMS = empty_dms\n\n return chats\n\n def _read_from_json(self, file):\n \"\"\"\n Reads the file specified from json and creates an object based on the id of each element\n\n :param str file: Path to file of json to read\n\n :return: object of data read from json file\n\n :rtype: object\n \"\"\"\n\n try:\n with io.open(os.path.join(self._PATH, file), encoding=\"utf8\") as f:\n return {u[\"id\"]: u for u in json.load(f)}\n except IOError:\n return {}\n","sub_path":"slackviewer/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125634376","text":"import scrapy\n\n\nclass BestsellingOffersSpider(scrapy.Spider):\n name = 'bestselling_offers'\n allowed_domains = ['www.glassesshop.com']\n #start_urls = ['https://www.glassesshop.com/bestsellers'] /Because we have used the start_requests() function, we don't need this list\n\n def start_requests(self):\n yield scrapy.Request(url='https://www.glassesshop.com/bestsellers', callback=self.parse, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.16; rv:85.0) Gecko/20100101 Firefox/85.0'\n })\n\n def parse(self, response):\n for product in response.xpath(\"//div[@id='product-lists']/div\"):\n yield {\n 'product_url': product.xpath(\".//div[4]/div[2]/div/div[1]/div/a/@href\").get(),\n 'image_url': product.xpath(\".//div[3]/a/img[2]/@data-src\").get(),\n 'product_name': product.xpath(\".//div[4]/div[2]/div/div[1]/div/a/text()\").get(),\n 'product_price': product.xpath(\".//div[4]/div[2]/div/div[2]/div/div/span/text()\").get(),\n 'user_agent': response.request.headers['User-Agent']\n }\n \n next_page = response.xpath(\"//a[@class='page-link' and @rel='next']/@href\").get()\n\n if next_page:\n yield scrapy.Request(url=next_page, callback=self.parse, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.16; rv:85.0) Gecko/20100101 Firefox/85.0'\n })","sub_path":"glassesshop/glassesshop/spiders/bestselling_offers.py","file_name":"bestselling_offers.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230669674","text":"class Solution:\n def trap(self, height: List[int]) -> int:\n if len(height) == 0:\n return 0\n max_l, max_r = height[0], height[len(height)-1]\n A = height\n l, r = 0, len(A)-1\n res = 0\n while l <= r:\n if A[l] < A[r]:\n max_l = max(max_l, A[l])\n res += max_l - A[l]\n # print(\"l\", res, l)\n l+=1\n else:\n max_r = max(max_r, A[r])\n res += max_r - A[r]\n # print(\"r\", res, r)\n r-=1\n return 
res","sub_path":"2020_03_21/biss_42.py","file_name":"biss_42.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38809163","text":"x, y = [int(x) for x in input().split()]\nobstacles = list(range(x))\ndeleted = []\nfor _ in range(y):\n inp = int(input())\n if inp not in deleted:\n obstacles.remove(inp)\n deleted.append(inp)\nfor element in obstacles:\n print(element)\nprint(\"Mario got %i of the dangerous obstacles.\" % len(deleted))\n\n","sub_path":"Kattis or Codeforces Problems/savingprincess.py","file_name":"savingprincess.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529777800","text":"import pandas as pd\nimport os\nimport sys\n#命令行执行路径\nrunPwd = os.getcwd()\n#当前文件路径\ncurPwd = os.path.abspath(os.path.dirname(__file__))\n#当前文件父路径\ncurParentPwd = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n# print(\"pwd:\",runPwd)\n# print(\"pwd1:\",curPwd)\n# print(\"curParentPwd:\",curParentPwd)\n\ndef getSinaPingYin2ProvinceDict():\n file_path = curParentPwd +\"/data/sina_province.txt\"\n df = pd.read_csv(file_path, header=None, names=['pingyin', 'province'],\n encoding=\"utf8\")\n province_name_list = df.values.tolist()\n dict = {}\n\n for province_name in province_name_list:\n dict[province_name[0].lower()] = province_name[1]\n return dict\n\ndef getSinaCity2PingYinDict():\n file_path = curParentPwd +\"/data/sina_city.txt\"\n df = pd.read_csv(file_path, header=None, names=['pingyin', 'city'],\n encoding=\"utf8\")\n city_name_list = df.values.tolist()\n dict = {}\n\n for city_name in city_name_list:\n dict[city_name[1]] = city_name[0].lower()\n return dict\n\ndef getSinaPingYin2CityDict():\n file_path = curParentPwd +\"/data/sina_city.txt\"\n df = pd.read_csv(file_path, header=None, names=['pingyin', 'city'],\n encoding=\"utf8\")\n city_name_list = df.values.tolist()\n dict = {}\n\n for city_name in city_name_list:\n dict[city_name[0].lower()] = city_name[1]\n return dict\n\ndef getCNCity2CodeDict():\n file_path = curParentPwd +\"/data/cn_city_code.txt\"\n df = pd.read_csv(file_path, header=None, names=['code', 'city'],\n encoding=\"utf8\")\n city_name_list = df.values.tolist()\n dict = {}\n\n for city_name in city_name_list:\n dict[city_name[1]] = city_name[0]\n return dict\n\ndef getCNCode2CityDict():\n file_path = curParentPwd +\"/data/cn_city_code.txt\"\n df = pd.read_csv(file_path, header=None, names=['code', 'city'],\n encoding=\"utf8\")\n city_name_list = df.values.tolist()\n dict = {}\n\n for city_name in city_name_list:\n dict[str(city_name[0])] = city_name[1]\n return dict\n\nif __name__ == '__main__':\n dict = getCNCity2CodeDict()\n print(dict)","sub_path":"scrapy-redisDemo/weatherSpiderMaster/weatherSpiderMaster/util/cityUtil.py","file_name":"cityUtil.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40522407","text":"from selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nurl = 'http://spo.iitk.ac.in/preparation/Internship_insight_2019-20.html'\r\npage = requests.get(url)\r\nsoup = BeautifulSoup(page.content, 
'html.parser')\r\ns1 = soup.find_all('a', class_='btn btn-primary')\r\ns2=[\"a\"]*len(s1)\r\nprint(type(s2[0]))\r\nfor i in range(len(s1)):\r\n s2[i]=s1[i].get('href').replace('.html', '').replace('_', ' ')\r\nfor i in range(4):\r\n driver = webdriver.Chrome('C:/chromedriver')\r\n driver.get('http://www.google.co.in/')\r\n wait = WebDriverWait(driver, 600)\r\n typer = driver.find_element_by_xpath(\r\n '//*[@id=\"tsf\"]/div[2]/div/div[1]/div/div[1]/input')\r\n typer.click()\r\n typer.send_keys(\"%s iitk\"%(s2[i]))\r\n typer2 = driver.find_element_by_xpath(\r\n '//*[@id=\"tsf\"]/div[2]/div/div[3]/center/input[1]')\r\n typer2.click()\r\n time.sleep(60)\r\n\r\n","sub_path":"iit kanpur.py","file_name":"iit kanpur.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"416836849","text":"#!/usr/bin/python3\nimport os\nimport re\n\ndata = os.popen('ipcs -q').read()\n\nlines = re.split('\\n', data);\n\nfor line in lines:\n elements = re.split(' ', line)\n if len(elements) >= 2:\n if elements[1].isdigit():\n os.system('ipcrm -q' + elements[1])\n","sub_path":"cleanMsgQueues.py","file_name":"cleanMsgQueues.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95841238","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\" Unit tests for makeproject cleanme\n\nCopyright 2013-2019 by Rebecca Ann Heineman becky@burgerbecky.com\n\nIt is released under an MIT Open Source license. Please see LICENSE\nfor license details. Yes, you can use it in a\ncommercial title without paying anything, just give me a credit.\nPlease? It's not like I'm asking you for money!\n\n\"\"\"\n\nimport os\nimport makeprojects\n\n########################################\n\n\ndef test_cleanme(tmpdir):\n \"\"\"\n Test to see if cleanme loads build_rules.py.\n \"\"\"\n\n # Sample script\n clean_script = (\n 'import burger\\n'\n 'def rules(command, working_directory):\\n'\n ' if command==\"clean\":\\n'\n ' burger.clean_directories(working_directory, (\"temp\", \"bin\"))\\n'\n )\n\n # Create some temp folders\n temp_dir = tmpdir.mkdir('temp')\n bin_dir = tmpdir.mkdir('bin')\n # Create a folder that should not be deleted\n source_dir = tmpdir.mkdir('source')\n\n # Write out the build_rules.py file\n tmpdir.join('build_rules.py').write(clean_script)\n makeprojects.clean(str(tmpdir), [])\n\n # temp and bin should disappear, but not the others\n assert not os.path.isdir(str(temp_dir))\n assert not os.path.isdir(str(bin_dir))\n assert os.path.isdir(str(source_dir))\n assert os.path.isfile(str(tmpdir.join('build_rules.py')))\n\n # Cleanup\n tmpdir.remove()\n\n\n########################################\n\n\ndef test_cleanme_root(tmpdir):\n \"\"\"\n Test to see if cleanme handles root=True.\n \"\"\"\n\n clean_script_root = (\n 'import burger\\n'\n 'def rules(command, working_directory=None, root=True):\\n'\n ' if command==\"clean\":\\n'\n ' burger.clean_files(working_directory, \"*.txt\")\\n'\n )\n\n clean_script_no_root = (\n 'import burger\\n'\n 'def rules(command, working_directory):\\n'\n ' if command==\"clean\":\\n'\n ' burger.clean_files(working_directory, \"*.cpp\")\\n'\n )\n\n # Create some temp folders\n a_dir = tmpdir.mkdir('a')\n\n # Write out the build_rules.py file\n tmpdir.join('build_rules.py').write(clean_script_root)\n a_dir.join('build_rules.py').write(clean_script_no_root)\n\n tmpdir.join('foo.txt').write('abc')\n 
tmpdir.join('foo.cpp').write('abc')\n a_dir.join('foo.txt').write('abc')\n a_dir.join('foo.cpp').write('abc')\n makeprojects.clean(str(a_dir), [])\n\n # Files in 'a' should disappear but not those in tmpdir\n assert os.path.isfile(str(tmpdir.join('foo.txt')))\n assert os.path.isfile(str(tmpdir.join('foo.cpp')))\n assert not os.path.isfile(str(a_dir.join('foo.txt')))\n assert not os.path.isfile(str(a_dir.join('foo.cpp')))\n\n # Cleanup\n tmpdir.remove()\n","sub_path":"unittests/test_cleanme.py","file_name":"test_cleanme.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573872432","text":"import serial\nimport time\nimport math\nimport signal\nimport sys\nfrom ._singleton import Singleton\nfrom .comms_constants import MicromelonOpCode as OPCODE, MicromelonType as OPTYPE, MicromelonImageResolution as IMRES, tupleForResolution\nfrom ._tcp_controller import SerialTCPConnection\n\nclass RoverController(metaclass=Singleton):\n \"\"\"\n Manages connection and packet level communication with the robot\n Is a singleton - get a reference to the instance with constructor\n\n standard usage\n rc = RoverController()\n rc.connectSerial()\n \"\"\"\n def __init__(self, port = None, bluetooth = False):\n self._READ_PACKET_TIMEOUT_SECS = 6.0\n self._BLOCKING_READ_TIMEOUT_SECS = 2.0\n signal.signal(signal.SIGINT, _sigint_handler)\n if bluetooth:\n self._connection = None\n elif port:\n self.connectSerial(port)\n else:\n self._connection = None\n\n def isInBluetoothMode(self):\n return False # BLE python not implemented\n \n def isInNetworkMode(self):\n return isinstance(self._connection, SerialTCPConnection)\n\n def setReadTimeout(self, seconds = None):\n \"\"\"\n Timeout used for blocking reads of packets\n \"\"\"\n if seconds == None:\n self._READ_PACKET_TIMEOUT_SECS = 6.0\n else:\n self._READ_PACKET_TIMEOUT_SECS = seconds\n \n self._BLOCKING_READ_TIMEOUT_SECS = self._READ_PACKET_TIMEOUT_SECS / 3.0\n if self._connection and self._connection.timeout:\n self._connection.timeout = self._BLOCKING_READ_TIMEOUT_SECS\n\n def connectSerial(self, newPort = \"/dev/ttyS0\"):\n \"\"\"\n Sets the communcation to use serial\n default port is the gpio serial on a raspberry pi\n \"\"\"\n if isinstance(self._connection, serial.Serial):\n self._connection.close()\n self._connection.port = newPort\n self._connection.open()\n else:\n if self._connection:\n self._connection.close()\n self._connection = serial.Serial(newPort, baudrate=115200, timeout=self._BLOCKING_READ_TIMEOUT_SECS)\n self._connection.flushInput()\n self._connection.flushOutput()\n self.arm()\n\n def connectIP(self, address = '192.168.4.1', port = 4202):\n if self._connection:\n self._connection.close()\n self._connection = SerialTCPConnection(address, port, self._BLOCKING_READ_TIMEOUT_SECS)\n self._connection.open()\n self._connection.flushInput()\n self._connection.flushOutput()\n try:\n self.arm()\n except Exception as e:\n print('Arming failed, maybe no robot connected')\n print('Using camera only mode')\n\n\n def stopRobot(self, waitForAck = False):\n self.writePacket(OPCODE.WRITE, OPTYPE.MOTOR_SET, [0] * 7, waitForAck)\n self.writePacket(OPCODE.WRITE, OPTYPE.BUZZER_FREQ, [0], waitForAck)\n\n def writeAttribute(self, opType, data):\n \"\"\"\n Writes an attribute and returns once the ACK packet is received from the robot\n \"\"\"\n self.writePacket(OPCODE.WRITE, opType, data)\n\n def readAttribute(self, opType, data = [], timeout = None):\n \"\"\"\n Blocking 
read - returns the raw data from robot response\n \"\"\"\n self.writePacket(OPCODE.READ, opType, data, False)\n p = self.waitForPacket(OPCODE.ACK, opType, timeout)\n return p[3:] # Only return the data\n\n def writePacket(self, opCode, opType, data = [], waitForAck = True):\n \"\"\"\n Writes the packet over transport\n Blocks and waits for ack by default\n \"\"\"\n sendPacket = [0x55, opCode.value, opType.value, len(data)] + data\n if not self._connection:\n print('Warning: No robot connected')\n print('Tried to ' + opCode.name + ' ' + opType.name + ' with data: ' + str(data))\n print('Packet: ' + str(sendPacket))\n return\n #print('Packet: ' + str(sendPacket))\n self._connection.write(sendPacket)\n if waitForAck:\n self.waitForPacket(OPCODE.ACK, opType)\n\n def readPacket(self, blocking = False, timeout = None):\n \"\"\"\n Reads a packet from the transport\n returns packet on success\n returns None if timeout or no packet available on non-blocking\n \"\"\"\n if not self._connection:\n raise Exception('No robot connected - cannot read packet')\n if not blocking and self._connection.in_waiting <= 0:\n return None\n if timeout == None:\n timeout = self._BLOCKING_READ_TIMEOUT_SECS\n attempts = 0\n maxAttempts = self._READ_PACKET_TIMEOUT_SECS / timeout\n maxAttempts = math.floor(maxAttempts)\n header = None\n while attempts < maxAttempts:\n header = self._connection.read(4)\n attempts += 1\n if (len(header) == 4):\n if header[1] == OPCODE.ACK.value and header[2] == OPTYPE.NETWORK_KEEP_ALIVE.value:\n continue # network keepalive doesn't count as a useful packet\n break\n if (attempts == maxAttempts and len(header) == 0):\n return None # timeout\n if (len(header) != 4):\n raise Exception('Invalid header: ' + str(header))\n header = list(header)\n # Don't include start byte in packet\n header = header[1:]\n data = []\n dataLen = header[2]\n if (header[1] == OPTYPE.RPI_IMAGE.value):\n # One byte length doesn't work for big images so use as resolution flag\n resDims = tupleForResolution(header[2])\n dataLen = resDims[0] * resDims[1] * 3\n if dataLen > 0:\n data = self._connection.read(dataLen)\n data = list(data)\n if len(data) == 0:\n raise Exception('Timeout reading packet data')\n return header + data\n\n def waitForPacket(self, opCode, opType, timeout = None):\n \"\"\"\n Blocks until a packet with a matching header is received\n throws an exception if an invalid or error packet is received\n \"\"\"\n invalidReceiveOpCodes = [\n OPCODE.READ,\n OPCODE.WRITE,\n OPCODE.ERROR_INVALID_OP_CODE,\n OPCODE.ERROR_INVALID_PAYLOAD_SIZE,\n OPCODE.ERROR_INVALID_CHECKSUM,\n OPCODE.ERROR_NOT_IMPLEMENTED\n ]\n p = self.readPacket(True, timeout)\n if not p:\n raise Exception('Timeout waiting for packet')\n while OPCODE(p[0]) != opCode or OPTYPE(p[1]) != opType:\n if OPCODE(p[0]) in invalidReceiveOpCodes:\n raise Exception('Received invalid opcode: ' + OPCODE(p[0]).name)\n p = self.readPacket(True, timeout)\n if not p:\n raise Exception('Timeout waiting for packet')\n\n return p\n\n def arm(self):\n \"\"\"\n Puts the robot in UART control mode\n \"\"\"\n self.writePacket(OPCODE.WRITE, OPTYPE.CONTROL_MODE, [1])\n\n def disarm(self):\n \"\"\"\n Returns the robot to normal bluetooth operation\n \"\"\"\n self.writePacket(OPCODE.WRITE, OPTYPE.CONTROL_MODE, [0])\n\n\ndef _sigint_handler(sig, frame):\n print('Received SIGINT... 
Stopping robot')\n    rc = RoverController()\n    rc.stopRobot()\n    sys.exit(0)\n","sub_path":"micromelon/_rover_controller.py","file_name":"_rover_controller.py","file_ext":"py","file_size_in_byte":6626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"326702948","text":"import cv2\nimport numpy as np\nimport math\nfrom scipy import signal\n\nimg = cv2.imread('./11.jpg')\nim = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n# cv2.imshow('img_gray',im)\n\ndef pascalsmooth(n):\n    pascalsmooth = np.zeros([1,n],np.float32)\n    for i in range(n):\n        pascalsmooth[0][i] = math.factorial(n - 1)/(math.factorial(i)*math.factorial(n-1-i))\n    return pascalsmooth\ndef pascalDiff(n):\n    pascaldiff = np.zeros([1, n],np.float32)\n    pascal_smooth = pascalsmooth(n-1)\n    for i in range(n):\n        if i == 0:\n            pascaldiff[0][i] = pascal_smooth[0][i]\n        elif i == n-1:\n            pascaldiff[0][i] = -pascal_smooth[0][i-1]\n        else:\n            pascaldiff[0][i] = pascal_smooth[0][i] - pascal_smooth[0][i-1]\n    return pascaldiff\ndef getsobel(n):\n    pascal_smooth_kernel = pascalsmooth(n)\n    pascal_diff_kernel = pascalDiff(n)\n    # horizontal convolution kernel\n    sobelkernel_x = signal.convolve2d(pascal_smooth_kernel.transpose(),pascal_diff_kernel,mode='full')\n    # vertical convolution kernel\n    sobelkernel_y = signal.convolve2d(pascal_smooth_kernel,pascal_diff_kernel.transpose(),mode='full')\n    return sobelkernel_x,sobelkernel_y\ndef sobel(img,n):\n    x,y = img.shape\n    pascal_smooth = pascalsmooth(n)\n    pascal_diff = pascalDiff(n)\n    image_x = signal.convolve2d(img,pascal_smooth.transpose(),mode='same')\n    image_x = signal.convolve2d(image_x,pascal_diff,mode='same')\n    image_y = signal.convolve2d(img,pascal_smooth,mode='same')\n    image_y = signal.convolve2d(image_y,pascal_diff.transpose(),mode='same')\n    return image_x,image_y\n\nimg_sobel_x , img_sobel_y = sobel(im, 8)\n# cv2.imshow('_x',img_sobel_x.astype(np.uint8))\n# cv2.imshow('_y',img_sobel_y.astype(np.uint8))\n# edge = img_sobel_x + img_sobel_y\nedge = np.sqrt(np.power(img_sobel_x,2)+np.power(img_sobel_y,2))\n# edge = (img_sobel_x + img_sobel_y)/2.0\nedge = edge/np.max(edge)\nedge*= 255\nedge = edge.astype(np.uint8)\n\nedge = 255 - edge\n# cv2.imshow('pencil_edge',pencil_edge)\nprint('1')\ncv2.imshow('_img',edge)\ncv2.imwrite('edge_sobel.jpg',edge)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"图像处理/边缘检测/sobel.py","file_name":"sobel.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423616195","text":"#!/usr/bin/env python3\n\nclass Pozdrav:\n    def __init__(self, paMeno):\n        self.aMeno = paMeno\n        self.aPozdravEN = \"Hello\"\n        self.aPozdravSK = \"Cus\"\n\n    def pozdravMa(self, paJazyk):\n        if paJazyk == \"EN\":\n            return self.aPozdravEN + \" \" + self.aMeno\n        if paJazyk == \"SK\":\n            return self.aPozdravSK + \" \" + self.aMeno \n\njazyk = input(\"Choose your language/Vyber si svoj jazyk\")\nmeno = input(\"Enter your name/Zadaj svoje meno\")\n\nhello = Pozdrav(meno)\n\npozdrav = hello.pozdravMa(jazyk)\n\nprint(pozdrav)","sub_path":"cv1.py","file_name":"cv1.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426021395","text":"menu = [\n    [\"egg\", \"bacon\"],\n    [\"egg\", \"sausage\", \"bacon\"],\n    [\"egg\", \"spam\"],\n    [\"egg\", \"bacon\", \"spam\"],\n    [\"egg\", \"bacon\", \"sausage\", \"spam\"],\n    [\"spam\", \"sausage\", \"bacon\", \"spam\"],\n    [\"spam\", \"egg\", \"spam\", \"spam\", \"bacon\", \"spam\"],\n    [\"spam\", \"sausage\", \"spam\", 
\"bacon\", \"spam\", \"tomato\", \"spam\"],\n]\n\n# Solution 1, remove spam from each of the inner lists\n# for meal in menu: # iterate through the meals in the menu\n# for index in range(len(meal) - 1, - 1, -1): # iterate through the items in the meals backwards\n# if meal[index] == \"spam\": # find all the items that are spam\n# del meal[index] # remove the items with spam\n# print(meal) # print on this indention level prints the meals after the loops finish\n\n# Solution 2\n# for meal in menu: # iterate through the menu to get the meals\n# for item in meal: # iterate through the items in the meals\n# if item != \"spam\": # make sure that spam is not spam\n# print(item, end=\", \") # print the items that are not spam\n# print() # Put a space between each meal\n\n# The join method\nfor meal in menu:\n for index in range(len(meal) - 1, -1, - 1):\n if meal[index] == \"spam\":\n del meal[index]\n print(\", \".join(meal))\n","sub_path":"3.ListsAndTuples/1.Sequences/no_spam.py","file_name":"no_spam.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"588241734","text":"\"\"\"\n8.\tПосчитать, сколько раз встречается определенная цифра в введенной\n последовательности чисел. Количество вводимых чисел и цифра,\n которую необходимо посчитать, задаются вводом с клавиатуры.\n\nПример:\nСколько будет чисел? - 2\nКакую цифру считать? - 3\nЧисло 1: 223\nЧисло 2: 21\nБыло введено 1 цифр '3'\n\nЗДЕСЬ ДОЛЖНА БЫТЬ РЕАЛИЗАЦИЯ ЧЕРЕЗ ЦИКЛ\n\"\"\"\nprint(\"Данная программа подсчитывает количество повторений определенной цифры в ряде натуральных чисел.\")\ndef counting(n, aim_digit):\n repeat_counting = 0\n for i in range(1, n + 1):\n try:\n number = int(input(f'Введите натуральное число {str(i)}: '))\n while number > 0:\n if number % 10 == aim_digit:\n repeat_counting += 1\n number = number // 10\n except ValueError:\n print(\"Необходимо ввести натуральное число. Попробуйте ещё раз.\")\n print(f\"В данной последовательности чисел было введено {repeat_counting} цифр(ы) {aim_digit}.\")\n\n\nwhile True:\n try:\n n = int(input(\"Введите количество чисел: \"))\n m = int(input(\"Введите целевую цифру подсчета: \"))\n if n < 1:\n print(\"Попробуйте ещё раз.\")\n continue\n break\n except ValueError:\n print(\"Необходимо ввести натуральное число. Попробуйте ещё раз.\")\n\ncounting(n, m)","sub_path":"Урок 2. 
Практическое задание/task_8/task_8_1.py","file_name":"task_8_1.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527185670","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 28 20:15:04 2018\n\n@author: Kazuki\n\"\"\"\n\nimport gc, os\nfrom tqdm import tqdm\nimport pandas as pd\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count\n#from glob import glob\nfrom sklearn.model_selection import GroupKFold\nimport count\nimport utils_cat\nimport utils\nutils.start(__file__)\n#==============================================================================\n\nNFOLD = 5\n\nSEED = 71\n\nHEADS = list(range(300, 1000, 100))\n\nparam = {\n 'objective': 'binary',\n 'metric': 'auc',\n 'learning_rate': 0.01,\n 'max_depth': 6,\n 'num_leaves': 63,\n 'max_bin': 255,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.9,\n 'subsample': 0.9,\n 'nthread': 16,\n# 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n 'seed': SEED\n }\n\n# =============================================================================\n# load\n# =============================================================================\nimp = pd.read_csv('LOG/imp_801_imp_lgb.py.csv').sort_values('total', ascending=False)\n\nfiles = ('../feature_prev/train_' + imp.head(max(HEADS)).feature + '.f').tolist()\n\nX_all = pd.concat([\n pd.read_feather(f) for f in tqdm(files, mininterval=60)\n ], axis=1)\ny = utils.read_pickles('../data/prev_label').TARGET\n\ncol = [c for c in X_all.columns if c.startswith('f101_app_')]\nX_all.drop(col, axis=1, inplace=True)\n\nsub_train = utils.read_pickles('../data/prev_train', ['SK_ID_CURR']).set_index('SK_ID_CURR')\nsub_train['y'] = y.values\nsub_train['cnt'] = sub_train.index.value_counts()\nsub_train['w'] = 1 / sub_train.cnt.values\n\ngroup_kfold = GroupKFold(n_splits=NFOLD)\nsub_train['g'] = sub_train.index % NFOLD\n\n\nfor HEAD in HEADS:\n \n X = X_all.iloc[:, :HEAD]\n \n if X.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X.columns[X.columns.duplicated()] }')\n print('no dup :) ')\n print(f'X.shape {X.shape}')\n \n gc.collect()\n \n CAT = list( set(X.columns)&set(utils_cat.ALL))\n \n \n # =============================================================================\n # cv\n # =============================================================================\n dtrain = lgb.Dataset(X, y, categorical_feature=CAT )\n gc.collect()\n \n ret = lgb.cv(param, dtrain, 9999, folds=group_kfold.split(X, sub_train['y'], \n sub_train['g']), \n early_stopping_rounds=100, verbose_eval=50,\n seed=SEED)\n \n result = f\"CV auc-mean({HEAD}): {ret['auc-mean'][-1]}\"\n print(result)\n \n utils.send_line(result)\n\n\n#==============================================================================\nutils.end(__file__)\n#utils.stop_instance()\n\n\n","sub_path":"Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py_prev/803_cv_lgb_drop-curr.py","file_name":"803_cv_lgb_drop-curr.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"37605295","text":"\nfrom CRMCal.QualificationEmotionCalat1800 import *\n\n#%%\nif __name__ 
== '__main__':\n # qualificationCal(\"20190128\")\n # publishInfo(\"101900152.IB\")\n engine_InvestSystem = create_engine('mssql+pyodbc://sa:tcl+nftx@10.28.7.43:1433/InvestSystem?driver=SQL+Server')\n\n # calableBonds('20190128')\n calList = []\n end = date.today().strftime(\"%Y%m%d\")\n start = (date.today()-timedelta(14)).strftime(\"%Y%m%d\")\n cur = conn_43.cursor()\n sql = \"\"\"\n delete from EmotionIndex where updatetime >= '%s';\n delete from QualificationIndex where updatetime >= '%s';\n \"\"\" % (start, start)\n cur.execute(sql)\n conn_43.commit()\n cur.close()\n\n date_range = pd.date_range(start=start, end=end, freq='d')\n # date_range = pd.date_range(start=\"20190101\", end=\"20190306\", freq='d')\n for dt in date_range:\n #for Date in tdays.Times:\n df = calableBonds(dt.strftime(\"%Y%m%d\"))\n # AAA9Minfo = AAA9M(Date.strftime(\"%Y%m%d\"))\n # if AAA9Minfo is not None:\n # calList.append([Date.strftime(\"%Y%m%d\"), qualificationCal(df), emotionCal(df), float(AAA9Minfo[1])])\n # print(Date.strftime(\"%Y%m%d\"))\n calList.append([dt.strftime(\"%Y%m%d\"), qualificationCal(df), emotionCal(df), np.nan])\n\n if calList.__len__() > 0:\n df = pd.DataFrame(calList, columns=['updatetime', 'qualification', 'emotion', 'AAA9MYield'])\n df[['updatetime', 'qualification']].to_sql(\"QualificationIndex\", engine_InvestSystem, index=False, if_exists='append')\n df[['updatetime', 'emotion', 'AAA9MYield']].to_sql(\"EmotionIndex\", engine_InvestSystem, index=False, if_exists='append')\n\n print(\"finished\")\n","sub_path":"CRMCal/QualificationEmotioncalrefresh.py","file_name":"QualificationEmotioncalrefresh.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401087106","text":"import fire\n\ndef compareVersion(version1, version2):\n \"\"\"\n :type version1: str\n :type version2: str\n :rtype: int\n \"\"\"\n ver1 = [int(i) for i in version1.split('.')]\n ver2 = [int(i) for i in version2.split('.')]\n\n ver1.extend([0] * (len(ver2)-len(ver1)))\n ver2.extend([0] * (len(ver1)-len(ver2)))\n print(ver1, ver2)\n return 0 if ver1==ver2 else {True:1, False:-1}[ver1>ver2]\n\ndef main():\n fire.Fire(compareVersion)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"165_compare_ver.py","file_name":"165_compare_ver.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529547406","text":"from django.shortcuts import render\nfrom rest_framework.decorators import api_view, permission_classes\n# from rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom rest_framework.request import Request\nfrom django.shortcuts import get_object_or_404\nfrom api.serializers import CategorySerailizer,ProductSerializer,FeedbackSerializer,CartSerializer,UserSerializer\nfrom rest_framework import generics\nfrom rest_framework import viewsets\nfrom rest_framework import mixins\nfrom rest_framework.exceptions import APIException\n# from django.http.response import HttpResponse, JsonResponse\n# from django.http.request import HttpRequest\nfrom api.models import*\n# Create your views here.\n@api_view(['GET','POST'])\ndef products_list(request):\n if request.method == 'GET':\n products = Product.objects.all()\n serializer = ProductSerializer(products, many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n data = request.data\n serializer = ProductSerializer(data=data)\n 
if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response({'error': serializer.errors})\n@api_view(['GET','PUT','DELETE'])\ndef product_detail(request, product_id):\n    product = get_object_or_404(Product,id=product_id)\n    if request.method == 'GET':\n        serializer = ProductSerializer(product)\n        return Response(serializer.data)\n    elif request.method == 'PUT':\n        serializer = ProductSerializer(instance=product, data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response({'errors':serializer.errors})\n    elif request.method == 'DELETE':\n        product.delete()\n        return Response({'deleted':True})\n    \nclass CategoryAPIView(generics.ListCreateAPIView):\n    queryset = Category.objects.all()\n    serializer_class = CategorySerailizer\n\nclass CategoryDetailAPIView(generics.RetrieveUpdateDestroyAPIView):\n    queryset = Category.objects.all()\n    serializer_class = CategorySerailizer\n    lookup_url_kwarg = 'category_id'\n\n# JsonResponse and status are used below, but their imports were missing\n# (the django.http import at the top of this file is commented out)\nfrom django.http import JsonResponse\nfrom rest_framework import status\n\ndef products_by_category(request,fk):\n    try:\n        category = Category.objects.get(id = fk)\n    except Category.DoesNotExist as e:\n        return JsonResponse({'error': str(e)}, safe=False)\n    products = category.product_set.all()\n    response = [product.to_json() for product in products]\n    return JsonResponse(response, safe=False)\n\n# class CartViewSet(viewsets.ModelViewSet):\n#     queryset = Cart.objects.all()\n#     serializer_class = CartSerializer\n\nclass FeedbackAPIView(generics.CreateAPIView):\n    queryset = Feedback.objects.all()\n    serializer_class = FeedbackSerializer\n\n@api_view(['GET', 'POST', 'DELETE'])\n# @permission_classes([IsAuthenticated])\ndef cart_list(request, pk=None):\n    if request.method == 'GET':\n        cart_list = Cart.objects.all()\n        serializer = CartSerializer(cart_list, many=True)\n        return Response(serializer.data)\n\n    if request.method == 'POST':\n        serializer = CartSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data)\n        return Response(serializer.errors)\n\n    if request.method == 'DELETE':\n        if pk is None:\n            return Response('error no pk matched')\n        cart_item=Cart.objects.get(id=pk)\n        cart_item.delete()\n        return Response('DELETED')\n\n\n@api_view(['GET', 'POST'])\ndef category_product(request, pk):\n    if request.method == 'GET':\n        products = Product.objects.filter(category_id=pk)\n        serializer = ProductSerializer(products, many=True)\n        return Response(serializer.data)\n    elif request.method == 'POST':\n        serializer = ProductSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR)","sub_path":"shopback/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"252064655","text":"# -*- mode: python -*-\n# Copyright (C) 2016 Niklas Rosenstein\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all 
copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n__all__ = ['get_proto_meta', 'ProtoCompiler']\n\nfrom craftr import Target\nimport re\n\n\ndef get_proto_meta(filename):\n ''' Extracts the package declaration and various meta information\n from the specified .proto file. '''\n\n messages = []\n meta = {}\n with open(filename, 'r') as fp:\n for line in fp:\n index = line.find('//')\n if index >= 0:\n line = line[:index]\n match = re.search('package\\s+([\\w\\.]+)\\s*;', line)\n if match:\n meta['package'] = match.group(1)\n continue\n match = re.search('option\\s+([\\w_]+)\\s*=\\s*(.*);', line)\n if match:\n meta[match.group(1)] = match.group(2).strip()\n continue\n match = re.search('message\\s+(\\w+)', line)\n if match:\n messages.append(match.group(1))\n continue\n return messages, meta\n\n\ndef camelify(name):\n parts = name.split('_')\n return ''.join(x.capitalize() for x in parts)\n\n\nclass ProtoCompiler(object):\n ''' Interface for the Google Protocol Buffers Compiler. '''\n\n def __init__(self, program='protoc'):\n super().__init__()\n self.program = program\n\n def compile(self, sources, proto_path=(), cpp_out=None, java_out=None, python_out=None):\n if not any([cpp_out, java_out, python_out]):\n raise ValueError('need at least cpp_out, java_out or python_out')\n if not proto_path:\n proto_path = [path.commonpath(sources)]\n command = [self.program]\n command += ['--proto_path={0}'.format(x) for x in proto_path]\n command += ['--cpp_out={0}'.format(cpp_out)] if cpp_out else []\n command += ['--java_out={0}'.format(java_out)] if java_out else []\n command += ['--python_out={0}'.format(python_out)] if python_out else []\n command += ['$in']\n\n outputs = []\n for fn in sources:\n if not fn.endswith('.proto'):\n raise ValueError('not a .proto file: {0!r}'.format(fn))\n\n messages, meta = get_proto_meta(fn)\n fn = fn[:-6]\n base = path.basename(fn)\n camel_base = camelify(base)\n camel_messages = [camelify(x) for x in messages]\n\n for dirname in proto_path:\n relp = path.relpath(fn, dirname)\n if relp == path.curdir or relp.startswith(path.pardir):\n continue\n relp = path.join(path.dirname(relp), path.basename(relp).lower())\n\n if cpp_out:\n outputs.append(path.join(cpp_out, relp + '.pb.cc'))\n\n if java_out:\n # Determine the Java package name.\n package = meta.get('java_package', '\"\"').strip('\"')\n if not package:\n package = meta.get('package', '')\n parentdir = package.replace('.', path.sep)\n\n # Determine the outer class name.\n outer_class = meta.get('java_outer_classname', '')\n if not outer_class:\n if camel_base in camel_messages:\n outer_class = camel_base + 'OuterClass'\n else:\n outer_class = camel_base\n\n outputs.append(path.join(java_out, parentdir, outer_class + '.java'))\n\n if python_out:\n outputs.append(path.join(python_out, relp + '_pb2.py'))\n\n return Target(command, sources, 
outputs)\n","sub_path":"craftr/lib/craftr.ext.compiler.protoc.py","file_name":"craftr.ext.compiler.protoc.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620830408","text":"from Piece import Piece\nfrom Board import Board\nimport pickle\nimport numpy as np\nimport copy\nimport random\n\n# a move will be stored as (player,piece_num,orientation,translation)\n\nclass Player:\n \n #initialize\n def __init__(self,player_num,size_in,board,pieces):\n \n self.num = player_num\n \n # maintains a vector of played pieces (1 = played)\n self.played = np.ones([len(pieces)])\n # MIKE Changed this; should be 21 pieces\n self.played[0:21] = 0\n \n # keep a list of places you need to check for changes to valid moves - game manager will append to this\n self.update_new_corner_adjs = []\n self.update_adjacents_to_last_played = []\n self.board_before_previous_play = copy.deepcopy(board)\n \n # maintain a list of valid moves\n self.init_valid_moves(board,pieces)\n \n # initialize valid move list\n # Note from Mike: Not top priority, but I wonder if some of the code\n # from this function could be easily combined with the update_valid_moves\n # function to avoid duplication? \n def init_valid_moves(self,board,pieces):\n all_valid_moves = []\n \n if self.num == 1:\n corner = (0,0)\n elif self.num == 2:\n corner = (board.size-1,board.size-1)\n elif self.num == 3:\n corner = (0,board.size-1)\n else :\n corner = (board.size-1,0)\n \n # each i represents 1 piece\n for i in range (0,len(pieces)):\n #each j represents 1 orientation\n if self.played[i] == 0:\n for j in range (0,8):\n if j >= len(pieces[i]):\n break\n #each k represents 1 corner of the piece\n for k in range (0, 8):\n if k >= len(pieces[i][j][0].corners):\n break\n \n #find translation necessary to put piece corner into valid corner\n x = corner[0] - pieces[i][j][0].corners[k][0]\n y = corner[1] - pieces[i][j][0].corners[k][1]\n \n #check if move is valid\n temp = copy.deepcopy(pieces[i][j][0])\n temp.translate((x,y))\n if board.check_valid_move(self.num,temp):\n # Note from Mike: It may actually be helpful to create a Move class\n # not because this is a bad format as it is, but because it would\n # improve understandability \n all_valid_moves.append((self.num,i,j,(x,y)))\n self.valid_moves = all_valid_moves\n \n \n # make_move - updates all players' lists of tracked changes, updates available piecelist, returns move to Game, which will call board method to update board\n # a move will be stored as (player,piece_num,orientation,translation)\n def make_move(self,board,pieces,strategy):\n #Step 1 - update valid_moves list\n \n # for item in new corner adjs: search all unplayed piece orientations and add\n # each i represents 1 piece\n for i in range (0,len(pieces)):\n if self.played[i] == 0:\n #each j represents 1 orientation\n for j in range (0,8):\n if j >= len(pieces[i]):\n break\n #each k represents 1 corner of the piece\n for k in range (0, 8):\n if k >= len(pieces[i][j][0].corners):\n break\n #each m represents one valid corner placement on board for player\n for item in self.update_new_corner_adjs:\n #find translation necessary to put piece corner into valid corner\n x = item[0] - pieces[i][j][0].corners[k][0]\n y = item[1] - pieces[i][j][0].corners[k][1]\n \n #check if move is valid\n temp = copy.deepcopy(pieces[i][j][0])\n temp.translate((x,y))\n if board.check_valid_move(self.num,temp):\n self.valid_moves.append((self.num,i,j,(x,y))) \n\n \n\n 
# get list of changed squares\n # check for played piece, check for newly occupied square, and check for adjacents\n check = board.board-self.board_before_previous_play.board\n bad_squares = []\n # all squares that have changed since last turn are no longer playable\n for i in range(0,board.size):\n for j in range(0,board.size):\n if check[i,j] != 0:\n bad_squares.append((i,j))\n \n # add adjacents from last move to bad_squares list\n for point in self.update_adjacents_to_last_played:\n bad_squares.append(point)\n \n # check if any piece squares are now occupied or are\n # adjacent to player's own pieces\n for move_index in reversed(range((len(self.valid_moves)))):\n move = self.valid_moves[move_index]\n temp_piece = copy.deepcopy(pieces[move[1]][move[2]][0])\n temp_piece.translate(move[3])\n \n for point in bad_squares:\n \n if point in temp_piece.occupied:\n del self.valid_moves[move_index]\n break\n \n \n for i in range (0,len(self.played)):\n if self.played[i] == 1:\n for move_index in reversed(range((len(self.valid_moves)))):\n move = self.valid_moves[move_index]\n if move[1] == i:\n del self.valid_moves[move_index]\n \n success = False\n while success == False:\n #Step 2 - select a move from valid moves\n if len(self.valid_moves) == 0:\n return False\n else:\n if strategy == 'random':\n move_idx = random.randint(0,len(self.valid_moves)-1)\n move = self.valid_moves[move_idx]\n else:\n return 'That strategy doesnt exist yet.'\n \n #Step 3 - make move\n # call make_move on board\n temp = copy.deepcopy(pieces[move[1]][move[2]][0])\n temp.translate(move[3])\n success = board.play_piece(self.num,temp)\n if success == False:\n self.valid_moves.remove(move)\n print(\"Attempted to play a failed move\")\n board.display2()\n # update played_pieces\n print(\"Success\")\n self.played[move[1]] = 1\n \n #reset update lists\n self.update_new_corner_adjs = []\n self.update_adjacents_to_last_played = [] \n \n # add new corner_adjs to update_list\n for point in temp.diag_adjacents:\n self.update_new_corner_adjs.append(point)\n \n # add adjacents to update list \n for point in temp.adjacents:\n self.update_adjacents_to_last_played.append(point)\n \n return temp","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":7523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300262177","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.stats import linregress\n\ndef draw_plot():\n # Read data from file\n df = pd.read_csv(\"epa-sea-level.csv\")\n\n # Create scatter plot\n plt.scatter(df[\"Year\"], df[\"CSIRO Adjusted Sea Level\"])\n\n # Create first line of best fit\n xrange1 = pd.Series([i for i in range(1880, 2051)])\n reg1 = linregress(df[\"Year\"], df[\"CSIRO Adjusted Sea Level\"])\n intercept1 = reg1.intercept\n slope1 = reg1.slope\n \n plt.plot(xrange1, intercept1 + slope1*xrange1)\n\n # Create second line of best fit\n # df of data from 2020 up\n recent = df.loc[df[\"Year\"] >= 2000]\n xrange2 = pd.Series([i for i in range(2000,2051)])\n reg2 = linregress(recent[\"Year\"], recent[\"CSIRO Adjusted Sea Level\"])\n intercept2 = reg2.intercept\n slope2 = reg2.slope\n\n plt.plot(xrange2, intercept2 + slope2*xrange2)\n\n # Add labels and title\n plt.xlabel(\"Year\")\n plt.ylabel(\"Sea Level (inches)\")\n plt.title(\"Rise in Sea Level\")\n\n # Save plot and return data for testing (DO NOT MODIFY)\n plt.savefig('sea_level_plot.png')\n return 
plt.gca()","sub_path":"sea_level_predictor.py","file_name":"sea_level_predictor.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"586399229","text":"#!/usr/bin/env python3\n# https://projecteuler.net/problem=3\n__author__ = 'chandannayak'\n\nimport math\n\n\ndef find_prime(n):\n    if n > 0:\n        for i in range(2, n//2+1):\n            if n % i == 0:\n                return False\n    return True\n\n\ndef find_factors(n):\n    # collect the prime factors of n by repeated division; dividing n down as\n    # factors are found also keeps any prime factor larger than sqrt(n)\n    l = []\n    if n > 1:\n        i = 2\n        while i * i <= n:\n            while n % i == 0:\n                l.append(i)\n                n //= i\n            i += 1\n        if n > 1:\n            l.append(n)\n    return l\n\nresult = find_factors(600851475143)\nresult.sort()\nprint(result[-1])\n# print(find_factors(600851475143))\n","sub_path":"practice/projecteuler-03.py","file_name":"projecteuler-03.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"261360192","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 - 2019 Tobias Benzin tbenzin@digital-nerv.net\n# Rally Vincent rvincent@digital-nerv.net\n\nimport os\nimport yaml\nimport jinja2\n\n\nclass SaltDoc:\n    def __init__(self, file_roots='../file_roots', pillar_roots='../pillar_roots',\n                 source='docs/source'):\n        self.docs_source = source\n        self.file_roots = file_roots\n        self.pillar_roots = pillar_roots\n        self.env = jinja2.Environment(\n            loader=jinja2.FileSystemLoader('.')\n        )\n\n    def template_sls(self, title, sls, literalinclude, language='salt'):\n        template = self.env.get_template('template-sls.rst')\n        if title:\n            title = '%s\\n%s' % (title, self.titlemarkup(title))\n        return template.render(title=title, sls=sls, literalinclude=literalinclude,\n                               language=language)\n\n    def template_index(self, title, toc, maxdepth=4):\n        template = self.env.get_template('template-index.rst')\n        if title:\n            title = '%s\\n%s' % (title, self.titlemarkup(title))\n        return template.render(title=title, toc=toc, maxdepth=maxdepth)\n\n    @staticmethod\n    def titlemarkup(title, heading='='):\n        if title:\n            return heading * len(title)\n\n    @staticmethod\n    def targets(roots):\n        filename = '%s/top.sls' % roots\n        if os.path.exists(filename):\n            with open(filename, 'r') as content:\n                try:\n                    return yaml.load(content, yaml.FullLoader).get('base')\n                except yaml.YAMLError as error:\n                    print(error)\n        return\n\n    @staticmethod\n    def get_title_from_first_comment(filename):\n        if os.path.exists(filename):\n            with open(filename, 'r') as content:\n                first_line = content.readline()\n                if first_line.startswith('# '):\n                    return first_line.lstrip('# ').strip()\n\n    def parse_states(self, folder, title):\n        toc = []\n        includes = self.parse_init(self.file_roots, folder)\n        for include in includes:\n            toc.append('../%s/index' % self.parse_include(include))\n            sls = os.path.basename(self.parse_include(include))\n            literalinclude = '../../../../../../file_roots/%s.sls' % self.parse_include(include)\n            filename = '%s/file_roots/%s/index.rst' % (\n                self.docs_source, self.parse_include(include))\n            template = self.template_sls(\n                self.get_title_from_first_comment(\n                    '../file_roots/%s.sls' % self.parse_include(include)), sls, literalinclude\n            )\n            self.write_rst(filename, template)\n            document_filename = '%s/file_roots/%s/%s.rst' % (\n                self.docs_source, self.parse_include(include), sls)\n            open(document_filename, 'a').close()\n        filename = '%s/file_roots/%s/index.rst' % (self.docs_source, folder)\n        template = self.template_index(title, toc)\n        self.write_rst(filename, template)\n\n    def parse_pillars(self, folder, title):\n        toc = []\n        
includes = self.parse_init(self.pillar_roots, folder)\n        if includes:\n            for include in includes:\n                toc.append('../%s/index' % self.parse_include(include))\n                sls = os.path.basename(self.parse_include(include))\n                literalinclude = '../../../../../../pillar_roots/%s.sls' % self.parse_include(include)\n                filename = '%s/pillar_roots/%s/index.rst' % (\n                    self.docs_source, self.parse_include(include))\n                template = self.template_sls(\n                    self.get_title_from_first_comment(\n                        '../pillar_roots/%s.sls' % self.parse_include(include)), sls, literalinclude\n                )\n                self.write_rst(filename, template)\n                document_filename = '%s/pillar_roots/%s/%s.rst' % (\n                    self.docs_source, self.parse_include(include), sls)\n                open(document_filename, 'a').close()\n        filename = '%s/pillar_roots/%s/index.rst' % (self.docs_source, folder)\n        template = self.template_index(title, toc)\n        self.write_rst(filename, template)\n\n    @staticmethod\n    def write_rst(filename, template):\n        import errno  # errno is referenced below but was never imported at module level\n        if not os.path.exists(os.path.dirname(filename)):\n            try:\n                os.makedirs(os.path.dirname(filename))\n            except OSError as exception:\n                if exception.errno != errno.EEXIST:\n                    raise\n        with open(filename, \"w\") as handler:\n            handler.write(template)\n\n    def parse_init(self, roots, folder):\n        filename = '%s/%s/init.sls' % (roots, folder)\n        if os.path.exists(filename):\n            with open(filename, 'r') as content:\n                try:\n                    return yaml.load(content, yaml.FullLoader).get('include')\n                except yaml.YAMLError as error:\n                    print(error)\n        else:\n            return self.parse_top(roots, folder)\n        return\n\n    @staticmethod\n    def parse_include(include):\n        if len(include.split('.')) > 0:\n            elements = include.split('.')\n            return '/'.join(elements)\n        else:\n            return\n\n    def parse_top(self, roots, folder):\n        targets = self.targets(roots)\n        for target, states in targets.items():\n            if folder in target:\n                return states\n        return\n","sub_path":"contrib/saltdoc.py","file_name":"saltdoc.py","file_ext":"py","file_size_in_byte":5487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"584669259","text":"import matplotlib.pyplot as plt\nfrom MNIST_test import *\n\nlabels = sess.run(model, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob:1 })\n\nfig = plt.figure()\n\nfor i in range(10):\n    # build a 2x5 grid of subplots and draw the digit image in slot i+1\n    subplot = fig.add_subplot(2, 5, i+1)\n    # hide the x and y axis ticks so the image renders cleanly\n    subplot.set_xticks([])\n    subplot.set_yticks([])\n    # print the predicted digit above the image\n    # np.argmax does the same job as tf.argmax\n    # the i-th element of the result labels is one-hot encoded, so\n    # the index of the largest value in that array is the predicted digit\n    subplot.set_title('%d' % np.argmax(labels[i]))\n    # reshape the flat (1-D) i-th image data into a 28x28 2-D array and display it;\n    # the cmap parameter renders the image in grayscale\n    subplot.imshow(mnist.test.images[i].reshape((28, 28)), cmap=plt.cm.gray_r)\n\nplt.show()\n\n","sub_path":"TensorFlow_Test_200106/MNIST_matplot_test.py","file_name":"MNIST_matplot_test.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"7625944","text":"import numpy as np\nimport cv2\n\ndef increase_brightness(img, value):\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n    h, s, v = cv2.split(hsv)\n\n    lim = 255 - value\n    v[v > lim] = 255\n    v[v <= lim] += value\n\n    final_hsv = cv2.merge((h, s, v))\n    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n    return img\n\ndef rotateImage(image, angle):\n    image_center = tuple(np.array(image.shape[1::-1]) / 2)\n    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n    result = 
cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\n return result\n\n\nimage = cv2.imread(\"Lenna.jpg\", cv2.IMREAD_GRAYSCALE)\ncv2.imshow(\"original image\", image)\n#for y in range(1,image.shape[0]):\n #for x in range(1,image.shape[1]):\n\n #Invert Image\n #image[y,x] = 255 - image[y,x];\n\n #Brightness Change\n #i = image[y,x]+100;\n #image[y,x] = i if i<256 else 255\n\n #flip image\n#for y in range(0, image.shape[0]):\n #for x in range(1,int(image.shape[1]/2)):\n # i = image[y, image.shape[1]-x]\n #image[y,image.shape[1]-x] = image[y,x]\n #image[y,x]=i\n\n # contrast image\n#for y in range(0,image.shape[0]):\n #for x in range(1,int(image.shape[1]/2)):\n # image[y,x,0] = (255,255,255)-image[y,x,0]\n\n #Binary Image\n#for y in range (0,image.shape[0]):\n #for x in range(1,int(image.shape[1])):\n #image[y,x] = 0 if image[y,x]<180 else 255\n\n # Detect horizontal edges\nfor y in range (0, image.shape[0]-1):\n for x in range(1,int(image.shape[1]-1)):\n i = int(image[y,x]) - int (image[y,x+1])\n image[y,x] = i*4 if i >0 else 0\n\n\n\n\ncv2.imshow(\"Modified Image\", image)\ncv2.waitKey(0)\n\n\n\n#brightness_function = increase_brightness(image, 20)\n","sub_path":"imagePro.py","file_name":"imagePro.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316509081","text":"# A simple metrics gathering interface for F1.\nimport time\n\n_emptydict = {}\n\n\nclass MetricsConsumer(object):\n def consume(self, data):\n raise NotImplementedError\n\n\nclass MetricsCollector(object):\n def __init__(self, consumer):\n self.consumer = consumer\n self.enabled = True\n\n def _get_distinct_attrs(self, distinct_ob):\n # 'distinct attributes' are anything we want to group the metrics by -\n # eg, it may include the concept of a 'user', a 'remote address', etc.\n # if it is already a dict, assume it is already a set of attributes.\n if distinct_ob is None:\n return _emptydict\n if isinstance(distinct_ob, dict):\n return distinct_ob\n return self._distinct_object_to_attrs(distinct_ob)\n\n def _distinct_object_to_attrs(self, distinct_ob):\n raise NotImplementedError\n\n def track(self, distinct_ob, id, **data):\n if not self.enabled:\n return\n data.update(self._get_distinct_attrs(distinct_ob))\n data['when'] = time.time() # can be formatted externally for lower perf impact here.\n data['id'] = id\n self.consumer.consume(data)\n\n def start_timer(self, distinct_ob, **init_data):\n init_data.update(self._get_distinct_attrs(distinct_ob))\n return TimedMetricsCollector(self, init_data)\n\n\nclass TimedMetricsCollector(object):\n def __init__(self, parent_collector, init_data):\n self.parent_collector = parent_collector\n self.init_data = init_data\n self.tracked = False\n self.started = time.clock()\n\n def track(self, id, **data):\n assert not self.tracked\n self.tracked = True\n if self.init_data is not None:\n data.update(self.init_data)\n assert 'took' not in data, data\n data['took'] = time.clock() - self.started\n self.parent_collector.track(None, id, **data)\n\n\n# F1 specific stuff - should probably go into its own module once it gets\n# more sophisticated or more options...\nimport logging\nlog = logging.getLogger('linkdrop-metrics')\n\n\nclass F1MetricsConsumer(MetricsConsumer):\n def consume(self, data):\n # gozer has requested a simple format of name=value, space sep'd and\n # strings quoted.\n msg = \" \".join((\"%s=%r\" % (n, v.encode(\"utf-8\") if isinstance(v, unicode) else v)\n 
for n, v in data.iteritems()))\n log.info(\"%s\", msg)\n\n\nclass F1MetricsCollector(MetricsCollector):\n def _distinct_object_to_attrs(self, distinct_ob):\n # distinct_ob is expected to be a pylons 'request' object\n # a proxy is used in production, so prefer HTTP_X_FORWARDED_FOR\n try:\n remote_address = distinct_ob.environ['HTTP_X_FORWARDED_FOR']\n except KeyError:\n remote_address = distinct_ob.environ.get(\"REMOTE_ADDR\")\n return {\n 'remote_address': remote_address,\n }\n\n# the object used by the code.\nmetrics = F1MetricsCollector(F1MetricsConsumer())\n","sub_path":"linkdrop/lib/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"630857125","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module contains list of white- and black-listed ``python`` members.\n\nIt contains lists of keywords and built-in functions we discourage to use.\nIt also contains some exceptions that we allow to use in our codebase.\n\"\"\"\n\nimport sys\nfrom typing import Tuple\n\n#: List of functions we forbid to use.\nBAD_FUNCTIONS = frozenset((\n # Code generation:\n 'eval',\n 'exec',\n 'compile',\n\n # Magic:\n 'globals',\n 'locals',\n 'vars',\n 'dir',\n\n # IO:\n 'input',\n\n # Attribute access:\n 'hasattr',\n 'delattr',\n\n # Misc:\n 'copyright',\n 'help',\n\n # Dynamic imports:\n '__import__',\n\n # OOP:\n 'staticmethod',\n))\n\n#: List of module metadata we forbid to use.\nBAD_MODULE_METADATA_VARIABLES = frozenset((\n '__author__',\n '__all__',\n '__version__',\n '__about__',\n))\n\n\n_BAD_VARIABLE_NAMES: Tuple[str, ...] = (\n 'data',\n 'result',\n 'results',\n 'item',\n 'items',\n 'value',\n 'values',\n 'val',\n 'vals',\n 'var',\n 'vars',\n 'content',\n 'contents',\n 'info',\n 'handle',\n 'handler',\n 'file',\n 'obj',\n 'objects',\n 'objs',\n 'foo',\n 'bar',\n 'baz',\n)\n\nif sys.version_info < (3, 7): # pragma: no cover\n _BAD_VARIABLE_NAMES += (\n # Compatibility with `python3.7`:\n 'async',\n 'await',\n )\n\n#: List of variable names we forbid to use.\nBAD_VARIABLE_NAMES = frozenset(_BAD_VARIABLE_NAMES)\n\n#: List of magic methods that are forbiden to use.\nBAD_MAGIC_METHODS = frozenset((\n # Since we don't use `del`:\n '__del__',\n '__delitem__',\n '__delete__',\n\n '__dir__', # since we don't use `dir()`\n '__delattr__', # since we don't use `delattr()`\n))\n\n#: List of nested classes' names we allow to use.\nNESTED_CLASSES_WHITELIST = frozenset((\n 'Meta',\n))\n\n#: List of nested functions' names we allow to use.\nNESTED_FUNCTIONS_WHITELIST = frozenset((\n 'decorator',\n 'factory',\n))\n\n#: List of allowed ``__future__`` imports.\nFUTURE_IMPORTS_WHITELIST = frozenset((\n 'annotations',\n 'generator_stop',\n))\n\n#: List of blacklisted module names:\nBAD_MODULE_NAMES = frozenset((\n 'util',\n 'utils',\n 'utilities',\n 'helpers',\n))\n\n#: List of allowed module magic names:\nMAGIC_MODULE_NAMES_WHITELIST = frozenset((\n '__init__',\n '__main__',\n))\n\n\n# Internal variables\n# They are not publicly documented since they are only used internally\n\n# This variable is used as a default filename, when it is not passed by flake8:\nSTDIN = 'stdin'\n\n# This variable is used to specify as a placeholder for `__init__.py`:\nINIT = '__init__'\n","sub_path":"wemake_python_styleguide/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"229058005","text":"import inspect\nimport logging\n\nimport pytest\n\n\n# @pytest.mark.parametrize(\"input,expected\", [('02-01-1990', 'Thu Feb 01 1990 00:00:00 GMT+0000'), ('02-01-2000', 'Tue Feb 01 2000 00:00:00 GMT+0000')])\n@pytest.mark.usefixtures(\"setup\")\nclass BaseClass:\n def getLogger(self):\n loggerName = inspect.stack()[1][3]\n logger = logging.getLogger(loggerName)\n fileHandler = logging.FileHandler('logfile.log')\n formatter = logging.Formatter(\"%(asctime)s :%(levelname)s : %(name)s :%(message)s\")\n fileHandler.setFormatter(formatter)\n\n logger.addHandler(fileHandler) # filehandler object\n\n logger.setLevel(logging.DEBUG)\n return logger\n","sub_path":"utilities/BaseClass.py","file_name":"BaseClass.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614425577","text":"import time\n\n\ndef cal_time(fn):\n print(\"我是外部函数,我被调用了\")\n print(\"fn = {}\".format(fn))\n\n def inner():\n start = time.time()\n fn()\n end = time.time()\n print(\"代码耗时:\", end - start)\n return inner\n\n\n# 第一件事:调用cal_time\n# 第二件事:把被装饰的函数传递给fn\n# 第三件事:将装饰器函数的返回值赋值给被装饰函数的标识符,没有返回值就赋值为None\n@cal_time\ndef demo():\n x = 0\n for i in range(1, 100000000):\n x += i\n print(x)\n\n\n# @cal_time\n# def foo():\n# print(\"hello\")\n# time.sleep(3)\n# print(\"world\")\n\n# 此时的demo函数已经不是原来的demo函数了\nprint(\"装饰后的demo = {}\".format(demo))\n# 此时 demo 指向 inner函数\ndemo()\n\n\n\n\n\n","sub_path":"study01/day07/06-装饰器的使用.py","file_name":"06-装饰器的使用.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"399068455","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 12:21:49 2019\n\n@author: brgupta\n\"\"\"\n\n#Problem statement:\n# https://www.hackerrank.com/challenges/swap-case/problem\n\n\ndef swap_case(s):\n new_s = []\n for ch in s:\n if ch.islower():\n new_s.append(ch.upper())\n elif ch.isupper():\n new_s.append(ch.lower())\n else:\n new_s.append(ch)\n\n new_s = ''.join(new_s)\n return (new_s)","sub_path":"Python/swap_case.py","file_name":"swap_case.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"381500327","text":"import tensorflow as tf\nimport numpy as np\nimport sys\nsys.path.insert(1, '5_pgdl_pretrain/src')\nfrom LSTMDA import LSTMDA, rmse_masked\n\n\ndef train_save_model(input_data, out_model_file, out_h_file, out_c_file):\n data = np.load(input_data)\n n_batch, seq_len, n_feat = data['x_trn'].shape\n mymodel = LSTMDA(1)\n mymodel(data['x_trn'])\n\n mymodel.compile(loss=rmse_masked,\n optimizer=tf.optimizers.Adam(learning_rate=0.3))\n mymodel.fit(x=data['x_trn'], y=data['y_trn'], epochs=1, batch_size=n_batch)\n mymodel.save_weights(out_model_file)\n h, c = mymodel.rnn_layer.states\n np.save(out_h_file, h.numpy())\n np.save(out_c_file, c.numpy())\n\n\ntrain_save_model('5_pgdl_pretrain/in/lstm_da_data_just_air_temp.npz',\n '5_pgdl_pretrain/out/lstm_da_trained_wgts/',\n '5_pgdl_pretrain/out/h.npy', \n '5_pgdl_pretrain/out/c.npy')\n","sub_path":"5_pgdl_pretrain/src/train_lstm.py","file_name":"train_lstm.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"526670723","text":"'''https://leetcode.com/problems/longest-substring-without-repeating-characters/#/description\n给一个字符串,返回它的最大不含重复字符的子串长度\n思路:\n 建立空列表作为临时存储\n 迭代字符串\n 若该字符存在于列表中,则比较目前列表长度与目前最大子串长度\n 若目前列表长度更大,则更新最大子串长度\n 将列表从重复字符的下一个位置作为起始更新列表\n 将字符添加到列表尾部\n 迭代完成后,再次比较目前列表长度与目前最大子串长度\n\n2017/4/4 →_→Python查找和拷贝花销可能过大,以后改改\n'''\n\n\nclass Solution(object):\n def lengthOfLongestSubstring(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n max_count = 0\n temp = []\n for i in s:\n if i in temp:\n if max_count < len(temp):\n max_count = len(temp)\n temp = temp[temp.index(i) + 1:] # 猜测的性能瓶颈\n temp.append(i)\n else:\n max_count = max(max_count, len(set(temp)))\n\n return max_count\n","sub_path":"src_python/3_LongestSubstringWithoutRepeatingCharacters.py","file_name":"3_LongestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"3674755","text":"import requests\nimport json\n\nlongitude =6.144475002129221\nlatitude = 60.256895003124455\nparams = dict(lon = longitude, lat = latitude )\nu = 'https://www.vegvesen.no/nvdb/api/vegreferanse/koordinat.json'\nresp = requests.get(url=u, params=params)\ndata = json.loads(resp.text)\n\nprint(data['visningsNavn'])\n\nprint(data.keys())","sub_path":"posisjon.py","file_name":"posisjon.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260234636","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 28 16:57:38 2015\n\n@author: Derrick\n\nSymple module to handle all of Detex logging\n\n\"\"\"\n\nimport logging, os\nreload(logging) #reload to reconfigure default ipython logging behavior (hopefully this doesnt screw up anything)\ncwd=os.getcwd()\n\n## Configure logger to be used across all of Detex\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n# create file handler which logs even debug messages\nfh = logging.FileHandler(os.path.join(cwd,'detex.log'))\nfh.setLevel(logging.DEBUG)\nfh.setFormatter(formatter)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.WARNING)\nch.setFormatter(formatter)\n\n# add the handlers to the logger\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\nlogger.info('Imported Detex')\n\n\n","sub_path":"detex/detlog.py","file_name":"detlog.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349590878","text":"import os\nfrom time import sleep\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.python.keras.backend import set_session\n\n\ndef rgb2gray(rgb):\n r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]\n gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n return gray\n\n\nclass NPTUCaptcha:\n def __init__(self):\n pass\n\n def load_NN(self):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU\n # config.log_device_placement = True # to log device placement (on which device the operation ran)\n self.sess = tf.Session(config=config)\n set_session(self.sess) # set this TensorFlow session as the default session for 
Keras\n self.model = load_model(os.environ[\"HOME\"] + '/nptu/lab/smart_lab/webap_tools/webap_tools/nptu_captch.h5')\n self.graph = tf.get_default_graph()\n\n def unload_NN(self):\n self.sess.close()\n\n def get_ans(self, captcha_img):\n prediction_data = np.stack(np.array(captcha_img) / 255.0) # predict img local\n prediction_data = rgb2gray(prediction_data) # 灰階\n prediction_data = prediction_data.reshape(-1, 35, 95, 1)\n with self.graph.as_default():\n try:\n prediction = self.model.predict(prediction_data)\n except:\n clear_session()\n\n captcha_ans = \"\"\n\n for digit_onehot in prediction:\n digit_ans = digit_onehot.argmax()\n captcha_ans += str(digit_ans)\n\n return captcha_ans\n\n\ndef captcha_test():\n from ailab.webap_tools.webap_login import get_captcha\n for i in range(10): # predict 份數\n img = get_captcha()\n captcha_ans = get_ans(img)\n plt.figure(captcha_ans)\n plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), \"gray\")\n sleep(3)\n plt.show()\n\n\nif __name__ == '__main__':\n captcha_test()\n","sub_path":"webap_tools/webap_tools/captcha_prediction.py","file_name":"captcha_prediction.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653454278","text":"import asyncio\nimport os\nimport aiohttp\nfrom gidgethub.aiohttp import GitHubAPI\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n gh = GitHubAPI(\n session,\n \"mariatta\",\n oauth_token=os.getenv(\"GH_AUTH\")\n )\n pr_number = os.getenv('GH_PR_NUM')\n response = await gh.post(\n f'/repos/{os.getenv(\"GITHUB_REPOSITORY\")}/issues/{pr_number}/comments',\n data={\n 'body': 'Thanks for the PR!',\n }\n )\n\n\n # print(f\"Issue created at {response['html_url']}\")\n # issue_url = response[\"url\"]\n # response = await gh.patch(issue_url, data={\"state\": \"closed\"})\n # print(response)\n # print(\"issue closed\")\n\n\nasyncio.run(main())","sub_path":"saythanks.py","file_name":"saythanks.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76111231","text":"from django import forms\nfrom taxi_amigo.models import *\nfrom fcm_django.models import FCMDevice\n\n\nclass BookTaxiForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(BookTaxiForm, self).__init__(*args, **kwargs)\n # Para cambiar el valor de un campo\n self.fields['state'].initial = \"Valor de ejemplo\"\n\n # Si ademas estamos editando un objeto propiedad\n if self.instance:\n # Podemos hacer lo que queramos\n valor_propiedad = self.instance.state\n\n def save(self, *args, **kwargs):\n # Sobrecargar save devuelve el objeto apunto de ser guardado\n book_taxi = super(BookTaxiForm, self).save(*args, **kwargs)\n\n # Podemos hacer lo que queramos antes de guardarlo\n customer_player_id = book_taxi.customer.player_id\n book_hour = book_taxi.hour\n\n if book_taxi.driver is None:\n print (\"EL TAXISTA NO HA SIDO ASIGNADO\")\n print(customer_player_id)\n else:\n if customer_player_id == \"xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\" or customer_player_id == \"\":\n print (\"AQUI ENTRO PORQUE EL VALOR ES POR DEFECTO\")\n else:\n driver_player_id = book_taxi.driver.player_id\n print (driver_player_id)\n body = book_taxi.address + book_taxi.reference\n print(body)\n print(book_taxi.service_type)\n\n device = FCMDevice.objects.all().first()\n\n device.registration_id = driver_player_id\n\n #device.send_message(\"Title\", \"Message\")\n 
#device.send_message(data={\"test\": \"test\"})\n device.send_message(title=\"New ride request\", body=body, badge=\"1\", sound=\"default\",\n data={\"request\": {\"type\": \"reservation\", \"mainStreet\": book_taxi.address,\n \"intersection\": \"\", \"reference\": book_taxi.reference,\n \"serviceType\": str(book_taxi.service_type),\n \"latitude\": book_taxi.latitude,\n \"longitude\": book_taxi.longitude, \"orderInfo\": \"null\",\n \"destination_address\": book_taxi.destination_address,\n \"destination_latitude\": book_taxi.destination_latitude,\n \"destination_longitude\": book_taxi.destination_longitude,\n \"state\": book_taxi.state},\n \"rideInfo\": \"null\", \"clientId\": book_taxi.customer.id,\n \"pushTokenClient\": book_taxi.customer.player_id})\n # Y finalmente lo guardamos\n # book_taxi.save()\n return book_taxi\n\n def clean(self):\n # Sobrecargar clean devuelve un diccionario con los campos\n cleaned_data = super(BookTaxiForm, self).clean()\n valor_propiedad = cleaned_data.get(\"state\")\n\n if len(valor_propiedad) < 3:\n # Podemos lanzar una excepcion que aparecera sobre el campo\n raise forms.ValidationError(\"Debe tener almenos 3 caracteres\")\n\n # Siempre hay que devolver el diccionario\n return cleaned_data\n\n class Meta:\n model = BookTaxi\n fields = ['date', 'hour', 'address', 'latitude', 'longitude', 'reference', 'destination_address',\n 'destination_latitude', 'destination_longitude', 'state', 'service_type', 'customer', 'driver', ]\n","sub_path":"taxi_amigo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13535793","text":"from contextlib import suppress\nfrom itertools import chain\nfrom typing import Optional, NamedTuple\n\nfrom lifxlan3 import init_log, Group, Themes, LifxLAN\nfrom lifxlan3.routines import ColorTheme, colors_to_theme, parse_keyboard_inputs, left, right, up, down, ctrl_r, ctrl_w\n\n__author__ = 'acushner'\n\nlog = init_log(__name__)\n\nmults = dict(hue=65535 / 360, # base mult: 1 degree\n brightness=65535 / 40,\n saturation=65535 / 40)\n\n\nclass AttrOffset(NamedTuple):\n attr: str\n offset: int\n as_offset: bool = True\n\n @property\n def value(self):\n if self.as_offset:\n return self.offset * mults.get(self.attr, 1)\n return self.offset\n\n\ndef _init_keys(qwerty=False):\n \"\"\"map key presses to AttrOffset objects that determine how lights are controlled\"\"\"\n dvorak = not qwerty\n\n def _equal_offset(n, mult=1):\n return [mult * v for v in chain(range(-n, 0), range(1, n + 1))]\n\n res = {}\n\n # hue\n keys = 'aoeu' if dvorak else 'asdf'\n for k, v in zip(keys, _equal_offset(2)):\n res[ord(k)] = AttrOffset('hue', v)\n for k, v in zip(keys.upper().replace(';', ':'), _equal_offset(2, 10)):\n res[ord(k)] = AttrOffset('hue', v)\n\n # saturation\n res[left << 8] = AttrOffset('saturation', -1)\n res[right << 8] = AttrOffset('saturation', 1)\n res[left << 16] = AttrOffset('saturation', 0, False)\n res[right << 16] = AttrOffset('saturation', 65535, False)\n\n # brightness\n res[up << 8] = AttrOffset('brightness', 1)\n res[down << 8] = AttrOffset('brightness', -1)\n res[up << 16] = AttrOffset('brightness', 65535, False)\n res[down << 16] = AttrOffset('brightness', 0, False)\n\n # kelvin\n res[ord('k' if dvorak else 'v')] = AttrOffset('kelvin', 25)\n res[ord('j' if dvorak else 'c')] = AttrOffset('kelvin', -25)\n res[ord('K' if dvorak else 'V')] = AttrOffset('kelvin', 100)\n res[ord('J' if dvorak else 'C')] = 
AttrOffset('kelvin', -100)\n\n # reset\n res[ctrl_r] = AttrOffset('reset', None)\n\n # write light settings to screen\n res[ctrl_w] = AttrOffset('print', None)\n\n return res\n\n\ndef _get_offset() -> AttrOffset:\n keys = _init_keys()\n yield from parse_keyboard_inputs(keys)\n\n\ndef light_eq(lifx: Group, color_theme: Optional[ColorTheme] = None):\n \"\"\"\n a light equalizer to play with HSBk\n\n \\b\n - homerow (aoeu/asdf) controls hue\n - shift-homerow controls hue even more\n\n \\b\n - left/right controls saturation\n - shift-left/right mins/maxes saturation\n\n \\b\n - down/up controls brightness\n - shift-down/up mins/maxes brightness\n\n - jk (dvorak)/cv (qwerty) control kelvin\n\n - ctrl-r resets\n\n - ctrl-w prints light info to screen\n \"\"\"\n\n def _init_lights():\n lifx.turn_on()\n if theme:\n lifx.set_theme(theme)\n\n theme = colors_to_theme(color_theme)\n\n with suppress(KeyboardInterrupt), lifx.reset_to_orig():\n _init_lights()\n\n for ao in _get_offset():\n if ao.attr == 'reset':\n _init_lights()\n\n elif ao.attr == 'print':\n for l in lifx:\n print(l, l.color)\n\n else:\n getattr(lifx, f'set_{ao.attr}')(ao.value, offset=ao.as_offset)\n\n\ndef __main():\n # return getch_test()\n lifx = LifxLAN()\n # lifx.set_color(Colors.DEFAULT)\n print(lifx.on_lights)\n lifx = lifx['kitchen'] + lifx['living_room']\n # lifx = lifx['master']\n # lifx = lifx['living room 1']\n # control(lifx, [Colors.SNES_DARK_PURPLE, Colors.SNES_LIGHT_PURPLE])\n light_eq(lifx, Themes.copilot)\n # control(lifx, [Colors.DEFAULT])\n\n\nif __name__ == '__main__':\n __main()\n","sub_path":"lifxlan3/routines/light/light_eq.py","file_name":"light_eq.py","file_ext":"py","file_size_in_byte":3667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"404291661","text":"import numpy as np\nimport struct\n\ndef load_mnist_data(filename):\n\tim = np.array([])\n\twith open(filename, 'rb') as binfile:\n\t\tbuf = binfile.read()\n\n\t\tindex = 0\n\t\tmagic, numImages, numRows, numColumns = struct.unpack_from('>IIII', buf, index)\n\t\tindex += struct.calcsize('>IIII')\n\n\t\tim = struct.unpack_from('>%sB'%(numImages * numRows *numColumns), buf, index)\n\n\t\tim = np.array(im)\n\t\tim = im.reshape(numImages, numRows * numColumns)\n\treturn im\n\ndef load_mnist_labels(filename):\n\tlabels = np.array([])\n\twith open(filename, 'rb') as binfile:\n\t\tbuf = binfile.read()\n\n\t\tindex = 0\n\t\tmagic, numLabels = struct.unpack_from('>II', buf, index)\n\t\tindex += struct.calcsize('>II')\n\n\t\tlabels = struct.unpack_from('>%sB'%numLabels, buf, index)\n\t\tlabels = np.array(labels)\n\n\treturn labels","sub_path":"load_mnist.py","file_name":"load_mnist.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527617283","text":"import sys\nimport pygame\nimport random\n\n\nclass Unit:\n def __init__(self, row, col):\n self.row = row\n self.col = col\n\n def copy(self):\n return Unit(self.row, self.col)\n\n\ndef rect(unit, color):\n cell_width = W/COL\n cell_height = H/ROW\n left = unit.col*cell_width\n top = unit.row*cell_height\n pygame.draw.rect(\n window, color,\n (left, top, cell_width, cell_height)\n )\n\n\nW, H = 800, 600\nROW, COL = 30, 40\nsize = (W, H)\nbg_color = (0, 0, 0)\nhead = Unit(row=int(ROW/2), col=int(COL/2))\nhead_color = (186, 85, 211)\nfood = Unit(row=random.randint(0, ROW-1), col=random.randint(0, COL-1))\nfood_color = (173, 255, 47)\nsnake_color = (240, 128, 
128)\ndirect = 'left'\nsnakes = [\n Unit(row=head.row, col=head.col+1),\n Unit(row=head.row, col=head.col+2)\n]\n\npygame.init()\nwindow = pygame.display.set_mode(size)\npygame.display.set_caption('贪吃蛇')\nclock = pygame.time.Clock()\n\nwhile True:\n for events in pygame.event.get():\n if events.type == pygame.QUIT:\n sys.exit()\n elif events.type == pygame.KEYDOWN:\n if events.key == 273 or events.key == 119:\n if direct != 'down':\n direct = 'up'\n elif events.key == 274 or events.key == 115:\n if direct != 'up':\n direct = 'down'\n elif events.key == 276 or events.key == 97:\n if direct != 'right':\n direct = 'left'\n elif events.key == 275 or events.key == 100:\n if direct != 'left':\n direct = 'right'\n\n eat = (head.row == food.row and head.col == food.col)\n if eat:\n food = Unit(row=random.randint(0, ROW - 1), col=random.randint(0, COL - 1))\n\n snakes.insert(0, head.copy())\n if not eat:\n snakes.pop()\n\n if direct == 'left':\n head.col -= 1\n elif direct == 'right':\n head.col += 1\n elif direct == 'up':\n head.row -= 1\n elif direct == 'down':\n head.row += 1\n\n pygame.draw.rect(window, bg_color, (0, 0, W, H))\n rect(head, head_color)\n rect(food, food_color)\n for snake in snakes:\n rect(snake, snake_color)\n pygame.display.flip()\n clock.tick(15)","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436211294","text":"from itertools import count\nimport logging\nimport os\nfrom typing import Dict, List\nfrom detectron2.data.catalog import MetadataCatalog\nimport torch\nimport hydra\nfrom detectron2 import model_zoo\nfrom omegaconf import OmegaConf, DictConfig\n\nfrom detectron2.checkpoint import DetectionCheckpointer\nfrom detectron2.config import get_cfg\n\nfrom detectron2.evaluation import COCOEvaluator, inference_on_dataset, DatasetEvaluators\nfrom detectron2.data import build_detection_test_loader\nfrom detectron2.data import (DatasetCatalog, DatasetMapper,\n build_detection_train_loader,\n build_detection_test_loader)\n\nfrom detectron2.engine import DefaultTrainer, launch, default_setup, DefaultPredictor\nfrom detectron2.data import transforms as T\n\nfrom data_loading import clasp_dataset\nfrom utils import visualize_det2, create_train_augmentation, create_test_augmentation\n\n\nclass Trainer(DefaultTrainer):\n @classmethod\n def build_train_loader(cls, cfg):\n mapper = DatasetMapper(cfg,\n is_train=True,\n augmentations=create_train_augmentation(cfg))\n return build_detection_train_loader(cfg, mapper=mapper)\n\n @classmethod\n def build_test_loader(cls, cfg, dataset_name):\n mapper = DatasetMapper(cfg,\n is_train=False,\n augmentations=create_test_augmentation(cfg))\n return build_detection_test_loader(cfg, dataset_name, mapper=mapper)\n\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n return COCOEvaluator(dataset_name, (\"bbox\", ),\n False,\n output_dir=output_folder)\n\n\nclass Predictor(DefaultPredictor):\n def __init__(self, cfg):\n super().__init__(cfg)\n self.aug = T.Resize((cfg.image_h, cfg.image_w))\n\n\ndef setup(args):\n \"\"\"\n Create configs and perform basic setups.\n \"\"\"\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(args.model_zoo))\n cfg.DATASETS.TRAIN = (args.train_dataset, )\n cfg.DATASETS.TEST = (args.test_dataset, )\n cfg.DATALOADER.NUM_WORKERS = 
args.num_workers\n\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_classes\n cfg.OUTPUT_DIR = args.output_dir\n cfg.image_w = args.size[0]\n cfg.image_h = args.size[1]\n\n if args.eval_only is False:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(args.model_zoo)\n cfg.SOLVER.IMS_PER_BATCH = args.batch_size\n cfg.SOLVER.BASE_LR = args.learning_rate\n cfg.SOLVER.MAX_ITER = args.max_iters\n cfg.SOLVER.WARMUP_ITERS = int(args.max_iters / 10)\n cfg.SOLVER.STEPS = (int(args.max_iters / 2),\n int(args.max_iters * 2 / 3))\n else:\n cfg.MODEL.WEIGHTS = os.path.join(\n cfg.OUTPUT_DIR,\n \"model_final.pth\") # path to the model we just trained\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7 # set a custom testing threshold\n\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\n default_setup(cfg, args)\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n\n if args.eval_only is False:\n trainer = Trainer(cfg)\n\n trainer.resume_or_load(resume=args.resume)\n trainer.train()\n\n else:\n if args.visualize is False:\n model = Trainer.build_model(cfg)\n DetectionCheckpointer(model,\n save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=False)\n res = Trainer.test(cfg, model)\n print(res)\n return res\n\n else:\n test_dataset: List[Dict] = DatasetCatalog.get(args.test_dataset)\n metadata = MetadataCatalog.get(args.test_dataset)\n predictor = Predictor(cfg)\n visualize_det2(test_dataset,\n predictor,\n metadata=metadata,\n count=args.num_items)\n\n\n@hydra.main(config_name='configs', config_path='conf')\ndef hydra_main(args: DictConfig):\n print(\"Command Line Args:\", args)\n clasp_dataset.register_clasp_dataset(args)\n\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args, ),\n )\n\n\nif __name__ == \"__main__\":\n hydra_main()","sub_path":"training/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343500936","text":"__all__ = [\"OasisComputationCommand\"]\n\nfrom ..manager import OasisManager as om\n\nfrom .inputs import InputValues\nfrom .base import OasisBaseCommand\n\n\nclass OasisComputationCommand(OasisBaseCommand):\n \"\"\"\n Eventually, the Parent class for all Oasis Computation Command\n create the command line interface from parameter define in the associated computation step\n \"\"\"\n\n def add_args(self, parser):\n \"\"\"\n Adds arguments to the argument parser.\n\n :param parser: The argument parser object\n :type parser: ArgumentParser\n \"\"\"\n super().add_args(parser)\n\n for param in om.computations_params[self.computation_name]:\n add_argument_kwargs = {key: param.get(key) for key in ['action', 'nargs', 'const', 'type', 'choices',\n 'help', 'metavar', 'dest']}\n arg_name = f\"--{param['name'].replace('_', '-')}\"\n if param.get('flag'):\n parser.add_argument(param.get('flag'), arg_name, **add_argument_kwargs)\n else:\n parser.add_argument(arg_name, **add_argument_kwargs)\n\n def action(self, args):\n \"\"\"\n Generic method that call the correct manager method from the child class computation_name\n\n :param args: The arguments from the command line\n :type args: Namespace\n \"\"\"\n self.logger.info(f'\\nProcessing arguments - {self.computation_name}')\n inputs = InputValues(args)\n\n _kwargs = {\n param['name']: inputs.get(param['name'], required=param.get('required'), is_path=param.get('is_path')) for\n param in 
om.computations_params[self.computation_name]}\n\n manager_method = getattr(om(), om.computation_name_to_method(self.computation_name))\n manager_method(**_kwargs)\n","sub_path":"oasislmf/cli/computation_command.py","file_name":"computation_command.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41042828","text":"\"\"\"\nDefinition of ListNode\n\nclass ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\"\"\"\nclass Solution:\n \"\"\"\n @param head: The head of linked list\n @param m: start position\n @param n: end position\n \"\"\"\n def reverseBetween(self, head, m, n):\n # write your code here\n if head == None or m == n:\n return head\n \n # find previous node of m\n preM = None\n if m > 1:\n preM = head\n for i in range(m-2):\n preM = preM.next\n\n # get node m\n nodeM = head\n if preM != None:\n nodeM = preM.next\n \n \n # get node n\n nodeN = head\n for i in range(n-1):\n nodeN = nodeN.next\n \n # get next node of n\n nextN = None\n if nodeN != None:\n nextN = nodeN.next\n \n # reverse node between m and n\n next = nodeM.next\n nodeM2 = nodeM\n for i in range(m,n):\n temp = next.next\n next.next = nodeM\n nodeM = next\n next = temp\n \n if preM != None:\n preM.next = nodeN\n else:\n head = nodeN\n \n nodeM2.next = next\n \n \n return head\n","sub_path":"Python/reverseBetween.py","file_name":"reverseBetween.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"647383873","text":"import re\nfrom collections import Counter\nimport pandas as pd\nimport numpy as np\n\n### DATA PREPARATION ###\nclass DataProcessor():\n\n def __init__(self, filename):\n self.chunks = None\n self.sentences = []\n self.words = {}\n self.words_count = None\n self.data_tuples = None\n\n print(\"[read_training_data] Read in training chunks\")\n with open(filename) as f:\n content = f.read()\n pattern = '<\\?xml version=\"1\\.0\" encoding=\"UTF-8\"\\?>\\s*<\\!DOCTYPE cesAna SYSTEM \"xcesAnaIPI\\.dtd\">\\s*\\s*\\s*(?P[\\W\\s\\d\\w]+)<\\/chunkList>\\s*<\\/cesAna>'\n chunks_block = re.search(pattern, content)\n if chunks_block:\n all_chunks = chunks_block.groups('chunks')\n pattern = '\\s*(?P[.\\w\\W\\s]+?)<\\/chunk>\\s*'\n self.chunks = re.findall(pattern, all_chunks[0])\n \n def create_words_dictionary(self, gold=True):\n print(\"[create_dictionary_train] Create dictionary from chunks\")\n print(\"Number of chunks: {0}\".format(len(self.chunks)))\n for chunk in self.chunks:\n pattern = '(?P\\s*(?:[\\w\\W\\d.]+?)<\\/tok>\\s*?)(?:)?'\n tokens = re.findall(pattern, chunk)\n sentence = []\n for tok in tokens:\n pattern = '(?P.+)<\\/orth>\\s*(?:[\\w\\W\\d.]+)'\n orth = re.search(pattern, tok)\n x = orth.group('orth')\n sentence.append(x)\n if gold:\n pattern = '(?P.+)<\\/base>(?P.+)<\\/ctag><\\/lex>\\s*'\n lexes = re.findall(pattern, tok)\n self.words[x] = [lexes[0][1]] \n else:\n pattern = '(?P.+)<\\/base>(?P.+)<\\/ctag><\\/lex>\\s*'\n lexes = re.findall(pattern, tok)\n self.words[x] = [lexes]\n self.sentences.append(sentence)\n\ndef split_data_into_training_and_test_sets():\n ### SPLIT DATA INTO TRAINING AND TEST SETS ###\n correct = pd.DataFrame()\n non_correct = pd.DataFrame()\n correct_test = pd.DataFrame()\n non_correct_test = pd.DataFrame()\n \n for j, chunk in enumerate(pd.read_csv('input-output-dataset.csv', chunksize=10000)):\n del chunk['0'] \n del chunk['Unnamed: 0']\n 
\n if j % 5 == 0:\n correct_test = pd.concat([correct_test, chunk[chunk['disamb'] == True]])\n non_correct_test = pd.concat([non_correct_test, chunk[chunk['disamb'] == False]])\n else:\n correct = pd.concat([correct, chunk[chunk['disamb'] == True]])\n non_correct = pd.concat([non_correct, chunk[chunk['disamb'] == False]])\n # bar_all.update(j) # progress-bar call disabled: bar_all is never defined in this module\n \n del correct['disamb']\n del non_correct['disamb']\n del correct_test['disamb']\n del non_correct_test['disamb']\n\n \n with open('in-out_correct.csv', 'w') as f:\n correct.to_csv(f, header=False, index=False)\n with open('in-out_non-correct.csv', 'w') as f:\n non_correct.to_csv(f, header=False, index=False)\n with open('in-out_correct_test.csv', 'w') as f:\n correct_test.to_csv(f, header=False, index=False)\n with open('in-out_non-correct_test.csv', 'w') as f:\n non_correct_test.to_csv(f, header=False, index=False)\n\n\n\n","sub_path":"data_processor.py","file_name":"data_processor.py","file_ext":"py","file_size_in_byte":3492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"5312509","text":"import csv\nimport os\nimport shutil\nimport argparse\n\ndef createParser():\n parser = argparse.ArgumentParser()\n parser.add_argument('-csv_file', required=True)\n parser.add_argument('-source', required=True)\n parser.add_argument('-dest', required=True)\n\n return parser\n\nparser = createParser()\nnamespace = parser.parse_args()\n\nprint(namespace.csv_file, namespace.source, namespace.dest)\n\ndef replace(csv_filename, s, d):\n\n folders = ['Abstract_painting', 'Cityscape', 'Genre_painting', 'Illustration', 'Landscape', 'Nude_painting',\n 'Portrait', 'Religious_painting', 'Sketch_and_study', 'Still_life']\n\n for folder in folders:\n folder_path = os.path.join(d, folder)\n if not os.path.isdir( folder_path):\n os.makedirs(folder_path)\n\n # read the csv\n with open(csv_filename, \"r\", newline=\"\") as file:\n print('CSV', csv_filename)\n print('From', s)\n print('To', d)\n\n reader = csv.reader(file)\n\n problems_file = open('log_'+csv_filename+'.txt', 'w')\n\n # for each row\n for row in reader :\n file = row[0].split('/')[-1]\n # print(file)\n otkuda = os.path.join(s, row[0])\n kuda = os.path.join(d, folders[int(row[1])])\n\n if not os.path.isdir(kuda):\n os.makedirs(kuda)\n\n kuda = os.path.join(kuda, file)\n # print(otkuda, kuda)\n if not os.path.exists(otkuda):\n problems_file.write('Problem with: '+ otkuda + '\\n')\n else:\n if not os.path.exists(kuda):\n shutil.move(otkuda, kuda)\n problems_file.write('Moved: ' + kuda + '\\n')\n\n problems_file.close()\n\n# catalog_path_from = os.getcwd()\n# print(catalog_path_from)\n# os.chdir('..')\n# catalog_path_to = os.getcwd()\n# print(catalog_path_to)\n#\n# replace(os.path.join(catalog_path_from, 'genre_train.csv'), os.path.join(catalog_path_from, 'wikiart'),\n# os.path.join(catalog_path_to, 'wikiart'))\n\nreplace(namespace.csv_file, namespace.source, namespace.dest)\nprint('------------------------------done------------------------------')","sub_path":"train/remove.py","file_name":"remove.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"429656391","text":"import json\nfrom staticjinja import make_site\n\n\ndef load_data(filepath):\n with open(filepath, 'r') as file_handler:\n return json.load(file_handler)\n\n\ndef get_contexts():\n app_pages_iter = get_applications_page()\n last_applications = next(app_pages_iter)\n index_context = {\n 'static': 
get_static(),\n 'last_applications': last_applications\n }\n applications_page1_context = {\n 'static': get_static(),\n 'applications': last_applications\n }\n applications_page2_context = {\n 'static': get_static(),\n 'applications': next(app_pages_iter)\n }\n account_context = catalogue_context = company_context = concrete_goods_context = {\n 'static': get_static()\n }\n contexts = [\n ('index.html', index_context),\n ('account.html', account_context),\n ('applications_page1.html', applications_page1_context),\n ('applications_page2.html', applications_page2_context),\n ('catalogue.html', catalogue_context),\n ('company.html', company_context),\n ('concrete_goods.html', concrete_goods_context)\n ]\n\n return contexts\n\n\ndef get_static():\n return {\n 'css': {\n 'bootstrap': 'css/bootstrap.min.css',\n 'jumbotron': 'css/jumbotron-narrow.css',\n 'style': 'css/style.css'\n },\n 'js': {\n 'jquery': 'js/jquery-3.1.1.min.js',\n 'bootstrap': 'js/bootstrap.min.js'\n },\n 'img': {\n 'favicon': 'img/favicon.ico',\n 'logo': 'img/logo.png'\n }\n }\n\n\ndef get_applications_page():\n applications_json_dir = './templates/_json/applications.json'\n applications = load_data(applications_json_dir)\n app_page_count = 5\n for pos in range(0, len(applications), app_page_count):\n yield applications[pos: pos + app_page_count]\n\n\nif __name__ == \"__main__\":\n site = make_site(\n searchpath='templates',\n outpath='site',\n contexts=get_contexts()\n )\n site.render(use_reloader=True)\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"573184383","text":"from django.conf.urls import url\n\nfrom views import *\n\nurlpatterns = [\n # ********************** EDIT/PROFILE **********************\n url(r'^profile/$', Profile.as_view(), name='profile'),\n # ********************** LOGIN/LOGOUT **********************\n url(r'^login/$', Login.as_view(), name='login'),\n url(r'^logout/$', Logout.as_view(), name='logout'),\n # ********************** DASHBOARD ***********************\n url(r'^dashboard/$', Dashboard.as_view(), name='dashboard'),\n # ********************** CREATE/LIST ***********************\n url(r'^users/$', UserListCreate.as_view(), name='users'),\n url(r'^storemanagers/add/$', StoreManagerFormCreate.as_view(), name='storemanagers-add'),\n url(r'^storemanagers/$', StoreManagerListCreate.as_view(), name='storemanagers'),\n url(r'^promotionmanagers/add/$', PromotionManagerFormCreate.as_view(), name='promotionmanagers-add'),\n url(r'^promotionmanagers/$', PromotionManagerListCreate.as_view(), name='promotionmanagers'),\n url(r'^admins/$', AdminListCreate.as_view(), name='admins'),\n # **************** DETAILS/UPDATE/DELETE *******************\n url(r'^users/(?P<pk>[0-9]+)/$', UserView.as_view(), name='user'),\n url(r'^storemanagers/(?P<pk>[0-9]+)/$', StoreManagerView.as_view(), name='storemanager'),\n url(r'^storemanagers/(?P<pk>[0-9]+)/edit/$', StoreManagerFormEdit.as_view(), name='storemanager-edit'),\n url(r'^storemanagers/(?P<pk>[0-9]+)/edit/status/$', StoreManagerEditStatus.as_view(), name='storemanager-edit-status'),\n url(r'^storemanagers/(?P<pk>[0-9]+)/delete/$', StoreManagerDelete.as_view(), name='storemanager-delete'),\n url(r'^promotionmanagers/(?P<pk>[0-9]+)/$', PromotionManagerView.as_view(), name='promotionmanager'),\n url(r'^promotionmanagers/(?P<pk>[0-9]+)/edit/$', PromotionManagerFormEdit.as_view(), name='promotionmanager-edit'),\n 
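# the detail routes below reuse the same (?P<pk>[0-9]+) capture group for the object id\n 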
url(r'^promotionmanagers/(?P<pk>[0-9]+)/edit/status/$', PromotionManagerEditStatus.as_view(), name='promotionmanager-edit-status'),\n url(r'^promotionmanagers/(?P<pk>[0-9]+)/delete/$', PromotionManagerDelete.as_view(), name='promotionmanager-delete'),\n url(r'^admins/(?P<pk>[0-9]+)/$', AdminView.as_view(), name='admin'),\n url(r'^admins/(?P<pk>[0-9]+)/edit/$', AdminFormEdit.as_view(), name='admin-edit'),\n]","sub_path":"promoapp_user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"316206640","text":"# ******************************************************************************\n# Copyright 2017-2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ******************************************************************************\nimport numpy as np\nimport pytest\n\nimport ngraph as ng\n\n\n@pytest.fixture()\ndef _proposal_node():\n attributes = {\n \"base_size\": np.uint16(1),\n \"pre_nms_topn\": np.uint16(20),\n \"post_nms_topn\": np.uint16(64),\n \"nms_thresh\": np.float64(0.34),\n \"feat_stride\": np.uint16(16),\n \"min_size\": np.uint16(32),\n \"ratio\": np.array([0.1, 1.5, 2.0, 2.5], dtype=np.float64),\n \"scale\": np.array([2, 3, 3, 4], dtype=np.float64),\n }\n batch_size = 7\n\n class_probs = ng.parameter([batch_size, 12, 34, 62], np.float64, \"class_probs\")\n bbox_deltas = ng.parameter([batch_size, 24, 34, 62], np.float64, \"bbox_deltas\")\n image_shape = ng.parameter([3], np.float64, \"image_shape\")\n return ng.proposal(class_probs, bbox_deltas, image_shape, attributes)\n\n\ndef test_dynamic_attributes_softmax():\n axis = 2\n data = ng.parameter([1, 2, 3, 4], np.float32, \"data_in\")\n node = ng.softmax(data, axis)\n\n assert node.get_axis() == axis\n node.set_axis(3)\n assert node.get_axis() == 3\n\n\n@pytest.mark.parametrize(\n \"int_dtype, fp_dtype\",\n [\n (np.int8, np.float32),\n (np.int16, np.float32),\n (np.int32, np.float32),\n (np.int64, np.float32),\n (np.uint8, np.float32),\n (np.uint16, np.float32),\n (np.uint32, np.float32),\n (np.uint64, np.float32),\n (np.int32, np.float16),\n (np.int32, np.float64),\n ],\n)\ndef test_dynamic_get_attribute_value(int_dtype, fp_dtype):\n attributes = {\n \"num_classes\": int_dtype(85),\n \"background_label_id\": int_dtype(13),\n \"top_k\": int_dtype(16),\n \"variance_encoded_in_target\": True,\n \"keep_top_k\": np.array([64, 32, 16, 8], dtype=int_dtype),\n \"code_type\": \"pytorch.some_parameter_name\",\n \"share_location\": False,\n \"nms_threshold\": fp_dtype(0.645),\n \"confidence_threshold\": fp_dtype(0.111),\n \"clip_after_nms\": True,\n \"clip_before_nms\": False,\n \"decrease_label_id\": True,\n \"normalized\": True,\n \"input_height\": int_dtype(86),\n \"input_width\": int_dtype(79),\n \"objectness_score\": fp_dtype(0.77),\n }\n\n box_logits = ng.parameter([4, 1, 5, 5], fp_dtype, \"box_logits\")\n class_preds = ng.parameter([2, 1, 4, 5], fp_dtype, \"class_preds\")\n proposals = ng.parameter([2, 
1, 4, 5], fp_dtype, \"proposals\")\n aux_class_preds = ng.parameter([2, 1, 4, 5], fp_dtype, \"aux_class_preds\")\n aux_box_preds = ng.parameter([2, 1, 4, 5], fp_dtype, \"aux_box_preds\")\n\n node = ng.detection_output(box_logits, class_preds, proposals, attributes, aux_class_preds, aux_box_preds)\n\n assert node.get_num_classes() == int_dtype(85)\n assert node.get_background_label_id() == int_dtype(13)\n assert node.get_top_k() == int_dtype(16)\n assert node.get_variance_encoded_in_target()\n assert np.all(np.equal(node.get_keep_top_k(), np.array([64, 32, 16, 8], dtype=int_dtype)))\n assert node.get_code_type() == \"pytorch.some_parameter_name\"\n assert not node.get_share_location()\n assert np.isclose(node.get_nms_threshold(), fp_dtype(0.645))\n assert np.isclose(node.get_confidence_threshold(), fp_dtype(0.111))\n assert node.get_clip_after_nms()\n assert not node.get_clip_before_nms()\n assert node.get_decrease_label_id()\n assert node.get_normalized()\n assert node.get_input_height() == int_dtype(86)\n assert node.get_input_width() == int_dtype(79)\n assert np.isclose(node.get_objectness_score(), fp_dtype(0.77))\n assert node.get_num_classes() == int_dtype(85)\n\n\n@pytest.mark.parametrize(\n \"int_dtype, fp_dtype\",\n [\n (np.uint8, np.float32),\n (np.uint16, np.float32),\n (np.uint32, np.float32),\n (np.uint64, np.float32),\n (np.uint32, np.float16),\n (np.uint32, np.float64),\n ],\n)\ndef test_dynamic_set_attribute_value(int_dtype, fp_dtype):\n attributes = {\n \"base_size\": int_dtype(1),\n \"pre_nms_topn\": int_dtype(20),\n \"post_nms_topn\": int_dtype(64),\n \"nms_thresh\": fp_dtype(0.34),\n \"feat_stride\": int_dtype(16),\n \"min_size\": int_dtype(32),\n \"ratio\": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),\n \"scale\": np.array([2, 3, 3, 4], dtype=fp_dtype),\n }\n batch_size = 7\n\n class_probs = ng.parameter([batch_size, 12, 34, 62], fp_dtype, \"class_probs\")\n bbox_deltas = ng.parameter([batch_size, 24, 34, 62], fp_dtype, \"bbox_deltas\")\n image_shape = ng.parameter([3], fp_dtype, \"image_shape\")\n node = ng.proposal(class_probs, bbox_deltas, image_shape, attributes)\n\n node.set_base_size(int_dtype(15))\n node.set_pre_nms_topn(int_dtype(7))\n node.set_post_nms_topn(int_dtype(33))\n node.set_nms_thresh(fp_dtype(1.55))\n node.set_feat_stride(int_dtype(8))\n node.set_min_size(int_dtype(123))\n node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype))\n node.set_scale(np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype))\n node.set_clip_before_nms(True)\n node.set_clip_after_nms(True)\n node.set_normalize(True)\n node.set_box_size_scale(fp_dtype(1.34))\n node.set_box_coordinate_scale(fp_dtype(0.88))\n node.set_framework(\"OpenVINO\")\n\n assert node.get_base_size() == int_dtype(15)\n assert node.get_pre_nms_topn() == int_dtype(7)\n assert node.get_post_nms_topn() == int_dtype(33)\n assert np.isclose(node.get_nms_thresh(), fp_dtype(1.55))\n assert node.get_feat_stride() == int_dtype(8)\n assert node.get_min_size() == int_dtype(123)\n assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=fp_dtype))\n assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=fp_dtype))\n assert node.get_clip_before_nms()\n assert node.get_clip_after_nms()\n assert node.get_normalize()\n assert np.isclose(node.get_box_size_scale(), fp_dtype(1.34))\n assert np.isclose(node.get_box_coordinate_scale(), fp_dtype(0.88))\n assert node.get_framework() == \"OpenVINO\"\n\n\ndef test_dynamic_attr_cache(_proposal_node):\n node = _proposal_node\n\n assert not 
node._attr_cache_valid\n node.set_nms_thresh(1.3453678102)\n assert not node._attr_cache_valid\n assert np.isclose(node.get_nms_thresh(), np.float64(1.3453678102))\n assert node._attr_cache_valid\n\n\ndef test_dynamic_attr_transitivity(_proposal_node):\n node = _proposal_node\n node2 = node\n\n node.set_ratio(np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64))\n assert np.allclose(node.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64))\n assert np.allclose(node2.get_ratio(), np.array([1.1, 2.5, 3.0, 4.5], dtype=np.float64))\n\n node2.set_scale(np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64))\n assert np.allclose(node2.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64))\n assert np.allclose(node.get_scale(), np.array([2.1, 3.2, 3.3, 4.4], dtype=np.float64))\n\n\ndef test_dynamic_attributes_simple():\n batch_size = 1\n input_size = 16\n hidden_size = 128\n\n X_shape = [batch_size, input_size]\n H_t_shape = [batch_size, hidden_size]\n W_shape = [3 * hidden_size, input_size]\n R_shape = [3 * hidden_size, hidden_size]\n B_shape = [4 * hidden_size]\n\n parameter_X = ng.parameter(X_shape, name=\"X\", dtype=np.float32)\n parameter_H_t = ng.parameter(H_t_shape, name=\"H_t\", dtype=np.float32)\n parameter_W = ng.parameter(W_shape, name=\"W\", dtype=np.float32)\n parameter_R = ng.parameter(R_shape, name=\"R\", dtype=np.float32)\n parameter_B = ng.parameter(B_shape, name=\"B\", dtype=np.float32)\n\n activations = [\"tanh\", \"relu\"]\n activations_alpha = [1.0, 2.0]\n activations_beta = [1.0, 2.0]\n clip = 0.5\n linear_before_reset = True\n\n node = ng.gru_cell(\n parameter_X,\n parameter_H_t,\n parameter_W,\n parameter_R,\n parameter_B,\n hidden_size,\n activations,\n activations_alpha,\n activations_beta,\n clip,\n linear_before_reset,\n )\n\n assert node.get_hidden_size() == hidden_size\n assert all(map(lambda x, y: x == y, node.get_activations(), activations))\n assert all(np.equal(node.get_activations_alpha(), activations_alpha))\n assert all(np.equal(node.get_activations_beta(), activations_beta))\n assert node.get_linear_before_reset() == linear_before_reset\n assert np.isclose(node.get_clip(), clip)\n","sub_path":"ngraph/python/tests/test_ngraph/test_dyn_attributes.py","file_name":"test_dyn_attributes.py","file_ext":"py","file_size_in_byte":9054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"272306850","text":"import urllib.request\n\nfor i in range(100):\n url = 'http://graph.facebook.com/%s/picture?Type=large' %str(i)\n imagem = urllib.request.urlopen(url).read()\n arquivo = '%s.jpg' %str(i)\n f = open(arquivo, 'wb')\n f.write(imagem)\n f.close()\n print('%s saved...' 
%arquivo)\n","sub_path":"Exercicios VII/Exemplos/questao06.py","file_name":"questao06.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"362163533","text":"from pycmp.grammar import EOF\nfrom pycmp.parsing import ShiftReduceParser\n\n\ndef evaluate_parse(left_parse, tokens):\n if not left_parse or not tokens:\n return\n\n left_parse = iter(left_parse)\n tokens = iter(tokens)\n result = evaluate(next(left_parse), left_parse, tokens)\n\n assert isinstance(next(tokens).ttype, EOF)\n return result\n\n\ndef evaluate(production, left_parse, tokens, inherited_value=None):\n _, body = production\n attributes = production.attributes\n\n synteticed = [None]\n inherited = [inherited_value]\n\n for i, symbol in enumerate(body, 1):\n inherited.append(attributes[i] and attributes[i](inherited, synteticed))\n if symbol.is_terminal:\n assert inherited[i] is None\n lex = next(tokens).lex\n synteticed.append(lex)\n else:\n next_production = next(left_parse)\n assert symbol == next_production.left\n synteticed.append(\n evaluate(next_production, left_parse, tokens, inherited[i])\n )\n\n synteticed[0] = attributes[0] and attributes[0](inherited, synteticed)\n return synteticed[0]\n\n\ndef evaluate_reverse_parse(right_parse, operations, tokens):\n if not right_parse or not operations or not tokens:\n return\n\n right_parse = iter(right_parse)\n tokens = iter(tokens)\n stack = []\n for operation in operations:\n if operation == ShiftReduceParser.SHIFT:\n token = next(tokens)\n stack.append(token.lex)\n elif operation == ShiftReduceParser.REDUCE:\n production = next(right_parse)\n _, body = production\n attributes = production.attributes\n assert all(\n rule is None for rule in attributes[1:]\n ), \"There must be only synteticed attributes.\"\n rule = attributes[0]\n\n if len(body):\n synteticed = [None] + stack[-len(body) :]\n value = rule(None, synteticed)\n stack[-len(body) :] = [value]\n else:\n stack.append(rule(None, None))\n else:\n raise Exception(\"Invalid action!!!\")\n\n assert len(stack) == 1\n assert isinstance(next(tokens).ttype, EOF)\n return stack[0]\n","sub_path":"src/pycmp/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172790576","text":"from django.shortcuts import render\nfrom rest_framework import views\nfrom rest_framework.response import Response\n# Create your views here.\n\nfrom .serializers import UserInfoSerializer\nfrom .models import UserInfo\n\n\nclass UserInfoView(views.APIView):\n \n def get(self, request):\n queryset = UserInfo.objects.filter(used=1)\n\n if queryset.exists():\n user = queryset[queryset.count()-1]\n else:\n user = UserInfo()\n user.name = \"xxx\"\n user.nickname = \"xxx\"\n user.github = \"xxx.com\"\n user.email = \"xxx@xx.com\"\n user.words = \"xxx\"\n user.avatar = \"static/image/2.jpg\"\n user.used = 1\n\n ser = UserInfoSerializer(instance=user, many=False)\n\n return Response(ser.data)\n","sub_path":"apps/userinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400465017","text":"\"\"\"Update DNS data for dynamic ip addresses.\"\"\"\n\nimport argparse\nimport configparser\nimport glob\nimport importlib\nimport inspect\nimport logging\nimport math\nimport os\nimport os.path\nimport stat\nimport 
sys\nimport time\nimport ast\n\nfrom ddupdate.ddplugin import AddressPlugin, AddressError\nfrom ddupdate.ddplugin import ServicePlugin, ServiceError\nfrom ddupdate.ddplugin import IpAddr\n\n# pylint: disable=ungrouped-imports\nif sys.version_info >= (3, 5):\n import importlib.util\nelse:\n from importlib.machinery import SourceFileLoader\n\n\nif 'XDG_CACHE_HOME' in os.environ:\n CACHE_DIR = os.environ['XDG_CACHE_HOME']\nelse:\n CACHE_DIR = os.path.expanduser('~/.cache')\n\nDEFAULTS = {\n 'hostname': 'host.nowhere.net',\n 'address-plugin': 'default-if',\n 'service-plugin': 'dry-run',\n 'loglevel': 'info',\n 'ip-version': 'v4',\n 'service-options': None,\n 'address-options': None,\n 'ip-cache': os.path.join(CACHE_DIR, 'ddupdate'),\n 'force': False\n}\n\n\nclass _GoodbyeError(Exception):\n \"\"\"General error, implies sys.exit().\"\"\"\n\n def __init__(self, msg=\"\", exitcode=0):\n Exception.__init__(self, msg)\n self.exitcode = exitcode\n self.msg = msg\n\n\ndef envvar_default(var, default=None):\n \"\"\"Return var if found in environment, else default.\"\"\"\n return os.environ[var] if var in os.environ else default\n\n\ndef ip_cache_setup(opts):\n \"\"\"Ensure that our cache directory exists, return cache file path.\"\"\"\n if not os.path.exists(opts.ip_cache):\n os.makedirs(opts.ip_cache)\n return os.path.join(opts.ip_cache, opts.service_plugin + '.ip')\n\n\ndef ip_cache_clear(opts, log):\n \"\"\"Remove the cache file for actual service plugin in opts.\"\"\"\n path = ip_cache_setup(opts)\n if not os.path.exists(path):\n return\n log.debug(\"Removing cache file: \" + path)\n os.unlink(path)\n\n\ndef ip_cache_data(opts, log, default=(IpAddr(ipv4=\"0.0.0.0\"), 100000)):\n \"\"\"\n Return an (address, cache age in minute) tuple.\n\n If not existing, the default value is returned.\n \"\"\"\n path = ip_cache_setup(opts)\n if not os.path.exists(path):\n return default\n mtime = os.stat(path)[stat.ST_MTIME]\n now = time.time()\n delta = math.floor((now - mtime) / 60)\n with open(path) as f:\n astr = f.read().strip()\n try:\n ll = ast.literal_eval(astr)\n ip = IpAddr(ipv4=ll[0], ipv6=ll[1])\n except SyntaxError:\n log.debug(\"SyntaxError while reading ip cache.\")\n ip_cache_clear(opts, log)\n ip, delta = default\n return ip, delta\n\n\ndef ip_cache_set(opts, ip):\n \"\"\"Set the cached address to IpAddr ip.\"\"\"\n path = ip_cache_setup(opts)\n ip = ip if ip else IpAddr(ipv4=\"0.0.0.0\")\n with open(path, \"w\") as f:\n f.write(str(ip))\n\n\ndef here(path):\n \"\"\"Return path added to current dir for __file__.\"\"\"\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), path)\n\n\ndef parse_conffile(log):\n \"\"\"Parse config file path, returns verified path or None.\"\"\"\n path = envvar_default('XDG_CONFIG_HOME', os.path.expanduser('~/.config'))\n path = os.path.join(path, 'ddupdate.conf')\n if not os.path.exists(path):\n path = '/etc/ddupdate.conf'\n for i in range(len(sys.argv)):\n arg = sys.argv[i]\n if arg.startswith('-c') or arg.startswith('--conf'):\n if arg.startswith('-c') and len(arg) > 2:\n path = arg[2:]\n elif '=' in arg:\n path = arg.split('=')[1]\n elif i < len(sys.argv) - 1:\n path = sys.argv[i + 1]\n else:\n # Trust that the regular parsing handles the error.\n return None\n if not os.access(path, os.R_OK):\n log.warning(\"Cannot open config file '%s' for read\", path)\n return None\n return path\n\n\ndef parse_config(path, log):\n \"\"\"Parse config file, return fully populated dict of key-values.\"\"\"\n results = {}\n config = configparser.ConfigParser()\n 
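# plain INI file; any option found in its [update] section overrides the hard-coded DEFAULTS\n 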
config.read(path)\n if 'update' in config:\n items = config['update']\n else:\n log.warning(\n 'No [update] section found in %s, file ignored', path)\n items = {}\n for key in DEFAULTS:\n if key in items:\n results[key] = items[key]\n else:\n results[key] = DEFAULTS[key]\n return results\n\n\ndef get_parser(conf):\n \"\"\"Construct the argparser.\"\"\"\n parser = argparse.ArgumentParser(\n prog='ddupdate',\n add_help=False,\n description=\"Tool to update DNS data for dynamic ip addresses\")\n normals = parser.add_argument_group()\n normals.title = \"Normal operation options\"\n normals.add_argument(\n \"-H\", \"--hostname\", metavar=\"host\",\n help='Hostname to update [host.nowhere.net]',\n default=conf['hostname'])\n normals.add_argument(\n \"-s\", \"--service-plugin\", metavar=\"plugin\",\n help='Plugin updating a dns hostname address [%s]'\n % conf['service-plugin'],\n default=conf['service-plugin'])\n normals.add_argument(\n \"-a\", \"--address-plugin\", metavar=\"plugin\",\n help='Plugin providing ip address to use [%s]'\n % conf['address-plugin'],\n default=conf['address-plugin'])\n normals.add_argument(\n \"-c\", \"--config-file\", metavar=\"path\",\n help='Config file with default values for all options'\n + ' [' + envvar_default('XDG_CONFIG_HOME', ' ~/.config/ddupdate.conf')\n + ':/etc/ddupdate.conf]',\n dest='config_file', default='/etc/ddupdate.conf')\n normals.add_argument(\n \"-l\", \"--loglevel\", metavar='level',\n choices=['error', 'warning', 'info', 'debug'],\n help='Amount of printed diagnostics [warning]',\n default=conf['loglevel'])\n normals.add_argument(\n \"-v\", \"--ip-version\", metavar='version',\n choices=['all', 'v6', 'v4'],\n help='Ip address version(s) to register (v6, v4, all) [v4]',\n default=conf['ip-version'])\n normals.add_argument(\n \"-o\", \"--service-option\", metavar=\"plugin option\",\n help='Service plugin option (enter multiple times if required)',\n dest='service_options', action='append')\n normals.add_argument(\n \"-O\", \"--address-option\", metavar=\"plugin option\",\n help='Address plugin option (enter multiple times if required)',\n dest='address_options', action='append')\n normals.add_argument(\n \"-i\", \"--ip-plugin\", help=argparse.SUPPRESS)\n others = parser.add_argument_group()\n others.title = \"Other options\"\n others.add_argument(\n \"-S\", \"--list-services\",\n help='List service provider plugins. ',\n default=False, action='store_true')\n others.add_argument(\n \"-A\", \"--list-addressers\",\n help='List plugins providing ip address. 
',\n default=False, action='store_true')\n others.add_argument(\n \"-f\", \"--force\",\n help='Force run even if the cache is fresh',\n default=False, action='store_true')\n others.add_argument(\n \"-h\", \"--help\", metavar=\"plugin\",\n help='Print overall help or help for given plugin',\n nargs='?', const='-')\n others.add_argument(\n \"-V\", \"--version\",\n help='Print ddupdate version and exit',\n action='version')\n return parser\n\n\ndef parse_options(conf):\n \"\"\"Parse command line using conf as defaults, return namespace.\"\"\"\n level_by_name = {\n 'error': logging.ERROR,\n 'warn': logging.WARNING,\n 'warning': logging.WARNING,\n 'info': logging.INFO,\n 'debug': logging.DEBUG,\n }\n parser = get_parser(conf)\n parser.version = \"0.6.4\"\n opts = parser.parse_args()\n if opts.help == '-':\n parser.print_help()\n raise _GoodbyeError()\n if not opts.address_options:\n opts.address_options = []\n if conf['address-options']:\n opts.address_options = conf['address-options'].split()\n if not opts.service_options:\n opts.service_options = []\n if conf['service-options']:\n opts.service_options = conf['service-options'].split()\n opts.loglevel = level_by_name[opts.loglevel]\n opts.ip_cache = conf['ip-cache']\n return opts\n\n\ndef log_setup():\n \"\"\"Initialize and return the module log.\"\"\"\n log = logging.getLogger('ddupdate')\n log.setLevel(logging.DEBUG)\n handler = logging.StreamHandler()\n handler.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(levelname)s - %(message)s\")\n handler.setFormatter(formatter)\n log.addHandler(handler)\n return log\n\n\ndef log_options(log, args):\n \"\"\"Print some info on selected options.\"\"\"\n log.info(\"Loglevel: \" + logging.getLevelName(args.loglevel))\n log.info(\"Using hostname: \" + args.hostname)\n log.info(\"Using ip address plugin: \" + args.address_plugin)\n log.info(\"Using service plugin: \" + args.service_plugin)\n log.info(\"Service options: \" +\n (' '.join(args.service_options) if args.service_options else ''))\n log.info(\"Address options: \" +\n (' '.join(args.address_options) if args.address_options else ''))\n\n\ndef load_module(path):\n \"\"\"Return instantiated module loaded from given path.\"\"\"\n # pylint: disable=deprecated-method\n name = os.path.basename(path).replace('.py', '')\n if sys.version_info >= (3, 5):\n spec = importlib.util.spec_from_file_location(name, path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n else:\n module = SourceFileLoader(name, path).load_module()\n return module\n\n\ndef load_plugin_dir(dirpath, parent_class):\n \"\"\"\n Load all plugins in dirpath having a class derived from parent_class.\n\n Parameters:\n - dirpath: string, all path/*.py files are plugin candidates.\n - parent_class: class, objects being a subclass of parent are loaded.\n\n Returns:\n List of instantiated plugins, all derived from parent_class.\n\n \"\"\"\n found = []\n for plugpath in glob.glob(os.path.join(dirpath, '*.py')):\n try:\n module = load_module(plugpath)\n except ImportError:\n continue\n for member_class in [m[1] for m in inspect.getmembers(module)]:\n # pylint: disable=undefined-loop-variable\n if not inspect.isclass(member_class):\n continue\n if not issubclass(member_class, parent_class):\n continue\n if member_class == parent_class:\n continue\n instance = member_class()\n instance.module = module\n found.append(instance)\n return found\n\n\ndef load_plugins(path, log):\n \"\"\"Load ip and service plugins into dicts keyed by name.\"\"\"\n setters = 
load_plugin_dir(os.path.join(path, 'plugins'), ServicePlugin)\n getters = load_plugin_dir(os.path.join(path, 'plugins'), AddressPlugin)\n getters_by_name = {plug.name(): plug for plug in getters}\n setters_by_name = {plug.name(): plug for plug in setters}\n log.debug(\"Loaded %d address and %d service plugins from %s\",\n len(getters), len(setters), path)\n return getters_by_name, setters_by_name\n\n\ndef list_plugins(plugins):\n \"\"\"List given plugins.\"\"\"\n for name, plugin in sorted(plugins.items()):\n print(\"%-20s %s\" % (name, plugin.oneliner()))\n\n\ndef plugin_help(ip_plugins, service_plugins, plugid):\n \"\"\"Print full help for given plugin.\"\"\"\n if plugid in ip_plugins:\n plugin = ip_plugins[plugid]\n elif plugid in service_plugins:\n plugin = service_plugins[plugid]\n else:\n raise _GoodbyeError(\"No help found (no such plugin?): \" + plugid, 1)\n print(\"Name: \" + plugin.name())\n print(\"Source file: \" + plugin.module.__file__ + \"\\n\")\n print(plugin.info())\n\n\ndef filter_ip(ip_version, ip):\n \"\"\"Filter the ip address to match the --ip-version option.\"\"\"\n if ip_version == 'v4':\n ip.v6 = None\n elif ip_version == 'v6':\n ip.v4 = None\n if ip.empty():\n raise AddressError(\"No usable address\")\n return ip\n\n\ndef build_load_path(log):\n \"\"\"Return list of paths to load plugins from.\"\"\"\n paths = []\n paths.append(envvar_default('XDG_DATA_HOME',\n os.path.expanduser('~/.local/share')))\n syspaths = envvar_default('XDG_DATA_DIRS', '/usr/local/share:/usr/share')\n paths.extend(syspaths.split(':'))\n paths = [os.path.join(p, 'ddupdate') for p in paths]\n home = os.path.join(\n os.path.dirname(os.path.realpath(__file__)), '..', '..')\n paths.insert(0, os.path.abspath(home))\n log.debug('paths: %s', ':'.join(paths))\n return paths\n\n\ndef setup(loglevel=None):\n \"\"\"Return a standard log, arg_parser tuple.\"\"\"\n log = log_setup()\n conffile_path = parse_conffile(log)\n conf = parse_config(conffile_path, log) if conffile_path else DEFAULTS\n opts = parse_options(conf)\n log.handlers[0].setLevel(loglevel if loglevel else opts.loglevel)\n log.debug('Using config file: %s', conffile_path)\n log_options(log, opts)\n return log, opts\n\n\ndef get_plugins(log, opts):\n \"\"\"\n Handle plugin listing and help, or load the ip and service plugins.\n\n return: (ip plugin, service plugin) tuple.\n \"\"\"\n ip_plugins = {}\n service_plugins = {}\n for path in build_load_path(log):\n getters, setters = load_plugins(path, log)\n for name, plugin in getters.items():\n ip_plugins.setdefault(name, plugin)\n for name, plugin in setters.items():\n service_plugins.setdefault(name, plugin)\n if opts.list_services:\n list_plugins(service_plugins)\n raise _GoodbyeError()\n if opts.list_addressers:\n list_plugins(ip_plugins)\n raise _GoodbyeError()\n if opts.help and opts.help != '-':\n plugin_help(ip_plugins, service_plugins, opts.help)\n raise _GoodbyeError()\n if opts.ip_plugin:\n raise _GoodbyeError(\n \"--ip-plugin has been replaced by --address-plugin.\")\n elif opts.address_plugin not in ip_plugins:\n raise _GoodbyeError('No such ip plugin: ' + opts.address_plugin, 2)\n elif opts.service_plugin not in service_plugins:\n raise _GoodbyeError(\n 'No such service plugin: ' + opts.service_plugin, 2)\n service_plugin = service_plugins[opts.service_plugin]\n ip_plugin = ip_plugins[opts.address_plugin]\n return ip_plugin, service_plugin\n\n\ndef main():\n \"\"\"Indeed: main function.\"\"\"\n try:\n log, opts = setup()\n ip_plugin, service_plugin = get_plugins(log, opts)\n try:\n ip = 
ip_plugin.get_ip(log, opts.address_options)\n except AddressError as err:\n raise _GoodbyeError(\"Cannot obtain ip address: \" + str(err), 3)\n if not ip or ip.empty():\n log.info(\"Using ip address provided by update service\")\n ip = None\n else:\n ip = filter_ip(opts.ip_version, ip)\n log.info(\"Using ip address: %s\", ip)\n if opts.force:\n ip_cache_clear(opts, log)\n cached_ip, age = ip_cache_data(opts, log)\n if age < service_plugin.ip_cache_ttl() and (cached_ip == ip or not ip):\n log.info(\"Update inhibited, cache is fresh (%d/%d min)\",\n age, service_plugin.ip_cache_ttl())\n raise _GoodbyeError()\n except _GoodbyeError as err:\n if err.exitcode != 0:\n log.error(err.msg)\n sys.stderr.write(\"Fatal error: \" + str(err) + \"\\n\")\n sys.exit(err.exitcode)\n try:\n service_plugin.register(log, opts.hostname, ip, opts.service_options)\n except ServiceError as err:\n log.error(\"Cannot update DNS data: %s\", err)\n else:\n ip_cache_set(opts, ip)\n log.info(\"Update OK\")\n\n\nif __name__ == '__main__':\n main()\n\n\n# vim: set expandtab ts=4 sw=4:\n","sub_path":"lib/ddupdate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40733630","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/py3o/renderserver/service.py\n# Compiled at: 2018-07-04 04:43:43\nfrom ConfigParser import SafeConfigParser\nimport optparse, logging, sys, os\nfrom os.path import *\nimport win32serviceutil, win32service, win32event, win32process, win32api\nfrom win32com.client import constants\nimport _winreg, pkg_resources\nfrom pkg_resources import iter_entry_points\nfrom pkg_resources import working_set, Environment\norganization = 'py3o'\nproduct_name = 'py3o-renderserver'\n\ndef get_config():\n \"\"\"find the config file path in the registry\n \"\"\"\n\n class Config(object):\n pass\n\n config = Config()\n try:\n reg_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\%s\\\\%s' % (organization, product_name))\n config.soffice_host = _winreg.QueryValueEx(reg_key, 'soffice_host')[0]\n config.soffice_port = _winreg.QueryValueEx(reg_key, 'soffice_port')[0]\n config.listen_port = _winreg.QueryValueEx(reg_key, 'listen_port')[0]\n config.listen_interface = _winreg.QueryValueEx(reg_key, 'listen_interface')[0]\n config.javalib = _winreg.QueryValueEx(reg_key, 'javalib')[0]\n config.driver = _winreg.QueryValueEx(reg_key, 'driver')[0]\n config.maxmem = _winreg.QueryValueEx(reg_key, 'maxmem')[0]\n except WindowsError as e:\n logging.exception(str(e))\n\n return config\n\n\ndef scan_directory(directory):\n distributions, errors = working_set.find_plugins(Environment([directory]))\n map(working_set.add, distributions)\n if len(errors) > 0:\n raise ValueError(\"Couldn't load %s\" % errors)\n\n\nclass NullOutput(object):\n \"\"\"a file-like object that behaves like a black hole.\n Does not consume memory and gives nothing back. 
Ever.\n \"\"\"\n\n def noop(self, *args, **kw):\n pass\n\n write = writelines = close = seek = flush = truncate = noop\n\n def __iter__(self):\n return self\n\n def next(self):\n raise StopIteration\n\n def isatty(self):\n return False\n\n def tell(self):\n return 0\n\n def read(self, *args, **kw):\n return ''\n\n readline = read\n\n def readlines(self, *args, **kw):\n return list()\n\n\ndef set_config(options):\n \"\"\"set the config file path in the registry\n \"\"\"\n reg_key = _winreg.CreateKey(_winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\%s\\\\%s' % (organization, product_name))\n reg_val = _winreg.SetValueEx(reg_key, 'soffice_host', None, _winreg.REG_SZ, options.soffice_host)\n reg_val = _winreg.SetValueEx(reg_key, 'soffice_port', None, _winreg.REG_SZ, options.soffice_port)\n reg_val = _winreg.SetValueEx(reg_key, 'listen_port', None, _winreg.REG_SZ, options.listen_port)\n reg_val = _winreg.SetValueEx(reg_key, 'listen_interface', None, _winreg.REG_SZ, options.listen_interface)\n reg_val = _winreg.SetValueEx(reg_key, 'javalib', None, _winreg.REG_SZ, options.javalib)\n reg_val = _winreg.SetValueEx(reg_key, 'driver', None, _winreg.REG_SZ, options.driver)\n reg_val = _winreg.SetValueEx(reg_key, 'maxmem', None, _winreg.REG_SZ, options.maxmem)\n return\n\n\nclass Py3oWindowsService(win32serviceutil.ServiceFramework):\n \"\"\"The Py3oWindowsService class contains all the functionality required\n for running a py3o renderserver as a Windows Service. The only\n user edits required for this class are located in the following class\n variables:\n\n _svc_name_: The name of the service (used in the Windows registry).\n DEFAULT: The capitalized name of the current directory.\n _svc_display_name_: The name that will appear in the Windows Service Manager.\n DEFAULT: The capitalized name of the current directory.\n\n For information on installing the application, please refer to the\n documentation at the end of this module or navigate to the directory\n where this module is located and type \"service.py\" from the command\n prompt.\n \"\"\"\n _svc_name_ = '%s' % product_name\n _svc_display_name_ = _svc_name_\n _svc_deps = list()\n\n def __init__(self, args):\n \"\"\"set some useful variables\n \"\"\"\n sys.stdout = NullOutput()\n sys.stderr = NullOutput()\n win32serviceutil.ServiceFramework.__init__(self, args)\n\n def SvcDoRun(self):\n \"\"\"Called when the Windows Service runs.\"\"\"\n self.ReportServiceStatus(win32service.SERVICE_START_PENDING)\n try:\n config = get_config()\n from py3o.renderserver.server import start_server\n self.ReportServiceStatus(win32service.SERVICE_RUNNING)\n start_server(config)\n except Exception as e:\n self.ReportServiceStatus(win32service.SERVICE_ERROR_CRITICAL)\n import servicemanager\n servicemanager.LogErrorMsg('The service could not start for the following reason: %s' % str(e))\n self.ReportServiceStatus(win32service.SERVICE_STOPPED)\n\n def SvcStop(self):\n \"\"\"Called when Windows receives a service stop request.\"\"\"\n self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)\n from twisted.internet import reactor\n reactor.stop()\n self.ReportServiceStatus(win32service.SERVICE_STOPPED)\n\n\ndef config():\n \"\"\"write the configuration parameters of the py3o-renderserver service in the windows registry\n \"\"\"\n optparser = optparse.OptionParser()\n optparser.add_option('-a', '--sofficehost', dest='soffice_host', help='specify the open office hostname/ip address ADDR', metavar='ADDR', default='127.0.0.1')\n optparser.add_option('-p', '--sofficeport', 
dest='soffice_port', help='specify the open office port PORT', metavar='PORT', default='8997')\n optparser.add_option('-l', '--listenport', dest='listen_port', help='specify the PORT on which our service will listen', metavar='PORT', default=8994)\n optparser.add_option('-i', '--listeninterface', dest='listen_interface', help='specify the INTERFACE on which our service will listen (default: all interfaces)', metavar='INTERFACE', default=None)\n optparser.add_option('-d', '--driver', dest='driver', help='choose a driver between juno & pyuno', default='juno')\n optparser.add_option('-j', '--java', dest='javalib', help='choose a jvm.dll/jvm.so to use if you are using the juno driver', default=None)\n optparser.add_option('-m', '--maxmem', dest='maxmem', help='how much memory to give to the JVM if you are using juno driver, default is 150Mb', default='150')\n options, args = optparser.parse_args()\n set_config(options)\n return\n\n\ndef setup():\n \"\"\"basic win32 service setup: install or remove or update or start or stop the service\n \"\"\"\n win32serviceutil.HandleCommandLine(Py3oWindowsService)","sub_path":"pycfiles/py3o.template-0.10.0-py3-none-any/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":6854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"570030456","text":"import matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\nfrom netCDF4 import Dataset\nimport numpy as np\n\n\ndata = Dataset(\"/Users/brownscholar/Desktop/copied_atlantic-w.nc\")\nw_array = data.variables['w'][:]\ndate = 0 #what format do I need to reference time in?\ntimeslice = w_array[date,:,:,0]\n\n#this stuff defines the colorspace (we can google colormaps to learn more if we want to)\ntop = cm.get_cmap('Blues_r', 128)\nbottom = cm.get_cmap('Reds', 128)\n\nnewcolors = np.vstack((top(np.linspace(0, 1, 128)),\n bottom(np.linspace(0, 1, 128))))\nnewcmp = ListedColormap(newcolors, name='RedBlue')\n\n\np = plt.pcolormesh(timeslice[2:-2,2:-2],cmap = newcmp)\n\nplt.colorbar()\nplt.xlabel(\"Longitude\")# labels x axis\nplt.ylabel(\"Latitude\")# labels the y axis\nplt.title(\"ColorMap\")# labels the entire map\nplt.scatter([],[], color = \"blue\", label = \"going down\")# labels legend\nplt.scatter([],[], color = \"red\", label = \"going up\")# labels legend\nplt.legend(bbox_to_anchor=[1.0,1.0])# create legend\n#plt.xticks(np.arange(0,num_lon,10),lon[::10])\n#plt.yticks(np.arange(0,num_lat,10),lat[::10])\nplt.show()\n\n# import data (atlantic-w.nc) into a variable\n# cut a time slice using same method from density.py\n# create plot with this time slice ","sub_path":"3-23/hovmoller-plots.py","file_name":"hovmoller-plots.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"545282086","text":"\n\nimport xlsxwriter\n\n#1. Create an Excel file\n\nwork=xlsxwriter.Workbook(\"text_0.xlsx\")# file name + path\n\n# create a chart\nchart=work.add_chart({'type':'radar'})\n\n#column chart, area chart, bar chart, line chart, radar chart\n# an empty chart cannot be added\n\n#2. Create a worksheet\nworksheet=work.add_worksheet(\"while\")\n\n\n# add data -- declare the data containers\ntitle=\"abcdefghij\"\ndata=[1,2,3,4,5,6,7,8,9,10]\nvalue=[80,30,30,123,30,120,30,324,200,120]\n\nfor i,j in enumerate(title):\n\tpoint=\"B%d\"%(i+1)\n\tworksheet.write(point,j)\n\nfor i,j in enumerate(data):\n\tpoint=\"C%d\"%(i+1)\n\tworksheet.write(point,j)\n\nfor i,j in 
enumerate(value):\n\tpoint=\"D%d\"%(i+1)\n\tworksheet.write(point,j)\n\t\n# add the data to the chart\nchart.add_series(\n\t{\n\t\t\"categories\":\"=while!$b$1:$b$10\",# range of the category labels\n\t\t\"values\":\"=while!$c$1:$c$10\",\n\t\t\"line\":{\"color\":\"blue\"}\n\t}\n)\nworksheet.insert_chart(\"B11\",chart)\n\n\n\n\n#3. Adjust formatting\n\n\t#3.1 adjust the worksheet format\nworksheet.set_column(\"A:A\",20)\n\t#3.2 adjust the content format\nbold=work.add_format({\"bold\":True})# define a content style\n\n#4. Write content\n\t# write text\nworksheet.write(\"A1\",\"while\",bold)\n\t# write an image\n#worksheet.insert_image(\"A2\",\"branches.png\")\n\t# write the SUM function\nworksheet.write(\"A3\",2,bold)\nworksheet.write(\"A4\",34,bold)\nworksheet.write(\"A5\",\"=SUM(A3:A4)\",bold)\n\n\n\n#close file\nwork.close()\n","sub_path":"Python与office编程/test_0.py","file_name":"test_0.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"580773645","text":"\"\"\" Basic Hekr protocol implementation based on Wisen app. \"\"\"\n\n__all__ = [\n 'async_setup',\n 'async_setup_entry',\n 'async_unload_entry',\n 'CONFIG_SCHEMA',\n 'HekrData',\n 'AnyDeviceIdentifier',\n]\n\n__title__ = 'HomeAssistant Hekr Component'\n__version__ = '0.1.6'\n__author__ = 'Alexander Ryazanov '\n\nimport asyncio\nimport logging\nfrom asyncio import Task\nfrom typing import Optional, Dict, List, Set, TYPE_CHECKING, Union, Callable\n\nfrom homeassistant import config_entries\nfrom homeassistant.const import CONF_TOKEN, CONF_PROTOCOL, CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP, \\\n EVENT_HOMEASSISTANT_START, CONF_NAME, CONF_SCAN_INTERVAL, CONF_PLATFORM\nfrom homeassistant.helpers.event import async_track_time_interval\nfrom homeassistant.helpers.typing import HomeAssistantType, ConfigType\n\nfrom hekrapi import ACTION_COMMAND_RESPONSE, ACTION_DEVICE_MESSAGE, DeviceResponseState, DeviceID\nfrom hekrapi.device import Device\nfrom hekrapi.exceptions import HekrAPIException\nfrom .const import *\nfrom .schemas import CONFIG_SCHEMA\nfrom .supported_protocols import SUPPORTED_PROTOCOLS\n\nif TYPE_CHECKING:\n from homeassistant.helpers.device_registry import DeviceRegistry, DeviceEntry\n from hekrapi.device import Device, _BaseConnector\n from .base_platform import HekrEntity\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup(hass, yaml_config):\n \"\"\"Set up cloud authenticators from config.\"\"\"\n domain_config = yaml_config.get(DOMAIN)\n if not domain_config:\n return True\n\n hekr_data = HekrData.get_instance(hass)\n hekr_data.use_model_from_protocol = domain_config[CONF_USE_MODEL_FROM_PROTOCOL]\n\n devices_config = domain_config.get(CONF_DEVICES)\n if devices_config:\n for item_config in devices_config:\n _LOGGER.debug('Device entry from YAML: %s' % item_config)\n\n device_id = item_config.get(CONF_DEVICE_ID)\n if device_id in hekr_data.devices_config:\n _LOGGER.warning('Device with ID \"%s\" set up multiple times. Please, check your configuration.' % device_id)\n continue\n\n _LOGGER.debug('Adding device entry with ID \"%s\"' % device_id)\n hekr_data.devices_config[device_id] = item_config\n hass.async_create_task(\n hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_IMPORT},\n data={CONF_DEVICE: item_config},\n )\n )\n\n accounts_config = domain_config.get(CONF_ACCOUNTS)\n if accounts_config:\n _LOGGER.warning('Accounts are not supported in the current release. Please, remove [%s->%s] key from your YAML '\n 'configuration file to avoid further incompatibilities.' 
% (DOMAIN, CONF_ACCOUNTS))\n\n return True\n\n\nasync def async_setup_entry(hass: HomeAssistantType, config_entry: config_entries.ConfigEntry):\n conf = config_entry.data\n\n hekr_data = HekrData.get_instance(hass)\n hass_devices_config = hekr_data.devices_config\n\n try:\n if CONF_DEVICE in conf:\n device_cfg = conf[CONF_DEVICE]\n device_id = device_cfg.get(CONF_DEVICE_ID)\n\n if config_entry.source == config_entries.SOURCE_IMPORT:\n if device_id not in hass_devices_config:\n _LOGGER.info('Removing entry %s after removal from YAML configuration.' % config_entry.entry_id)\n hass.async_create_task(\n hass.config_entries.async_remove(config_entry.entry_id)\n )\n return False\n elif device_id in hass_devices_config:\n _LOGGER.warning('Duplicate entry for device \"%s\" detected. Please, check your integrations.' % device_id)\n return False\n\n _LOGGER.debug('Setting up config entry for device with ID \"%s\"' % device_id)\n hekr_data.devices_config[device_id] = device_cfg\n\n device = await hekr_data.create_connected_device(device_cfg)\n #await hekr_data.create_device_registry_entry(device, config_entry.entry_id)\n hekr_data.setup_entities(config_entry)\n\n _LOGGER.debug('Successfully set up device with ID \"%s\"' % device_id)\n return True\n\n except HekrAPIException:\n _LOGGER.exception(\"API exception while setting up config entry %s\" % config_entry.entry_id)\n return False\n\n _LOGGER.error('Unknown configuration format for entry ID %s, must remove' % config_entry.entry_id)\n hass.async_create_task(\n hass.config_entries.async_remove(config_entry.entry_id)\n )\n return False\n\n\nasync def async_unload_entry(hass: HomeAssistantType, config_entry: config_entries.ConfigEntry):\n _LOGGER.debug('Unloading Hekr config entry with ID \"%s\"' % config_entry.entry_id)\n conf = config_entry.data\n\n hekr_data = HekrData.get_instance(hass)\n\n try:\n if CONF_DEVICE in conf:\n device_cfg = conf[CONF_DEVICE]\n device_id = device_cfg.get(CONF_DEVICE_ID)\n\n _LOGGER.debug('Unloaded device ID: %s, device config: %s' % (device_id, device_cfg))\n\n #await hekr_data.delete_device_registry_entry(device_id)\n await asyncio.wait(hekr_data.unload_entities(config_entry))\n\n device = hekr_data.devices.pop(device_id)\n if device.connector.listener is not None and device.connector.listener.is_running:\n device.connector.listener.stop()\n await device.connector.close_connection()\n\n if config_entry.source != config_entries.SOURCE_IMPORT:\n del hekr_data.devices_config[device_id]\n\n except HekrAPIException:\n _LOGGER.exception('Exception occurred while unloading entry %s' % config_entry.entry_id)\n\n return True\n\nAnyDeviceIdentifier = Union[DeviceID, 'Device']\nclass HekrData:\n @classmethod\n def get_instance(cls, hass):\n hekr_data = hass.data.get(DOMAIN)\n if hekr_data is None:\n hekr_data = cls(hass)\n hass.data[DOMAIN] = hekr_data\n return hekr_data\n\n def __init__(self, hass: HomeAssistantType):\n if isinstance(hass.data.get(DOMAIN), HekrData):\n raise Exception('One instance of HekrData is already installed')\n\n self.hass = hass\n\n self.devices: Dict[DeviceID, 'Device'] = dict()\n self.devices_config: Dict[DeviceID, ConfigType] = dict()\n self.device_entries: Dict[DeviceID, DeviceEntry] = dict()\n self.update_intervals: Dict[DeviceID, Dict[str, timedelta]] = dict()\n self.device_entities: Dict[DeviceID, List['HekrEntity']] = dict()\n self.updaters: Dict[DeviceID, Callable] = dict()\n\n self.use_model_from_protocol = DEFAULT_USE_MODEL_FROM_PROTOCOL\n\n self.hass.bus.async_listen_once(\n 
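# register the start/stop hooks below so device listeners and connectors are shut down cleanly with Home Assistant\n 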
EVENT_HOMEASSISTANT_START, self.homeassistant_start\n )\n self.hass.bus.async_listen_once(\n EVENT_HOMEASSISTANT_STOP, self.homeassistant_stop\n )\n\n\n # Helper methods (not related to HekrData directly)\n @staticmethod\n def detect_protocol_from_info(device_info) -> Optional[str]:\n # SUPPORTED_PROTOCOLS is a dict, so iterate its items to unpack id/definition pairs\n for protocol_id, supported_protocol in SUPPORTED_PROTOCOLS.items():\n if PROTOCOL_DETECTION in supported_protocol:\n if supported_protocol.get(PROTOCOL_DETECTION)(device_info):\n return protocol_id\n else:\n # @TODO: add more protocol detection algorithms\n continue\n\n return None\n\n @staticmethod\n def resolve_device_id(device_id: AnyDeviceIdentifier) -> str:\n if isinstance(device_id, str):\n return device_id\n return device_id.device_id\n\n def resolve_device(self, device_id: AnyDeviceIdentifier) -> 'Device':\n if isinstance(device_id, str):\n return self.devices[device_id]\n return device_id\n\n\n # HomeAssistant event listeners\n async def homeassistant_start(self, *_):\n pass\n\n async def homeassistant_stop(self, *_):\n _LOGGER.debug('Hekr system is shutting down')\n for device_id, device in self.devices.items():\n connector = device.connector\n listener = connector.listener\n if listener is not None and listener.is_running:\n _LOGGER.debug('Shutting down listener for device ID \"%s\"' % device_id)\n listener.stop()\n\n if connector.is_connected:\n _LOGGER.debug('Shutting down connector for device ID \"%s\"' % device_id)\n await connector.close_connection()\n\n async def update_entities_callback(self, hekr_device, message_id, state, action, data):\n if hekr_device and action in (ACTION_COMMAND_RESPONSE, ACTION_DEVICE_MESSAGE) \\\n and state == DeviceResponseState.SUCCESS:\n\n _LOGGER.debug('Received response (message ID: %d) from information command (action: %s) with data: %s'\n % (message_id, action, data))\n command, data, frame_number = data\n\n update_entities = self.device_entities.get(hekr_device.device_id)\n\n if update_entities:\n protocol = self.get_device_protocol(device_id=hekr_device.device_id)\n attribute_filter = protocol.get(PROTOCOL_FILTER)\n attributes = attribute_filter(data) if callable(attribute_filter) else data\n\n tasks = [\n entity.handle_data_update(attributes)\n for entity in update_entities\n if entity.command_receive == command.name\n ]\n\n if tasks:\n _LOGGER.debug('Performing update on %d entities for command \"%s\"' % (len(tasks), command.name))\n await asyncio.wait(tasks)\n _LOGGER.debug('Update complete!')\n else:\n _LOGGER.debug('No updates scheduled for command \"%s\"' % command.name)\n\n\n # Device registry management\n async def get_device_registry_entry(self, device_id: AnyDeviceIdentifier) -> Optional['DeviceEntry']:\n device_id = self.resolve_device_id(device_id)\n device_registry = await self.hass.helpers.device_registry.async_get_registry()\n return device_registry.async_get(self.device_entries[device_id])\n\n async def delete_device_registry_entry(self, device_id: AnyDeviceIdentifier) -> None:\n device_id = self.resolve_device_id(device_id)\n device_registry: DeviceRegistry = await self.hass.helpers.device_registry.async_get_registry()\n device_registry.async_remove_device(self.device_entries[device_id].id)\n\n def get_device_info_dict(self, device: AnyDeviceIdentifier):\n device = self.resolve_device(device)\n device_cfg = self.devices_config[device.device_id]\n\n protocol_id = device_cfg.get(CONF_PROTOCOL)\n protocol = SUPPORTED_PROTOCOLS[protocol_id]\n\n attrs = dict()\n attrs['identifiers'] = {(DOMAIN, device.device_id)}\n\n if device.device_info is None:\n 
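# device_info has not been fetched yet: fall back to protocol metadata and the configured name\n 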
model = protocol.get(PROTOCOL_NAME, protocol_id)\n manufacturer = None\n attrs['connections'] = set()\n attrs['name'] = device_cfg.get(CONF_NAME)\n else:\n model = device.product_name\n manufacturer = None\n attrs['connections'] = set()\n attrs['name'] = device.device_name\n attrs['sw_version'] = device.firmware_version\n\n if self.use_model_from_protocol:\n attrs['model'] = protocol.get(PROTOCOL_MODEL, model)\n attrs['manufacturer'] = protocol.get(PROTOCOL_MANUFACTURER, manufacturer)\n else:\n attrs['model'] = model or protocol.get(PROTOCOL_MODEL)\n attrs['manufacturer'] = manufacturer or protocol.get(PROTOCOL_MANUFACTURER)\n\n return attrs\n\n async def create_device_registry_entry(self, device: 'Device', config_entry_id: str) -> 'DeviceEntry':\n \"\"\"Create device registry entry for device.\"\"\"\n attrs = self.get_device_info_dict(device)\n dev_reg: 'DeviceRegistry' = await self.hass.helpers.device_registry.async_get_registry()\n device_entry = dev_reg.async_get_or_create(\n config_entry_id=config_entry_id,\n **attrs\n )\n\n self.device_entries[device.device_id] = device_entry\n\n return device_entry\n\n # Entity management\n def setup_entities(self, config_entry: config_entries.ConfigEntry) -> List[Task]:\n # @TODO: Refactor for CONF_DOMAINS\n _LOGGER.debug('Setting up components for config entry %s' % config_entry.entry_id)\n tasks = []\n for conf_key, (entity_domain, protocol_key) in CONF_DOMAINS.items():\n _LOGGER.debug('Forwarding entry ID %s set up for entity domain %s for'\n % (config_entry.entry_id, entity_domain))\n\n tasks.append(self.hass.async_create_task(\n self.hass.config_entries.async_forward_entry_setup(config_entry, entity_domain)\n ))\n\n return tasks\n\n def unload_entities(self, config_entry: config_entries.ConfigEntry) -> List[Task]:\n _LOGGER.debug('Unloading components for config entry %s' % config_entry.entry_id)\n tasks = []\n for conf_key, (entity_domain, protocol_key) in CONF_DOMAINS.items():\n _LOGGER.debug('Forwarding entry ID %s set up for entity domain %s for'\n % (config_entry.entry_id, entity_domain))\n\n tasks.append(self.hass.async_create_task(\n self.hass.config_entries.async_forward_entry_unload(config_entry, entity_domain)\n ))\n\n return tasks\n\n # Setup methods\n def get_device_protocol(self, device_id: AnyDeviceIdentifier):\n device_id = self.resolve_device_id(device_id)\n protocol_id = self.devices_config[device_id].get(CONF_PROTOCOL)\n return SUPPORTED_PROTOCOLS.get(protocol_id)\n\n def create_device(self, config: ConfigType) -> 'Device':\n _LOGGER.debug('Creating device via get_add_device with config: %s' % config)\n protocol_id = config.get(CONF_PROTOCOL)\n protocol = SUPPORTED_PROTOCOLS[protocol_id]\n\n from hekrapi.device import Device, CloudConnector, LocalConnector\n\n token = config.get(CONF_TOKEN)\n if token is None:\n connect_port = config.get(CONF_PORT, protocol.get(PROTOCOL_PORT))\n if connect_port is None:\n raise Exception('Protocol \"%s\" for device with ID \"%s\" does not provide default port. Please, '\n 'configure port manually.' 
% (protocol_id, config.get(CONF_DEVICE_ID)))\n\n connector = LocalConnector(\n host=config.get(CONF_HOST),\n port=connect_port,\n application_id=config.get(CONF_APPLICATION_ID, DEFAULT_APPLICATION_ID),\n )\n else:\n connector = CloudConnector(\n token=config.get(CONF_TOKEN),\n connect_host=config.get(CONF_CLOUD_HOST, DEFAULT_CLOUD_HOST),\n connect_port=config.get(CONF_CLOUD_PORT, DEFAULT_CLOUD_PORT),\n application_id=config.get(CONF_APPLICATION_ID, DEFAULT_APPLICATION_ID),\n )\n\n device = Device(\n device_id=config.get(CONF_DEVICE_ID),\n control_key=config.get(CONF_CONTROL_KEY),\n protocol=protocol[PROTOCOL_DEFINITION]\n )\n device.connector = connector\n device.add_callback(self.update_entities_callback)\n self.devices[device.device_id] = device\n self.devices_config[device.device_id] = config\n\n return device\n\n def get_create_device(self, config: ConfigType, compare_configs: bool = True) -> 'Device':\n device_id = config.get(CONF_DEVICE_ID)\n\n device: Optional['Device'] = self.devices.get(device_id)\n if device is None:\n device = self.create_device(config)\n\n elif compare_configs:\n device_config = self.devices_config[device_id]\n\n exclude_compare = [*CONF_DOMAINS.keys(), CONF_SCAN_INTERVAL, CONF_PLATFORM, CONF_NAME]\n invalid_keys = []\n for conf_key in {*device_config.keys(), *config.keys()}:\n if conf_key not in exclude_compare and device_config.get(conf_key) != config.get(conf_key):\n invalid_keys.append(conf_key)\n\n if invalid_keys:\n raise Exception(\"Cannot create device because a similar one exists, but with different configuration on\"\n \"keys: %s\" % \", \".join(invalid_keys))\n\n return device\n\n async def create_connected_device(self, config: ConfigType) -> 'Device':\n device = self.create_device(config)\n await device.connector.open_connection()\n self.refresh_connections()\n return device\n\n async def get_create_connected_device(self, config: ConfigType) -> 'Device':\n device = self.get_create_device(config)\n if device.connector.is_connected:\n return device\n await device.connector.open_connection()\n self.refresh_connections()\n return device\n\n\n # Updater and listener management\n def _create_updater(self, device_id: DeviceID, commands: Set[str], interval: timedelta):\n async def call_command(*_):\n device = self.devices.get(device_id)\n if device is None:\n _LOGGER.debug('Device with ID \"%s\" is missing, cannot run updater' % device_id)\n return\n\n _LOGGER.debug('Running updater for device \"%s\" with commands: %s' % (device_id, ', '.join(commands)))\n device = self.devices[device_id]\n command_iter = iter(commands)\n first_command = next(command_iter)\n\n _LOGGER.debug('Running update command: %s' % first_command)\n await device.command(first_command)\n for command in command_iter:\n _LOGGER.debug('Sleeping for %d seconds before running command: %s' % (DEFAULT_SLEEP_INTERVAL, command))\n await asyncio.sleep(DEFAULT_SLEEP_INTERVAL)\n _LOGGER.debug('Running update command: %s' % command)\n await device.command(command)\n\n len_cmd = len(commands)\n # assumed: 1 second per command, N second(-s) intervals between commands\n min_seconds = len_cmd + (len_cmd - 1) * DEFAULT_SLEEP_INTERVAL\n if interval.seconds < min_seconds:\n _LOGGER.warning('Interval provided for updater (%d seconds) is too low to perform updates! 
'\n 'Adjusted automatically to %d seconds to prevent hiccups.'\n % (interval.seconds, min_seconds))\n interval = timedelta(seconds=min_seconds)\n\n # noinspection PyTypeChecker\n return async_track_time_interval(\n hass=self.hass,\n action=call_command,\n interval=interval\n )\n\n def _refresh_updaters(self):\n for device_id, entities in self.device_entities.items():\n if device_id in self.updaters:\n # cancel running updater\n self.updaters[device_id]()\n del self.updaters[device_id]\n\n update_commands = set([entity.command_update for entity in entities])\n if update_commands:\n _LOGGER.debug('Creating updater for device with ID \"%s\" with commands: %s'\n % (device_id, ', '.join(update_commands)))\n device_cfg = self.devices_config[device_id]\n interval = device_cfg.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)\n if isinstance(interval, int):\n interval = timedelta(seconds=interval)\n\n self.updaters[device_id] = self._create_updater(\n device_id=device_id,\n commands=update_commands,\n interval=interval,\n )\n else:\n _LOGGER.debug('No updater required for device with ID \"%s\"' % device_id)\n\n _LOGGER.debug('Refreshed updaters: %s' % self.updaters)\n\n def _create_listener(self, connector: '_BaseConnector'):\n from hekrapi.device import Listener\n return Listener(connector,\n callback_exec_function=self.hass.add_job,\n callback_task_function=self.hass.async_create_task,\n auto_reconnect=True)\n\n def _refresh_listeners(self):\n required_device_ids = self.updaters.keys()\n _LOGGER.debug('Required device IDs for listening: %s' % required_device_ids)\n active_listeners = set()\n required_listeners = set()\n for device_id, device in self.devices.items():\n if device_id in required_device_ids:\n listener = device.connector.get_listener(listener_factory=self._create_listener)\n required_listeners.add(listener)\n if listener.is_running:\n active_listeners.add(listener)\n else:\n listener = device.connector.listener\n if listener is not None and listener.is_running:\n active_listeners.add(listener)\n\n for listener in active_listeners - required_listeners:\n if listener.is_running:\n listener.stop()\n\n for listener in required_listeners - active_listeners:\n if not listener.is_running:\n listener.start()\n\n def refresh_connections(self):\n # 1. Refresh updaters\n self._refresh_updaters()\n # 2. 
Refresh listeners\n        self._refresh_listeners()\n","sub_path":"custom_components/hekr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":21669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"643925608","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import pi\nfrom configparser import ConfigParser\n\n\n#READ INPUT PARAMETERS\n\nparser = ConfigParser()\nparser.read('Entradas.ini') \n\nK = parser.getfloat('Parametros','K')\nn = parser.getint('Parametros','n')\n\nt0 = parser.getfloat('CondicionesIniciales','t0')\np0 = parser.getfloat('CondicionesIniciales','p0')\n\n\n#define arrays of dimension n, where n is the number of iterations\ntt = np.zeros(n) \npp = np.zeros(n)\nee = np.zeros(n) \nxx = np.zeros(n)\ndelta = np.zeros(n)\nLyap = np.zeros(n)\n\n#initial conditions\n\nt = t0\np = p0\n\n#Initial separation between the trajectories\nd0=1.e-9\ne=d0\nx=d0\n#e = eta = p' - p\n#x = xi = t' - t\n\nK=K/(2*pi)\n\n\nfor i in range(1,n):\t\t\n\t#mod(,1) keeps p and t between 0 and 1\n\tp = np.mod(p + K * np.sin(2.*pi*t),1.)\n\tt = np.mod(t + p,1.)\n\t\n\te = e + K*np.cos(2.*pi*t)*x\n\tx = x + e\n\t\n\tpp[i] = p\n\ttt[i] = t\n\t\n\txx[i] = x\n\tee[i] = e\n\t\n\tdelta[i]=np.sqrt(e**2+x**2)\n\tLyap[i]=(1./i)*np.log(delta[i]/d0)\n\nx=np.linspace(1,n,n)\n\n#Define a string holding the initial condition values\t\t\nnombre='t0='+str(t0)+'. p0='+str(p0)\n\n\n\n#to plot the initial point\ntp0=[t0,p0]\nplt.figure()\nplt.plot(tt,pp,'.',markersize='5',color='k')\t\nplt.plot(tp0[0],tp0[1],'.',markersize='10',color='r')\nplt.xlim(0,1)\nplt.ylim(0,1)\nplt.grid()\nplt.xlabel('t')\nplt.ylabel('p')\nplt.title('MapaStandart.'+nombre)\t\t\nplt.savefig('MapaStandart.'+nombre+'.png')\t\t\n\n\nplt.show()\n\nplt.figure()\nplt.plot(x,Lyap,color='k',label='Lyapunov')\t\nplt.xlim(1,n)\nplt.xlabel('n')\nplt.ylabel(r'$\\sigma$(n)')\nplt.title('Exponente de Lyapunov.'+nombre)\nplt.savefig('Lyapunov.'+nombre+'.png')\n\t\nplt.figure()\nplt.plot(x,delta,color='k',label=r'$\\delta$(n)')\t\nplt.xlabel('n')\nplt.ylabel(r'$\\delta$(n)')\nplt.xlim(1,n)\nplt.title('Delta.'+nombre)\nplt.savefig('Delta.'+nombre+'.png')\n","sub_path":"MapaTangente.py","file_name":"MapaTangente.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"236646672","text":"import cv2\nimport RPi.GPIO as gpio\nfrom datetime import datetime\nimport requests\nimport time\nimport zbarlight\nimport requests\n#from pyzbar.pyzbar import decode\nfrom PIL import Image\nimport PIL\nimport json\nimport os\nimport numpy as np\nimport paho.mqtt.client as mqtt\nfrom threading import Thread\nim_center = (300,240)\nsq_center = (0,0)\n\nadjust_conf = 0\ngo_x_conf = 0\n\nbashCommand = \"uvcdynctrl -v -d video0 --set='Focus, Auto' 0\"\nos.system(bashCommand)\n\n\nproduct = None\nimage = None\ncamera = cv2.VideoCapture(0)\nx,y,w,h = None,None,None,None\n\nstart = 'start'\n\ncv2.namedWindow('Original')\ncv2.moveWindow('Original', 500,100)\n\nmsgFromJohn = \"\"\nclass ListenJohn(Thread):\n    \n    def __init__(self, name):\n        Thread.__init__(self)\n        self.name = name\n        listener.connect(\"192.168.0.110\",1883,60)\n        listener.on_connect = self.on_connect\n        listener.on_message = self.on_message\n        print(\"client is created\")\n    \n    def run(self):\n        print(\"thread start\")\n        listener.loop_forever()\n    \n    def on_connect(self, client, userdata, flags, rc):\n        
print(\"Connected with result code \"+str(rc))\n listener.subscribe(\"ev4/to/pi\")\n print(\"I am listening to ev4\")\n\n def on_message(self, client, userdata, msg):\n global msgFromJohn\n msgFromJohn = msg.payload.decode()\n print(\"ev4 says: \" + msgFromJohn)\n\n \nlistener = mqtt.Client()\nsender = mqtt.Client()\n\n\n\n\nmy_thread = ListenJohn(\"Listen to John\")\n\nmy_thread.start()\nsender.connect(\"192.168.0.110\",1883,1000)\ndef check_box():\n global x\n global y\n global w\n global sq_center\n global image\n global camera\n ret, image = camera.read()\n\n arrMinW = np.array([25, 106, 0])\n arrMaxW = np.array([40, 255, 255])\n\n frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n thresh = cv2.inRange(frame_to_thresh, (arrMinW), (arrMaxW))\n \n \n \n kernel = np.ones((5,5),np.uint8)\n mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2]\n \n if len(cnts) > 0:\n \n c = max(cnts, key=cv2.contourArea)\n M = cv2.moments(c)\n x,y,w,h = cv2.boundingRect(c)\n cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)\n print(x, w, y, h)\n #((x, y), radius) = cv2.minEnclosingCircle(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n print(center)\n sq_center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n return True\n \ndef go_x_1():\n global x\n global y\n global w\n global h\n global adjust_conf\n global go_x_conf\n global image\n best = 170\n go_x_conf = 0\n while True:\n dlin = (abs(best - w) >= 5)\n check_box()\n cv2.imshow(\"Original\", image)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n if w - best >= 9:\n dlina_h = w - best * 2\n print('GO BACKWARD...' + str(dlina_h))\n sender.publish('pi/to/ev4', 'a270' + 'd' + str(dlina_h))\n continue\n else:\n if dlin == False:\n go_x_conf = 1\n print(\"X OK\")\n break\n else:\n dlin_f = abs(best - w) * 2\n print(\"MOVE FORWARD \" + str(dlin_f))\n sender.publish(\"pi/to/ev4\", \"a90\" + \"d\" + str(dlin_f))\n continue\n \ndef check_end():\n global sq_center\n global im_center\n global sender\n global go_x_conf\n global adjust_conf\n \n if go_x_conf == 1 and adjust_conf == 1:\n sender.publish('pi/to/ev4', 'a0' + 'd0')\n print('done')\n return True\n \n else:\n return True\n\n\n\ndef go_x():\n global x\n global y\n global w\n global h\n global adjust_conf\n global go_x_conf\n global image\n best = 200\n go_x_conf = 0\n while True:\n dlin = (abs(best - w) >= 5)\n check_box()\n cv2.imshow(\"Original\", image)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n if w - best >= 9:\n dlina_h = w - best * 2\n print('GO BACKWARD...' 
+ str(dlina_h))\n sender.publish('pi/to/ev4', 'a270' + 'd' + str(dlina_h))\n continue\n else:\n if dlin == False:\n go_x_conf = 1\n print(\"X OK\")\n break\n else:\n dlin_f = abs(best - w) * 2\n print(\"MOVE FORWARD \" + str(dlin_f))\n sender.publish(\"pi/to/ev4\", \"a90\" + \"d\" + str(dlin_f))\n continue\ndef qrRead():\n global camera\n global sender\n global product\n ret,frame = camera.read()\n \n \n url = 'http://robots.therdteam.com/api/v2/products/createfromapi'\n \n headers2 = {'content-type': 'application/json'}\n \n \n cv2.imwrite('foo.jpg',frame)\n \n print(\"Scanning image..\")\n f = open('foo.jpg','rb')\n \n #ar = frame.array\n #qr = PIL.Image.open(f);\n qr = PIL.Image.open(f)\n qr.load()\n #data = decode(qr)\n \n codes = zbarlight.scan_codes(\"qrcode\",qr)\n if(codes==None):\n #os.remove('qr_codes/qr_0.jpg')\n print('No QR code found')\n return ''\n else:\n print('QR code(s):')\n print (codes)\n print (str(codes[0])[2])\n #if codes[0] == '1':\n if str(codes[0])[2] == 'e':\n return ''\n else:\n print(str(codes[0].decode()))\n data_r = str(codes[0].decode())\n print('hochu post') \n print('post bil')\n #print(str(r.content))\n \n product = data_r\n return data_r\ndef adjust():\n \n global sq_center\n global sender\n global im_center\n global adjust_conf\n global go_x_conf\n global image\n adjust_conf = 0\n while True:\n check_box()\n cv2.imshow(\"Original\", image)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n if abs(sq_center[0] - im_center[0]) <= 5:\n adjust_conf = 1\n print('POS OK')\n break\n else:\n print(sq_center)\n print(im_center)\n if (sq_center[0] < im_center[0]) :\n print('q')\n dlin = abs(im_center[0] - sq_center[0])\n dlin_h = abs(sq_center[0] - im_center[0])\n \n print(\"ADJUST LEFT... \" + str(dlin))\n sender.publish(\"pi/to/ev4\", \"a180\" + \"d\" + str(dlin))\n continue\n \n else:\n print('r')\n dlin = abs(sq_center[0] - im_center[0])\n print(\"ADJUST RIGHT...\" + str(dlin))\n sender.publish(\"pi/to/ev4\", \"a0\" + \"d\" + str(dlin))\n continue\n \n\n\ndef readGAZ():\n global product\n global sender\n gpio.setmode(gpio.BCM)\n gpio.setup(14, gpio.IN)\n url_gaz = 'http://robots.therdteam.com/api/v2/products/checkgaz'\n url_nogaz = 'http://robots.therdteam.com/api/v2/products/checkgaznogaz'\n i = 0\n while i <= 1000000:\n data = False\n input_value = gpio.input(14)\n i+=1\n if (i==1000000):\n print('vishli: ',data)\n sender.publish('pi/to/ev4', str(product) +' '+ 'True')\n return str(data)\n if input_value == False:\n data = True\n print('The button has been pressed...')\n time.sleep(7)\n sender.publish('pi/to/ev4', str(product) +' '+ 'False')\n return str(data)\n \n break\n \n while input_value == False:\n input_value = gpio.input(14)\n \ndef main():\n global camera\n global msgFromJohn\n global go_x_conf\n global adjust_conf\n try:\n while True:\n \n ret, img = camera.read()\n cv2.circle(img,(260,240), 10, (0,255,255), -1)\n cv2.imshow(\"Original\", img)\n \n \n \n if msgFromJohn != '':\n \n if msgFromJohn == 'adjust':\n msgFromJohn = ''\n check_box()\n adjust()\n go_x_1()\n adjust()\n print('!!!')\n sender.publish('pi/to/ev4','a0d0')\n \n \n if msgFromJohn == 'adjust1':\n msgFromJohn = ''\n check_box()\n adjust()\n go_x()\n adjust()\n print('!!!')\n sender.publish('pi/to/ev4','a0d0')\n \n if msgFromJohn == 'readQR':\n time.sleep(2)\n qrRead()\n msgFromJohn = ''\n \n if msgFromJohn == 'readGAZ':\n readGAZ()\n msgFromJohn = ''\n \n adjust_conf = 0\n go_x_conf = 0\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n finally:\n listener.disconnect()\n self = 
False\n\nif start == 'start':\n main()","sub_path":"Rpi/RpiRobotProgram2.py","file_name":"RpiRobotProgram2.py","file_ext":"py","file_size_in_byte":9368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254077123","text":"import fnmatch\nimport logging\nimport os\nimport pandas\nfrom nose.plugins.attrib import attr\n\nfrom test.test_base import AssetManagementUnitTest\n\nlogging.basicConfig()\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\n\n\n@attr('UNIT')\nclass DeploymentFilesUnitTest(AssetManagementUnitTest):\n asset_map = {\n 'mooring.uid': 'Platform',\n 'sensor.uid': 'Sensor',\n 'node.uid': 'Node'\n }\n\n required_ids = {\n 'CUID_Deploy',\n 'Reference Designator',\n 'deploymentNumber',\n 'startDateTime',\n 'mooring.uid',\n 'sensor.uid',\n 'lat',\n 'lon',\n }\n\n optional_ids = {\n 'deployedBy',\n 'CUID_Recover',\n 'recoveredBy',\n 'versionNumber',\n 'stopDateTime',\n 'node.uid', # only required for profilers\n 'orbit',\n 'depth'\n }\n\n def setUp(self):\n \"\"\"\n Read bulk load asset management data and save UID and serial numbers\n \"\"\"\n super(DeploymentFilesUnitTest, self).setUp()\n # dictionary of all the UIDs and corresponding serial numbers\n self.ids = {str(record.uid): str(record.asset_type) for _, record in self.bulk_data.iterrows()}\n self.cruise_ids = set(self.cruise_data.CUID.values)\n\n def check_type_match(self, record, asset_key):\n \"\"\"\n verify UIDs exist and match type\n :param record:\n :param asset_key: one of the three asset record headings (mooring.uid, sensor.uid, node.uid)\n :return: list of errors (if any)\n \"\"\"\n if asset_key not in self.asset_map.keys():\n return 'Unexpected asset type provided (%s not one of %r)' % (asset_key, self.asset_map.keys())\n\n # make sure the asset has a UID loaded from bulk load\n asset_type = self.asset_map[asset_key]\n asset = str(record[asset_key])\n\n # nodes are optional, skip check if not present\n if asset_key == 'node.uid' and not asset:\n return\n\n if asset not in self.ids:\n return 'Missing UID for %s %s' % (asset_type, asset)\n\n if self.ids[asset] != asset_type:\n return 'Type mismatch for %s - expected \"%s\", found \"%s\"' % (asset_key, asset_type, self.ids[asset])\n\n def check_deploy_file(self, fn):\n \"\"\"\n Check a single deployment file for format and consistency\n :param fn: deployment filename\n :return: list of errors (if any)\n \"\"\"\n errors = []\n deployment = pandas.read_csv(fn).fillna('')\n\n # TODO: remove this once uframe has been updated\n # Drop water_depth and rename deployment_depth to depth until\n # the parser in uframe is updated to reflect these changes\n deployment.rename(columns={'deployment_depth': 'depth'}, inplace=True)\n deployment.drop('water_depth', 1, inplace=True)\n\n # make sure all fields are present\n missing = self.required_ids.union(self.optional_ids) - set(deployment.columns)\n if missing:\n errors.append('Missing required column identifiers: %s' % missing)\n\n # check types for Platform and Sensor\n for index, record in deployment.iterrows():\n try:\n # rows which begin with # are considered comments\n if record.CUID_Deploy.startswith('#'):\n continue\n\n # make sure all required fields are filled out\n set_fields = {name for name in record.index if getattr(record, name)}\n missing = self.required_ids - set_fields\n if missing:\n errors.append('Missing value(s) for required fields: %s on row %d - %r' %\n (missing, index, record.values))\n return errors\n\n # make sure the cruise ID exists\n deploy_cuid = 
record.CUID_Deploy\n if deploy_cuid and deploy_cuid not in self.cruise_ids:\n errors.append('Invalid cruise ID - \"%r\" - row %d' % (deploy_cuid, index))\n\n recover_cuid = record.CUID_Recover\n if recover_cuid and recover_cuid not in self.cruise_ids:\n errors.append('Invalid cruise ID - \"%r\" - row %d' % (recover_cuid, index))\n\n # check asset types for matching UID records\n for asset_type in self.asset_map.keys():\n error = self.check_type_match(record, asset_type)\n if error:\n errors.append(error + ' - row %d' % index)\n\n # start and stop (if present) must have correct format\n start_time = record['startDateTime']\n if start_time and not self.valid_time_format(start_time):\n errors.append('Invalid time format for startDateTime - \"%r\" - row %d' % (start_time, index))\n\n stop_time = record['stopDateTime']\n if stop_time and not self.valid_time_format(stop_time):\n errors.append('Invalid time format for stopDateTime - \"%r\" - row %d' % (stop_time, index))\n\n if start_time == stop_time:\n errors.append('Equivalent startDateTime and stopDateTime - (%r, %r) - row %d' %\n (start_time, stop_time, index))\n\n # reference designator must be valid. from ../../misc/reference_designators.csv\n if not record['Reference Designator'] in self.reference_designators:\n errors.append('Reference Designator (%s) at csv index (%s) not in list of valid reference designators (./vocab/vocab.csv)' % (record['Reference Designator'], index+2))\n\n lat = record['lat']\n if not self.valid_float(lat):\n errors.append('Invalid format for latitude - \"%r\" - row %d' % (lat, index))\n\n lon = record['lon']\n if not self.valid_float(lon):\n errors.append('Invalid format for longitude - \"%r\" - row %d' % (lon, index))\n\n # depth = record['depth']\n # if not valid_float(depth):\n # errors.append('Invalid format for depth - \"%r\" - row %d' % (depth, index))\n\n except AttributeError as e:\n errors.append('Deployment file is missing required fields: %s - row %d' % (e, index))\n break # do not process the rest of the file\n\n return errors\n\n def test_deploy(self):\n \"\"\"\n Cycle through all available deployment files and check\n \"\"\"\n error_count = 0\n for root, dirs, files in os.walk(self.DEP_ROOT):\n for name in fnmatch.filter(files, '*.csv'):\n filename = os.path.join(root, name)\n errors = self.check_deploy_file(filename)\n if errors:\n log.error('%s: %d error%s processing deployment file:', filename, len(errors),\n '' if len(errors) == 1 else 's')\n for error in errors:\n log.error(' %s', error)\n error_count += len(errors)\n\n self.assertEqual(error_count, 0, '%s errors encountered processing deployment files' % error_count)\n","sub_path":"test/unit/test_deploy.py","file_name":"test_deploy.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"480238420","text":"import tensorflow as tf\ntf.keras.backend.set_floatx('float64')\n\nfrom os import mkdir\nimport numpy as np\nfrom copy import deepcopy\nfrom time import time\n\nfrom agents.sac import sac\nfrom agents.utils.gae import get_gaes\n\n\n\nclass Trainer(tf.keras.Model):\n def __init__(self, actions, observations, is_discrete=False, batch_size=64, mem_size=40000,\n gamma=0.99, actor_lr=0.0003, critic_lr=0.0003, alpha_lr=0.0003, delay=1, tau=0.005, **kargs):\n super(Trainer, self).__init__()\n \n self.agent = sac(actions, observations, is_discrete, **kargs)\n\n self.is_discrete = is_discrete\n self.mem_size = mem_size\n self.batch_size = batch_size\n 
self.gamma = gamma\n self.tau = tau\n self.delay = delay\n self.counter = 0\n self.replay_buffer = {\n 'states': np.zeros((mem_size,)+observations),\n 'actions': np.zeros((mem_size)) if is_discrete else np.zeros((mem_size, actions)),\n 'logits': np.zeros((mem_size, actions)),\n 'rewards': np.zeros((mem_size,)),\n 'next_states': np.zeros((mem_size,)+observations),\n 'dones': np.zeros((mem_size,)),\n }\n self.actor_optimizer = tf.keras.optimizers.Adam(actor_lr, epsilon=1e-10)\n self.critic_1_optimizer = tf.keras.optimizers.Adam(critic_lr, epsilon=1e-10)\n self.critic_2_optimizer = tf.keras.optimizers.Adam(critic_lr, epsilon=1e-10)\n self.alpha_optimizer = tf.keras.optimizers.Adam(alpha_lr, epsilon=1e-10)\n\n\n # @tf.function\n def action(self, states):\n logits, _ = self.agent.get_action(self.agent.actor_net, np.array([states]))\n \n if self.is_discrete:\n # action = tf.random.categorical(logits, 1)\n action = tf.argmax(logits, axis=-1)\n action = tf.squeeze(action).numpy()\n else:\n action = tf.squeeze(logits).numpy()\n \n logits = tf.squeeze(logits).numpy()\n \n return action, logits\n\n\n\n def add(self, state, action, logits, reward, next_state, done):\n i = self.counter % self.mem_size\n self.replay_buffer['states'][i] = state\n self.replay_buffer['actions'][i] = action\n self.replay_buffer['logits'][i] = logits\n self.replay_buffer['rewards'][i] = reward\n self.replay_buffer['next_states'][i] = next_state\n self.replay_buffer['dones'][i] = float(not done)\n self.counter+=1\n \n \n def save_model(self, path):\n try:\n self.save_weights(path, save_format='tf')\n print('Saved!')\n except:\n mkdir(path)\n self.save_weights(path, save_format='tf')\n \n \n def load_model(self, path):\n try:\n self.load_weights(path)\n except:\n print(\"\\n\\nWeights not Found\\n\")\n pass\n \n \n \n def sample(self):\n sample_indx = np.random.choice(self.counter if self.counter < self.mem_size else self.mem_size, self.batch_size, replace=False)\n states = self.replay_buffer['states'][sample_indx]\n actions = self.replay_buffer['actions'][sample_indx]\n logits = self.replay_buffer['logits'][sample_indx]\n rewards = self.replay_buffer['rewards'][sample_indx]\n next_states = self.replay_buffer['next_states'][sample_indx]\n dones = self.replay_buffer['dones'][sample_indx]\n\n return states, actions, logits, rewards, next_states, dones\n\n\n\n def update(self):\n if self.counter > self.batch_size:\n \n states, actions, logits, rewards, next_states, dones = self.sample()\n\n self.critic_step(states, actions, logits, rewards, next_states, dones)\n self.actor_step(states)\n self.update_weights()\n\n\n @tf.function\n def critic_step(self, states, actions, logits, rewards, next_states, dones):\n with tf.GradientTape() as tape_1, tf.GradientTape() as tape_2:\n loss_1, loss_2 = self.agent.critic_loss(states, actions, logits, rewards, next_states, dones)\n\n grads_1 = tape_1.gradient(loss_1, self.agent.critic_net_1.trainable_variables)\n grads_2 = tape_2.gradient(loss_2, self.agent.critic_net_2.trainable_variables)\n self.critic_1_optimizer.apply_gradients(zip(grads_1, self.agent.critic_net_1.trainable_variables))\n self.critic_2_optimizer.apply_gradients(zip(grads_2, self.agent.critic_net_2.trainable_variables))\n\n\n\n @tf.function\n def actor_step(self, states):\n with tf.GradientTape() as actor_tape, tf.GradientTape() as alpha_tape:\n actor_loss, alpha_loss = self.agent.actor_loss(states)\n\n actor_grads = actor_tape.gradient(actor_loss, self.agent.actor_net.trainable_variables)\n alpha_grads = 
alpha_tape.gradient(alpha_loss, [self.agent.log_alpha])\n self.actor_optimizer.apply_gradients(zip(actor_grads, self.agent.actor_net.trainable_variables))\n self.alpha_optimizer.apply_gradients(zip(alpha_grads, [self.agent.log_alpha]))\n \n \n \n def update_weights(self):\n self.agent.target_critic_net_1.set_weights( list( map( lambda net, target_net: ((self.tau) * net) + ((1-self.tau) * target_net), self.agent.critic_net_1.get_weights(), self.agent.target_critic_net_1.get_weights()) ) )\n self.agent.target_critic_net_2.set_weights( list( map( lambda net, target_net: ((self.tau) * net) + ((1-self.tau) * target_net), self.agent.critic_net_2.get_weights(), self.agent.target_critic_net_2.get_weights()) ) )\n self.agent.target_actor_net.set_weights( list( map( lambda net, terget_net: ((self.tau) * net) + ((1-self.tau) * terget_net), self.agent.actor_net.get_weights(), self.agent.target_actor_net.get_weights()) ) )\n self.agent.alpha.assign(tf.exp(self.agent.log_alpha))\n \n \n ","sub_path":"trainers/sac_trainer.py","file_name":"sac_trainer.py","file_ext":"py","file_size_in_byte":5948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594243789","text":"import params as p\nfrom src.config import DB\n\n# IMPORTANT! The criteria used to choose the parameters is that there are 3 samples of +/- per subject\n# in every subepoch. There are 600 segments / subepoch.\n# 1*5*20*100 = 10000 ( N_EXAMPLES_PER_CLASS * N_CLASSES * N_SUBEPOCHS * N_SUBJECTS_TRAIN = N_SEGMENTS_TRAIN )\ndef get_params():\n params = {\n p.DATABASE: 'BraTS2017',\n p.INPUT_DIM: [192,192,160],#[64,64,64],#\n\n p.N_CLASSES: 4,\n p.N_EPOCHS: 100,\n p.N_SUBEPOCHS: None,#1,#\n p.BATCH_SIZE: 1,#10,#\n p.CLASS_WEIGHTS: 'inverse_weights',\n\n p.SAMPLING_SCHEME: 'whole',#'whole',#\n p.SAMPLING_WEIGHTS: None,\n p.N_SEGMENTS_TRAIN: None,#120,#\n p.N_SUBJECTS_TRAIN: None,#None,#\n\n p.N_SEGMENTS_VALIDATION: None,#10,#\n p.N_SUBJECTS_VALIDATION: None,#None, #\n\n p.TRAIN_SIZE: 0.6,\n p.DEV_SIZE: 0.4,\n\n p.DATA_AUGMENTATION_FLAG: False,\n p.DATA_AUGMENTATION_PLANES: None,\n\n p.NUM_MODALITIES: 4,\n\n p.LOSS: 'xentropy',\n\n p.OUTPUT_PATH: '/work/acasamitjana/segmentation/BraTS/train',\n p.MODEL_NAME: 'v_net_BN',\n p.LR: 0.0005\n }\n\n return params\n\n","sub_path":"params/BraTS/params_train.py","file_name":"params_train.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"451736290","text":"# Write a class to hold player information, e.g. 
what room they are in\n# currently.\nclass Player():\n def __init__(self, room):\n self.room = room\n self.items = []\n\n def changeRoom(self, mov):\n if mov == 'q':\n exit()\n elif mov == 'i':\n print(\"Inventory: \")\n for item in self.items:\n print(f\"\\t{item.name} - {item.description}\")\n elif mov == 'n':\n if hasattr(self.room, 'n_to'):\n self.room = self.room.n_to\n else:\n print(\"North is not a valid direction\")\n elif mov == 'e':\n if hasattr(self.room, 'e_to'):\n self.room = self.room.e_to\n else:\n print(\"East is not a valid direction\")\n elif mov == 's':\n if hasattr(self.room, 's_to'):\n self.room = self.room.s_to\n else:\n print(\"South is not a valid direction\")\n elif mov == 'w':\n if hasattr(self.room, 'w_to'):\n self.room = self.room.w_to\n else:\n print(\"West is not a valid direction\")\n else:\n print(\"[Error] Unexpected Input:\", mov)\n print(\"Expected: n, e, s, w, i, q\")\n\n def modifyItem(self, command):\n args = command.split() # List containing take/drop and item name\n if len(args) < 2:\n print('[Error]: Unexpected Input:', command)\n else:\n modifier = args[0]\n itemname = \"\"\n # Full itemname if spaces are included eg 'Gold Coin' input\n # NOTE Needs to be fixed\n if len(args) > 2:\n for arg in args[1:]:\n itemname += f\"{arg} \"\n itemname = itemname[:-1]\n else:\n itemname = args[1]\n # Check item exists in room\n item = None\n for i in self.room.items:\n if i.name == itemname:\n item = i\n break\n if item == None:\n # Check item exists on player\n for i in self.items:\n if i.name == itemname:\n item = i\n # If item doesn't exist, print error\n if item == None:\n print(f\"[Error]: Could not find item named \\\"{itemname}\\\"\")\n return\n\n # Check modifier\n if modifier == 'take' or modifier == 'get' or modifier == 'grab':\n self.room.items.remove(item)\n self.items.append(item)\n print('You picked up the', item.name)\n elif modifier == 'drop':\n self.items.remove(item)\n self.room.items.append(item)\n print('You dropped the', item.name)\n else:\n print(f'[Error]: invalid modifier {modifier}, use take/drop.') ","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88716028","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport MySQLdb\nimport time\nimport logging\n\n#to ensure that password is not in main sources\n#prototype file is as follows\n\n'''\nexample /var/gmcs_config/astm_var.py\n#!/usr/bin/python3.7\nmy_user='uuu'\nmy_pass='ppp'\n'''\n\n'''\nif anything is redirected, last newline is added.\nTo prevent it, use following\nI needed this while outputting relevant data to a file via stdout redirection\n\necho -n `./astm_file2mysql_general.py` > x\n'''\n\n#print(dir(astm_var))\n\nsys.path.append('/var/gmcs_config')\nimport astm_var\n\n#check if import successful\n\n#Globals for configuration################\nlog=1\nmy_host='127.0.0.1'\nmy_user=astm_var.my_user\nmy_pass=astm_var.my_pass\nmy_db='cl_general'\n\ninbox='/root/astm_general.data/'\narchived='/root/astm_general.arch/'\nlog_filename='/var/log/astm_file2mysql_general.log'\n\nlogging.basicConfig(filename=log_filename,level=logging.DEBUG)\nif log==0:\n logging.disable(logging.CRITICAL)\n\n##########MYSQL##\ndef run_query(prepared_sql,data_tpl):\n con=MySQLdb.connect(my_host,my_user,my_pass,my_db)\n if(debug==1): print(con)\n if(con==None):\n if(debug==1): print(\"Can't connect to database\")\n else:\n pass\n #print('connected')\n 
cur=con.cursor()\n cur.execute(prepared_sql,data_tpl)\n con.commit()\n return cur\n\ndef get_single_row(cur):\n return cur.fetchone()\n\n#classes#################################\nclass astm_file(object):\n \n def __init__(self,inbox_folder,archived_folder):\n self.inbox=inbox_folder\n self.archived=archived_folder\n self.current_file=''\n self.wait_for=''\n self.previous_byte=''\n self.next_frame_number=1\t#First frame after ENQ-STX is always 1 (not 0)\n self.next_char_chksum_1=False\n self.next_char_chksum_2=False\n self.file_chksum=''\n self.checksum=0\n self.relevant_data=[]\n self.previous_char_was_checksum2=False\n \n self.s1='|'\n self.s2='`'\n self.s3='^'\n self.s4='&'\n \n self.stx=b'\\x02'\n self.etx=b'\\x03'\n self.eot=b'\\x04'\n self.enq=b'\\x05'\n self.ack=b'\\x06'\n self.lf =b'\\x0a'\n self.cr =b'\\x0d'\n self.etb=b'\\x17'\n self.text_data=b''\n \n self.sample_id=''\n self.result=() #a tuple to store one sample result\n self.final_data=() #a tuple to store all sample results of one file\n \n def get_first_file(self):\n inbox_files=os.listdir(self.inbox)\n for each_file in inbox_files:\n if(os.path.isfile(self.inbox+each_file)):\n self.current_file=each_file\n msg='File in queue is: '+self.current_file\n logging.debug(msg)\n return True\n return False #no file to read\n\n def analyse_file(self):\n fh=open(self.inbox+self.current_file,'rb')\n msg='File full path is: '+self.inbox+self.current_file\n logging.debug(msg)\n \n while True:\n data=fh.read(1)\n #logging.debug(data)\n if data==b'':\n break\n elif data==b'\\x06':\n self.manage_ack(data)\n \n elif data==b'\\x02':\n self.manage_stx(data)\n \n elif data==b'\\x0d':\n self.manage_cr(data)\n \n elif data==b'\\x0a':\n self.manage_lf(data)\n \n elif data==b'\\x17':\n self.manage_etb(data)\n two_digit_checksum_string='{chksum:X}'.format(chksum=self.checksum).zfill(2)\n logging.debug('checksum='+two_digit_checksum_string)\n self.next_char_chksum_1=True\n \n elif data==b'\\x03':\n self.manage_etx(data)\n two_digit_checksum_string='{chksum:X}'.format(chksum=self.checksum).zfill(2)\n logging.debug('checksum='+two_digit_checksum_string)\n self.next_char_chksum_1=True \n \n elif data==b'\\x05':\n self.manage_enq(data)\n \n elif data==b'\\x04':\n self.manage_eot(data)\n\n else:\n self.manage_other(data)\n \n self.previous_byte=data\n\n\n \n def manage_ack(self,data):\n logging.debug('ACK')\n\n def manage_stx(self,data):\n logging.debug('STX')\n if (self.wait_for==self.stx):\n msg='Received :STX after ENQ. 
Wait for Frame number'\n            logging.debug(msg)\n            self.wait_for=''\n            self.checksum=0\n        \n    def manage_cr(self,data):\n        logging.debug('CR')\n        self.checksum=(self.checksum+ord(data))%256\n        if self.previous_char_was_checksum2==False:\n            self.relevant_data=self.relevant_data+[chr(ord(data))]\n        \n\n\n    def manage_lf(self,data):\n        logging.debug('LF')\n\n    def manage_etb(self,data):\n        logging.debug('ETB')\n        self.checksum=(self.checksum+ord(data))%256\n\n    def manage_etx(self,data):\n        logging.debug('ETX')\n        self.checksum=(self.checksum+ord(data))%256\n\n    def manage_enq(self,data):\n        logging.debug('ENQ')\n        self.wait_for=self.stx\n        msg='Waiting for :STX '\n        logging.debug(msg)\n        \n    def manage_eot(self,data):\n        logging.debug('EOT')\n\n    def manage_other(self,data):\n\n        logging.debug(data)\n        this_is_frame_number=False #by default, it can be local\n        \n        #verify frame number\n        #if it is make sure it is not part of relevant data\n        if(self.previous_byte==self.stx):\n            if chr(ord(data)).isnumeric()==True :\n                msg='Number found, it is a frame number:'+ chr(ord(data))\n                logging.debug(msg)\n                if(self.next_frame_number==int(data)):\n                    this_is_frame_number=True\n                    msg='Expected frame number :'+ chr(ord(data)) + ' is correct'\n                    logging.debug(msg)\n                    self.next_frame_number=self.next_frame_number+1\n                    if self.next_frame_number>7 :\n                        self.next_frame_number=0\n                else:\n                    msg='Unexpected frame number: {} (expected {})'.format(chr(ord(data)), self.next_frame_number)\n                    logging.debug(msg)\n        \n        #verify checksum or calculate it\n        if self.next_char_chksum_1 ==True:\n            self.file_chksum=self.file_chksum + chr(ord(data))\n            self.next_char_chksum_1=False\n            self.next_char_chksum_2=True\n            self.previous_char_was_checksum2=False\n            \n        elif self.next_char_chksum_2 ==True:\n            self.file_chksum=self.file_chksum + chr(ord(data))\n            self.next_char_chksum_2=False\n            two_digit_checksum_string='{chksum:X}'.format(chksum=self.checksum).zfill(2) \n            self.previous_char_was_checksum2=True\n            \n            #two_digit_file_checksum_string=''.join(self.file_chksum) \n            two_digit_file_checksum_string=self.file_chksum\n            \n            if two_digit_file_checksum_string==two_digit_checksum_string:\n                msg='Checksum matched'\n            else:\n                msg='Checksum not matched'+str(self.file_chksum) +'<>'+ two_digit_checksum_string\n            logging.debug(msg) \n            self.file_chksum='' \n        else: \n            self.checksum=(self.checksum+ord(data))%256\n            #checksum include everything after stx(not stx) including/upto ETB/ETX \n            #ETX,ETB,CR taken care of in its function\n            \n            self.previous_char_was_checksum2=False\n\n        #include everything except STX,ETX EOT,ETB,LF etc \n        # include CR in its own function (if not after checksum)\n        # exclude frame numbers too \n        if this_is_frame_number!=True:\n            self.relevant_data=self.relevant_data+[chr(ord(data))]\n        \n    def send_to_mysql(self):\n        #sql='insert into primary_result_blob (sample_id,examination_id,result,uniq) values (%s,%s,%s,%s) ON DUPLICATE KEY UPDATE result=%s'\n        #data_tpl=(self.abx_result[30].rstrip(' '),key,self.abx_result[key],self.abx_result[26],self.abx_result[key])\n        #run_query(sql,data_tpl)\n        pass #useful during debugging\n    \n    def archive_file(self):\n        os.rename(self.inbox+self.current_file,self.archived+self.current_file)\n        pass #useful during debugging\n    \n\n\n    def mk_tuple(self):\n        raw_data=''.join(m.relevant_data)\n        each_line=raw_data.split('\\x0d')\n        \n        #last char is '\\x0d', so last element of tuple is empty\n        for x in each_line:\n            if len(x)>0:\n                if x[0]=='H':\n                    self.on_header(x)\n                elif 
x[0]=='P':\n self.on_patient(x)\n elif x[0]=='O':\n self.on_order(x)\n elif x[0]=='R':\n self.on_result(x)\n elif x[0]=='L':\n self.on_termination(x)\n else:\n self.on_any_line(x)\n \n #print(self.final_data)\n for each_sample in self.final_data:\n msg='{}\\t{}'.format(each_sample[0],each_sample[1])\n logging.debug(msg)\n\n \n def on_any_line(self,any_line):\n #print(any_line)\n temp=any_line.split(self.s1)\n logging.debug(temp)\n return temp \n \n def on_header(self,header_line):\n self.s1=header_line[1]\n self.s2=header_line[2]\n self.s3=header_line[3]\n self.s4=header_line[4]\n header_tuple=self.on_any_line(header_line)\n\n def on_patient(self,patient_line):\n #Manage previous patient (here and last patient on getting termination record\n if len(str(self.sample_id))>0:\n self.final_data=self.final_data + ((self.sample_id,self.result),)\n \n patient_tuple=self.on_any_line(patient_line)\n #initialize\n pstr='New Patient number:{pn} arrived. Initializing...'.format(pn=patient_tuple[1])\n logging.debug(pstr)\n pstr='Previous Sample Id:({psid}) ...'.format(psid=self.sample_id)\n logging.debug(pstr)\n self.sample_id=''\n pstr='Sample Id:({sid}) after initialization'.format(sid=self.sample_id)\n logging.debug(pstr) \n\n pstr='Previous Results:({res}) ...'.format(res=self.result)\n logging.debug(pstr)\n self.result=()\n pstr='Results:({res}) after initialization'.format(res=self.result)\n logging.debug(pstr)\n\n def on_order(self,order_line):\n order_tuple=self.on_any_line(order_line)\n #initialize\n self.sample_id=order_tuple[2]\n pstr='New Sample Id:({sid})'.format(sid=self.sample_id)\n logging.debug(pstr)\n\n def on_result(self,result_line):\n result_tuple=self.on_any_line(result_line)\n #initialize\n examination_id_tuple=result_tuple[2].split(self.s3)\n examination_id=examination_id_tuple[3]\n self.result=self.result + ((examination_id,result_tuple[3],result_tuple[4]),)\n pstr='New Result:({res})'.format(res=self.result)\n logging.debug(pstr)\n\n def on_termination(self,termination_line):\n termination_tuple=self.on_any_line(termination_line)\n #update final data on recept of new patient and at last on termination\n if len(str(self.sample_id))>0:\n self.final_data=self.final_data + ((self.sample_id,self.result),)\n\n#Main Code###############################\nif __name__=='__main__':\n #print('__name__ is ',__name__,',so running code')\n while True:\n m=astm_file(inbox,archived)\n if(m.get_first_file()):\n m.analyse_file()\n m.mk_tuple()\n m.send_to_mysql()\n m.archive_file()\n time.sleep(1)\n #break; #useful during debugging\n \n \n","sub_path":"misc/astm_file2mysql_general-2020-04-07.py","file_name":"astm_file2mysql_general-2020-04-07.py","file_ext":"py","file_size_in_byte":10694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44432374","text":"import pygame, random\n\nclass Map:\n X_STEP = 40\n Y_STEP = 5\n MIDDLE_RATE = 0.65\n HIGH_RATE = 0.9\n LOW_RATE = 0.4\n LINE_COLOR = (0, 125, 0)\n POLYGON_COLOR = (34, 139, 34)\n\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.createXYList()\n self.memorizeY()\n\n def createXYList(self):\n self.xyList = []\n middle = self.height * self.MIDDLE_RATE\n high = self.height * self.HIGH_RATE\n low = self.height * self.LOW_RATE\n prev = middle\n for i in range(0, self.width + self.X_STEP, self.X_STEP):\n y = -1\n while y < low or y > high:\n r = random.randint(-10, 10) * self.Y_STEP\n y = prev + r\n prev = y\n self.xyList.append((i, int(y)))\n\n def 
memorizeY(self):\n self.yList = []\n for i in range(0, self.width):\n self.yList.append(self.calculateY(i))\n\n def calculateY(self, x):\n x1 = int(x / self.X_STEP)\n xy1 = self.xyList[x1]\n xy2 = self.xyList[x1+1]\n m = float(xy2[1] - xy1[1]) / (xy2[0] - xy1[0])\n y = m * (x - xy1[0]) + xy1[1]\n return int(y)\n\n def getXYList(self):\n return self.xyList\n\n def getY(self, x):\n return self.yList[x]\n\n def draw(self, screen):\n xyList = self.xyList\n prev_xy = xyList[0]\n for xy in xyList:\n pygame.draw.line(screen, self.LINE_COLOR, prev_xy, xy, 5)\n pygame.draw.polygon(screen, self.POLYGON_COLOR, [prev_xy, xy, (xy[0] ,self.height), (prev_xy[0], self.height)])\n prev_xy = xy\n","sub_path":"gamemap.py","file_name":"gamemap.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"242760568","text":"# -*- coding: UTF-8 -*-\n\nimport requests\nimport json\n'''\nurl = \"http://test\"\nheaders= {\"content-type\":\"application/x-www-form-urlencoded;charset=utf-8\"}\ndata = {\"Id\":\"1\",\n \"Nums\":json.dumps([{\"SpecID\":\"1\",\"Name\":\"水果\"}, {\"SpecID\":\"1\",\"Name\":\"蔬菜\"}])}\nr = requests.post(url, headers=headers, data=data)\n\nprint(json.dumps([{\"SpecID\":\"1\",\"Name\":\"水果\"}, {\"SpecID\":\"1\",\"Name\":\"蔬菜\"}]))\n'''\n\nurl = \"http://192.168.200.241:8050/api/1.0/Rule/addRule\"\n#headers= {\"content-type\":\"application/x-www-form-urlencoded;charset=utf-8\"}\nheaders = {\"content-type\":\"application/json;charset=utf-8\"}\ndata = {\"outport_groupid\":\"10\",\n \"action\":\"1\",\n \"save\":\"1\",\n \"hitstats\":\"1\",\n \"match_mode\":\"0\",\n \"direction\":\"0\",\n \"isp\":\"1\",\n \"type\":\"5\",\n \"control_start_time\":\"2020-01-01 09:52:15\",\n \"control_end_time\":\"2020-04-10 14:00:00\",\n \"msg_type\":\"6\",\n \"msg_type_name\":\"协议和应用规则\",\n \"userId\":\"2\",\n \"userName\":\"test\",\n \"ruleDetailList\":json.dumps([{\"rule_cond\":\"0\",\"rule_code\":\"5\",\"app_id\":\"1030036\"}])}\nr = requests.post(url, headers=headers, data=data)","sub_path":"python/Project/TProject/R2server/testPostJson.py","file_name":"testPostJson.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"325525369","text":"import asyncio\r\nimport aiohttp\r\nfrom colorama import Fore, init\r\nfrom prettytable import PrettyTable\r\n\r\nfrom fpl import FPL\r\n\r\n\r\nasync def main():\r\n async with aiohttp.ClientSession() as session:\r\n fpl = FPL(session)\r\n fdr = await fpl.FDR()\r\n\r\n fdr_table = PrettyTable()\r\n fdr_table.field_names = [\r\n \"Team\",\r\n \"All (H)\",\r\n \"All (A)\",\r\n \"GK (H)\",\r\n \"GK (A)\",\r\n \"DEF (H)\",\r\n \"DEF (A)\",\r\n \"MID (H)\",\r\n \"MID (A)\",\r\n \"FWD (H)\",\r\n \"FWD (A)\",\r\n ]\r\n\r\n for team, positions in fdr.items():\r\n row = [team]\r\n for difficulties in positions.values():\r\n for location in [\"H\", \"A\"]:\r\n if difficulties[location] == 5.0:\r\n row.append(Fore.RED + \"5.0\" + Fore.RESET)\r\n elif difficulties[location] == 1.0:\r\n row.append(Fore.GREEN + \"1.0\" + Fore.RESET)\r\n else:\r\n row.append(f\"{difficulties[location]:.2f}\")\r\n\r\n fdr_table.add_row(row)\r\n\r\n fdr_table.align[\"Team\"] = \"l\"\r\n print(fdr_table)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n loop = asyncio.get_event_loop()\r\n 
loop.run_until_complete(main())\r\n","sub_path":"fdr.py","file_name":"fdr.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302587575","text":"import pygame, random\npygame.init()\n\nclass Game:\n\n def __init__(self):\n self.display_width = 600\n self.display_height = 800\n\n self.bg = pygame.image.load('tlo2.png')\n self.gameDisplay = pygame.display.set_mode((self.display_width,self.display_height))\n self.ibg = pygame.image.load('intro_bg.png')\n self.igameDisplay = pygame.display.set_mode((self.display_width, self.display_height))\n self.ebg = pygame.image.load('end_bg.png')\n self.egameDisplay = pygame.display.set_mode((self.display_width, self.display_height))\n\n pygame.display.set_caption(\"Whack a mole\")\n\n self.rab_width = 41\n self.rab_height = 50\n self.rabbit = pygame.image.load('rab.png')\n\n self.hole_position = ((130,400),(430,400),(280,450),(80,500),(480,500),(280,550),(130,600),(430,600))\n\n self.score = 0\n self.counter = 0\n self.speed = 1000\n self.start_time = 0\n self.play_time = 60\n\n self.intro = True\n self.run = True\n self.end = True\n\n def update_score(self):\n\n self.font = pygame.font.SysFont(None,25)\n self.text_score = self.font.render(\"Score:\" + str(self.score),True,(100,100,100))\n self.text_time = self.font.render(\"Time: \" + str(self.timer), True,(100,100,200))\n self.gameDisplay.blit(self.text_score,(50,50))\n self.gameDisplay.blit(self.text_time, (50, 25))\n\n def redrawDisplay(self):\n\n self.gameDisplay.blit(self.bg,(0,0))\n self.gameDisplay.blit(self.rabbit, (self.rand_xy))\n self.update_score()\n pygame.display.update()\n\n def text_objects(self):\n\n self.textSurface = self.font.render(self.text_score, True, (0,0,0))\n return self.textSurface, self.textSurface.get_rect()\n\n def time(self):\n\n self.timer = pygame.time.get_ticks() - self.start_time - 2000\n self.timer = self.timer/1000\n self.timer = int(self.play_time - self.timer)\n\n def game_loop(self):\n\n self.gameDisplay.blit(self.bg, (0, 0))\n self.rand_xy = random.choice(self.hole_position)\n self.start_time = pygame.time.get_ticks()\n\n while self.run:\n pygame.time.delay(1)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.run = False\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.run = False\n self.end_screen()\n if self.timer < 1:\n self.run = False\n self.end_screen()\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.mx, self.my = pygame.mouse.get_pos()\n if (self.rand_xy[0] + self.rab_width > self.mx > self.rand_xy[0]) and (self.rand_xy[1] + self.rab_height > self.my > self.rand_xy[0]):\n self.score += 10\n else:\n self.score -= 5\n\n self.counter += 1\n self.time()\n\n if self.score < 50:\n self.speed = 1000\n elif 50 <= self.score < 150:\n self.speed = 900\n elif 150 <= self.score < 300:\n self.speed = 850\n else:\n self.speed = 750\n\n if self.counter == self.speed:\n self.rand_xy = random.choice(self.hole_position)\n self.redrawDisplay()\n self.counter = 0\n\n def game_intro(self):\n\n self.gameDisplay.blit(self.ibg, (0, 0))\n pygame.display.update()\n\n while self.intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.intro = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self.game_loop()\n\n def end_screen(self):\n\n self.gameDisplay.blit(self.ebg, (0, 0))\n self.font = pygame.font.SysFont(None, 75)\n self.text_score = 
self.font.render(\"Score: \" + str(self.score), True, (55, 97, 38))\n self.gameDisplay.blit(self.text_score, (185, 400))\n pygame.display.update()\n\n self.end = True\n while self.end:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.end = False\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n self.run = True\n self.score = 0\n self.game_loop()\n\nmy_game = Game()\nmy_game.game_intro()\npygame.quit()\n","sub_path":"whack_arab.py","file_name":"whack_arab.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30459258","text":"#!/usr/bin/env python3\n\n# Emma Gloekler gloekler@seattle\n\n\ndef inputToFile(fileName):\n \n f = open(fileName, 'w')\n i = ''\n while i != 'DONE': \n i = input(str('write something'))\n f.write(str(i) + '/n')\n f.close()\n print('finished')\n\ninputToFile('teest.py')\n","sub_path":"Week_5/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524691811","text":"__author__ = 'Russell J. Boag'\nimport pandas as pd\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plot\nfrom random import uniform\ntarget_url = (\"https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data\")\n\n# read data in pandas data frame\nrocksVmines = pd.read_csv(target_url, header=None, prefix=\"V\")\n\n# change target labels to numeric values\ntarget = []\nfor i in range(208):\n # assign 0 or 1 target values based on \"M\" or \"R\" labels\n if rocksVmines.iat[i,60] == \"M\":\n target.append(1.0)\n else:\n target.append(0.0)\n\n# plot 35th attribute\ndataRow = rocksVmines.iloc[0:208,35]\nplot.scatter(dataRow, target)\n\nplot.xlabel(\"Attribute Value\")\nplot.ylabel(\"Target Value\")\nplot.show()\n\n# same plot with points dithered for better readability\ntarget = []\nfor i in range(208):\n # assign 0 or 1 target values based on \"M\" or \"R\" labels\n # and add dither\n if rocksVmines.iat[i,60] == \"M\":\n target.append(1.0 + uniform(-0.1, 0.1))\n else:\n target.append(0.0 + uniform(-0.1, 0.1))\n\n# plot 35th attribute with semi-opaque points\ndataRow = rocksVmines.iloc[0:208,35]\nplot.scatter(dataRow, target, alpha=0.5, s=120)\n\nplot.xlabel(\"Attribute Value\")\nplot.ylabel(\"Target Value\")\nplot.show()\n","sub_path":"ml.py/2.8.targetCorr.py","file_name":"2.8.targetCorr.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"394346188","text":"def blackjack(num_of_human_players, num_of_computer_players, ante, num_of_decks):\n '''\n It might be easier to use num_of_players = num_of_human_players + num_of_computer_players + 1\n '''\n num_of_players = num_of_human_players + num_of_computer_players\n # num_of_players + 1 because the dealer doesn't count as a player\n players = [[] for i in range(num_of_players + 1)]\n # every player and the dealer will be a list within players\n # the dealer is the last list since the dealer is the last to go during gameplay\n\n # visible_cards is for the AI to use to make decisions\n visible_cards = []\n\n # to keep track of the cards that have been generated\n deck = []\n\n # to keep track of players who have last\n busted_index = []\n # Dealing\n\n from DeckGenerator import DeckGenerator\n\n # This is only to create face down 
cards\n    # num_of_players + 1 to include the dealer\n    while len(deck) < (num_of_players + 1):\n\n        # for loop so that each player and the dealer gets one face down card\n        for i in range(len(players)):\n            new_card = DeckGenerator(num_of_decks)\n            # if the new card does not exist, gets added to deck and dealt to player\n            if new_card not in deck:\n                deck.append(new_card)\n                players[i].append(new_card)\n            # if the new card does exist, a new card is generated until a card that does not exist is generated\n            else:\n                while new_card in deck:\n                    new_card = DeckGenerator(num_of_decks)\n\n                # the generated card is then added to the deck and dealt to the player\n                deck.append(new_card)\n                players[i].append(new_card)\n\n    # Create visible cards\n    # num_of_players + 1 to include dealer\n    while len(visible_cards) < (num_of_players + 1):\n\n        # for loop so that each player and the dealer gets one face up card\n        for i in range(len(players)):\n            new_card = DeckGenerator(num_of_decks)\n\n            # if the new card does not exist, gets added to deck, dealt to the player, and added to visible_cards\n            if new_card not in deck:\n                deck.append(new_card)\n                visible_cards.append(new_card)\n                players[i].append(new_card)\n\n            # if the new card does exist, a new card is generated until a card that does not exist is generated\n            else:\n                while new_card in deck:\n                    new_card = DeckGenerator(num_of_decks)\n\n                # the generated card is then added to deck, dealt to the player, and added to visible_cards\n                deck.append(new_card)\n                players[i].append(new_card)\n                visible_cards.append(new_card)\n\n    # The start of check for blackjack\n\n    # Convert the numbers to actual playing cards\n    from NumberConvertor import NumberConvertor\n    player_hands = NumberConvertor(players, num_of_decks)\n    # player_hands now contains every hand as a list of string values with the names of playing cards\n\n    # Convert the string values into the integer values that the playing cards represent\n    from cardconvertor import handconvertor\n    from cardconvertor import cardconvertor\n    hand_value = handconvertor(player_hands)\n    # hand_value now contains a list with the numeric value of each player's hand\n\n    # list to know which player, if any, has a blackjack\n    index_blackjack = []\n\n    # for loop to check each player's hand or blackjack\n    for i in range(len(hand_value)):\n        if hand_value[i][0] == 21:\n\n            # when i == len(hand_value) - 1, the dealer's hand is being checked for blackjack\n            if i == len(hand_value) - 1:\n                # if the dealer has blackjack, the game has ended with the house winning\n                return 'Dealer says, \"Blackjack.\" All players lost.'\n            # report who has a blackjack\n            else:\n                print('Player ' + str(i + 1) + ' has a blackjack.')\n\n                # save which players got a blackjack\n                index_blackjack.append(i)\n\n    # Player input?\n    # the initial players are all human players so num_of_human_players\n    for i in range(num_of_human_players):\n\n        # skip human players who have blackjack\n        if i in index_blackjack:\n            continue\n\n        # the actual player input part\n        else:\n            answer = ''\n\n            # Turns the numbers in the player's hand into the actual cards\n            hand = NumberConvertor(players[i], num_of_decks)\n\n            # Whose turn it is\n            print('Player', str(i + 1) + \"'s turn\")\n\n            # Player's hand\n            print('Hand:', hand, '\\n')\n\n            # Repeats question until player chooses to stay\n            while answer not in ('2', 'stay'):\n\n                # Asks for player's decision\n                answer = input(\"Would you like to:\"\n                               \"\\n\" \"1. Hit, or\"\n                               \"\\n\" \"2. 
Stay?\"\n \"\\n\")\n\n # Ensures the input is all lowercase\n if type(answer) == str:\n answer = answer.lower()\n else:\n continue\n\n # For Hit\n if answer == '1' or answer == 'hit':\n # hand_value will be used to check for if the player's new card causes a bust\n hand_value = 0\n\n # generate the new card\n new_card = DeckGenerator(num_of_decks)\n\n # add new card to player's hand, deck, and visible card\n if new_card not in deck:\n players[i].append(new_card)\n deck.append(new_card)\n visible_cards.append(new_card)\n hand = NumberConvertor(players[i], num_of_decks)\n\n # prints player's hand so they can decide whether or not to take another card\n print(hand)\n\n else:\n # generate new card until a card that is not in the deck is created\n while new_card in deck:\n new_card = DeckGenerator(num_of_decks)\n\n # add new card to player's hand, deck, and visible card\n players[i].append(new_card)\n deck.append(new_card)\n visible_cards.append(new_card)\n hand = NumberConvertor(players[i], num_of_decks)\n\n # print player's hand so they can decide whether or not to take another card\n print(hand)\n\n # Check for bust\n # Ace conversion from 11 to 1 is not working as intended\n # (Doesn't convert once, can't handle two aces)\n\n # j because i is currently in use to ask each human player whether or not they want a new card\n for j in range(len(hand)):\n\n # take only the face value of the card\n # 'Ace of Spades' becomes 'Ace'\n card = hand[j].split(' ', )[0]\n\n # 'Ace' becomes its numeric value (11)\n card = cardconvertor(card)\n\n # Calculate the new hand value once another card is converted\n hand_value = hand_value + card\n\n # Check for bust\n if hand_value > 21:\n card_sans_suit = []\n\n # k because j is in use\n for k in range(len(hand)):\n card_sans_suit.append(hand[k].split(' ',)[0])\n\n get_around_cant_assign_to_literal = 'Ace'\n for get_around_cant_assign_to_literal in card_sans_suit:\n hand_value = hand_value + (1 - 11)\n # The below might not be usable with a while statement\n # Need to reset it but only when there is a new ace?\n ace_conversion = True\n else:\n continue\n else:\n continue\n if hand_value > 21:\n print('Player', str(i + 1), 'has busted.', '\\n')\n busted_index.append(i)\n break\n else:\n continue\n # For Stay\n elif answer == '2' or answer == 'stay':\n print('\\n')\n break\n\n # For if the human player does not give a proper response\n else:\n print('\\n', 'That was not a valid response.', '\\n')\n\n # Computer player deciding whether or not to hit or stay\n\n\n # Final conversion from numbers to playing cards to reveal all hands\n player_hands = NumberConvertor(players, num_of_decks)\n for i in range(len(player_hands)):\n if i in busted_index:\n continue\n elif i == num_of_players:\n print('Dealer\\'s hand: ', str(player_hands[i]))\n else:\n print('Player', str(i + 1) + '\\'s', 'hand: ', str(player_hands[i]))\n # IDEA; use list.pop() to generate dealer hand then not_dealer will be the remaining list\n\n not_dealer = player_hands[:(len(player_hands) - 1)]\n dealer = player_hands[:num_of_players]\n if all(dealer > i for i in not_dealer):\n return 'Dealer won.'\n return 'Game ended'\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":9714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569226668","text":"# -*- coding: utf-8 -*-\nfrom tkinter import *\nfrom tkinter.font import *\nfrom tkinter import ttk\nfrom urllib.request import urlopen,Request\nfrom urllib.parse import 
quote_plus\nfrom xml.etree import ElementTree\n\nimport cplus\n\nh = 4\nv = ['1호선','2호선','3호선','4호선','5호선']\ndata = []\ncanvas = None\np = 0\n\ndef initGraph(frame):\n    global canvas\n    canvas = Canvas(frame,height=500, bg='white')\n    canvas.pack(fill='x')\n    #bframe = Frame(frame,bg='black')\n    #bframe.pack( fill='x')\n    #line = ttk.Combobox(bframe, values=v,state='readonly')\n    #line.current(0)\n    #line.grid(row=0, column=0, padx=50)\n    #Button(bframe, text=\" < \", command=prebutton).grid(row=0, column=1)\n    #Button(bframe, text=\" > \", command=nextbutton).grid(row=0, column=2)\n\n\n\n\ndef getCard(line='4호선',station='정왕역'):\n    global data\n    key = ''\n    url = 'http://openAPI.seoul.go.kr:8088/' +key +'/xml/CardSubwayTime/' + '1/5/' + '201901/' + quote_plus(line+'/'+station) + '/'\n\n    print(url)\n    req = Request(url)\n    req.get_method = lambda: 'GET'\n    res_body = urlopen(req).read()\n    print('Fetching card data')\n    root = ElementTree.fromstring(res_body)\n    dataTree = root.find(\"row\")\n\n    data = []\n    values = []\n    if root.findtext(\"CODE\") != 'INFO-000' and root.findtext('CODE') is not None:\n        print(root.findtext(\"CODE\"))\n        print(\"No data\")\n        data = []\n        return\n\n    for i in dataTree.iter():\n        values.append(i.text)\n    #print(values)\n    for i in range(4,51,2):\n        data.append(int(values[i])+int(values[i+1]))\n    print(data)\n\n\ndef prebutton():\n    global h\n    h = max(h-1, 4)\n\ndef nextbutton():\n    global h\n    h += 1\n\ndef drowGraph():\n    global p\n    if p == 100:\n        p=1\n\n    mx = 0\n    tp = 50\n    bt = 400 +tp\n    lt = 100\n\n    canvas.delete('all')\n\n    if len(data) != 0:\n        mx = max(data)\n        canvas.create_text(lt-30,bt-380, text=str(mx))\n\n    canvas.delete('d')\n    per = p/100\n    for i in range(23):\n        x = i*21 + lt + 30\n        if mx != 0:\n            y = bt - cplus.calc(data[i], 380, per, mx)\n            y2 = bt - cplus.calc(data[i+1], 380, per, mx)\n            canvas.create_line(x, y, x+21, y2, width=3, fill='SeaGreen3', tag='d')\n            canvas.create_text(x, bt+10, text=str((i+4) % 25), tag='d')\n\n    canvas.create_line(lt, tp, lt, bt, lt + 520, bt, width=7, arrow='both')\n    p += 1\n    if p < 100:\n        canvas.after(10, drowGraph)\n\n\ndef UpdateGraph(line, station):\n    getCard(line, station)\n    drowGraph()","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"340711274","text":"def get_query(start_time,end_time,entity):\n\n\tif entity == \"user\":\n\t\treturn get_user_activity_query(start_time,end_time)\n\telif entity == \"dashboard\":\n\t\treturn get_dashboard_query(start_time,end_time)\n\telif entity == \"groupBy\" :\n\t\treturn get_group_query(start_time,end_time)\n\telif entity == \"filter\" :\n\t\treturn get_filter_query(start_time,end_time)\n\telif entity == \"login\" :\n\t\treturn get_login_query(start_time,end_time)\t\t\t\t\n\ndef get_filter_query(start_time,end_time):\n\tquery =[\n\t{\"$match\":{\"message\":\"activate\",\"client\":\"nezu\",\"logTime\":{\"$gte\": start_time, \"$lt\": end_time}}},\n\t{\"$unwind\":\"$masterFilter\"},\n\t{\"$group\":{\"_id\":{\"filterValue\":\"$masterFilter.values\"},\"filterCount\":{\"$sum\":1}}},\n\t{\"$project\":{\"entity\":\"$_id.filterValue\",\"score\":\"$filterCount\",\"_id\":False}},\n\t{\"$sort\":{\"score\":-1}}]\n\n\treturn query\n\ndef get_login_query(start_time,end_time):\n
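\t# score each user by the number of distinct sessions in the window ($addToSet/$size below)\n\tquery =[\n\t{\"$match\":{\"message\":\"activate\",\"client\":\"nezu\",\"logTime\":{\"$gte\": start_time, \"$lt\": 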
end_time}}},\n\t{\"$group\":{\"_id\":{\"user\":\"$user\"},\"sessions\":{\"$addToSet\":\"$session\"}}},\n\t{\"$project\":{\"_id\":False,\"entity\":\"$_id.user\",\"score\":{\"$size\":\"$sessions\"}}},\n\t{\"$sort\":{\"score\":-1}}\n\t]\n\n\treturn query\n\ndef get_dashboard_query(start_time,end_time):\n\tquery = [\n\t{\"$match\":{\"message\":\"activate\",\"client\":\"nezu\",\"logTime\":{\"$gte\": start_time, \"$lt\": end_time}}},\n\t{\"$unwind\":\"$widgets\"},\n\t{\"$group\":{\"_id\":{\"widgets\":\"$widgets\"},\"widgetCount\":{\"$sum\":1}}},\n\t{\"$project\":{\"entity\":\"$_id.widgets\",\"score\":\"$widgetCount\",\"_id\":False}},\n\t{\"$sort\":{\"score\":-1}}\n\t]\n\treturn query\n\ndef get_group_query(start_time,end_time):\n\tquery = [\n\t{\"$match\":{\"message\":\"activate\",\"client\":\"nezu\",\"logTime\":{\"$gte\": start_time, \"$lt\": end_time}}},\n\t{\"$unwind\":\"$groups\"},\n\t{\"$group\":{\"_id\":{\"groupBy\":\"$groups\"},\"groupByCount\":{\"$sum\":1}}},\n\t{\"$project\":{\"entity\":\"$_id.groupBy\",\"score\":\"$groupByCount\",\"_id\":False}},\n\t{\"$sort\":{\"score\":-1}}\n\t]\n\n\treturn query\t\n\ndef get_user_activity_query(start_time,end_time):\n\n\tquery = [\n\t{\"$match\":{\"message\":\"activate\",\"client\":\"nezu\",\"logTime\":{\"$gte\": start_time, \"$lt\": end_time}}},\n\t{\"$group\":{\"_id\":{\"user\":\"$user\"},\"activateCount\":{\"$sum\":1}}},\n\t{\"$project\":{\"_id\":False,\"score\":\"$activateCount\",\"entity\":\"$_id.user\"}},\n\t{\"$sort\":{\"score\":-1}}\n\t]\n\t\n\treturn query\t\n\n","sub_path":"mongo_aggregation_factory.py","file_name":"mongo_aggregation_factory.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522012421","text":"class Node:\n def __init__(self, value=None, next=None):\n self.value = value\n self.next = next\n\nclass Graph:\n def __init__(self, vertices):\n self.V = vertices\n self.graph = [None] * self.V\n\n def add_edge(self, src, dest):\n src_node = Node(src)\n dest_node = Node(dest)\n # assign src_node's next to whatever is first at dest, vice versa\n src_node.next = self.graph[dest]\n dest_node.next = self.graph[src]\n\n # Place dest_node at source and src_node at dest\n self.graph[src] = dest_node\n self.graph[dest] = src_node\n\n def get_nodes(self):\n return self.graph\n\n def print_graph(self):\n for i in range(self.V):\n print(f\"Adjacency list of vertex {i} \\nhead\", end=\"\")\n temp = self.graph[i]\n while temp != None:\n print(f\" -> {temp.value}\", end=\"\")\n temp = temp.next\n print(\"\\n\")\n\n def get_adjacents(self, node):\n adjacents = []\n while node:\n adjacents.append(node)\n node = node.next\n return adjacents\n\n\nif __name__ == \"__main__\":\n V = 5\n graph = Graph(V)\n graph.add_edge(0, 1)\n graph.add_edge(0, 4)\n graph.add_edge(1, 2)\n graph.add_edge(1, 3)\n graph.add_edge(1, 4)\n graph.add_edge(2, 3)\n graph.add_edge(3, 4)\n # Print Adjacency list\n graph.print_graph()\n print(graph.get_nodes()[-1].value)\n adjs = graph.get_adjacents(graph.get_nodes()[-1])\n for n in adjs:\n print(f\"value: {n.value}\")\n # print(graph.get_adjacents(graph.get_nodes()[-1]))\n # print(graph.get_adjacents(graph.get_nodes()[-1]))\n","sub_path":"trees_graphs/simple_graph.py","file_name":"simple_graph.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206436323","text":"import math\n\ndef main():\n points = [[0, 0, 0], [9, 9, 9], [-1, 0, 3], 
[-1, -1, -1], [4, 1, 1], [4, 1, 2], [4, 1, 0],\n              [2, 0.5, 9], [3.5, 2, -1], [3, 1.5, 3], [-1.5, 4, 2],\n              [5.5, 4, -0.5]]\n\n    # p1 and p2 are the indices in the points list\n    p1 = 0\n    p2 = 1\n    list1 = [[p1, p2]]\n    shortestDistances = distance(\n        points[p1][0], points[p1][1], points[p1][2],\n        points[p2][0], points[p2][1], points[p2][2])  # Initialize shortestDistances\n\n    # Compute distance for every two points\n    for i in range(len(points)):\n        for j in range(i + 1, len(points)):\n            currentDistance = distance(\n                points[i][0], points[i][1], points[i][2],\n                points[j][0], points[j][1], points[j][2])\n\n            if shortestDistances > currentDistance:\n                list1 = [[i, j]]\n                shortestDistances = currentDistance  # Update shortestDistances\n            elif shortestDistances == currentDistance:\n                list1.append([i, j])\n\n    # Display result\n    for pair in list1:\n        print(\"The closest two points are \" +\n              \"(\" + str(points[pair[0]]) + \", \" + str(points[pair[1]]) + \")\")\n\ndef distance(x1, y1, z1, x2, y2, z2):\n    return math.sqrt((x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1))\n\nmain()","sub_path":"evennumberedexercise/Exercise11_8.py","file_name":"Exercise11_8.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"654365004","text":"#import re\r\nimport string\r\n\r\nim = open('004.txt')\r\nir = im.read()\r\no = type(ir)\r\nprint(o)\r\n#e.findall()\r\n# a = 0\r\n# for ch in ir:\r\n#     if ch in string.ascii_letters:\r\n#         a += 1\r\n# print(a)","sub_path":"004.py","file_name":"004.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"26111200","text":"import sqlite3\r\nimport re\r\n\r\n# Hongru Qi\r\n# search rides by up to 3 keywords\r\ndef searchRide(conn, email):\r\n    print(\"Search location by keywords\")\r\n    c = conn.cursor()\r\n    keyword = input(\"Please enter up to 3 location keywords (separated by space) or (quit) to quit: \")\r\n    # quit\r\n    if keyword == \"(quit)\":\r\n        return False\r\n    keywords = keyword.split()\r\n    findRides = \"\"\r\n    # counter of keywords\r\n    tmp1 = 0\r\n    # more than one keyword\r\n    if len(keywords) > 1:\r\n        for keyword in keywords:\r\n            findRides += \"select * from (\"\r\n            # get the lcodes for each keyword\r\n            lcodes = locationSearch(conn, keyword)\r\n            lcodes = [x for subtuples in lcodes for x in subtuples]\r\n            # counter of lcodes for each keyword\r\n            tmp = 0\r\n            # get the union of rides that satisfy any one of the lcodes the keyword matches\r\n            for lcode in lcodes:\r\n                if tmp < len(lcodes) - 1:\r\n                    findRides += (rideSearch(lcode) + \" union \")\r\n                else:\r\n                    findRides += rideSearch(lcode)\r\n                tmp += 1\r\n            # get the intersection of each union (result matches all keywords)\r\n            if tmp1 < len(keywords) - 1:\r\n                findRides += \") intersect \"\r\n            else:\r\n                findRides += \")\"\r\n            tmp1 += 1\r\n    # only one keyword\r\n    else:\r\n        # get the lcodes\r\n        lcodes = locationSearch(conn, keywords[0])\r\n        lcodes = [x for subtuples in lcodes for x in subtuples]\r\n        # counter of lcodes\r\n        tmp = 0\r\n        for lcode in lcodes:\r\n            if tmp < len(lcodes) - 1:\r\n                findRides += (rideSearch(lcode) + \" union \")\r\n            else:\r\n                findRides += rideSearch(lcode) + \";\"\r\n            tmp += 1\r\n    c.execute(findRides)\r\n    rides = c.fetchall()\r\n    # let the user make a selection and send a message\r\n    while True:\r\n        selection = displayAndSelect(rides)\r\n        if selection is True:\r\n            break\r\n        if selection == \"\":\r\n            
break\r\n        sendMsg(conn, selection, email)\r\n        ano = input(\"Message sent, enter y to send another message or any other key to quit: \")\r\n        if ano != \"y\":\r\n            break\r\n\r\n# main operational function\r\ndef mainOp(conn, email):\r\n    while True:\r\n        result = searchRide(conn, email)\r\n        if result is False:\r\n            break\r\n\r\n# construct the search command\r\ndef rideSearch(keyword):\r\n    findRide = '''\r\n    select distinct(r.rno), r.price, r.rdate, r.seats, r.lugDesc, r.src, r.dst, r.driver, r.cno, t1.make, t1.model, t1.year, t1.seats, t1.owner\r\n    from rides r, enroute e\r\n    left outer join (select c.cno, c.make, c.model, c.year, c.seats, c.owner\r\n    from cars c, rides r1, enroute e1 where c.cno = r1.cno\r\n    and r1.cno is not null and (r1.dst = '{0}' COLLATE NOCASE or r1.src = '{0}' COLLATE NOCASE or (r1.rno = e1.rno and e1.lcode= '{0}' COLLATE NOCASE))) t1 on t1.cno = r.cno\r\n    where r.dst = '{0}' COLLATE NOCASE or r.src = '{0}' COLLATE NOCASE or (r.rno = e.rno and e.lcode= '{0}' COLLATE NOCASE)\r\n    '''.format(keyword)\r\n\r\n    return findRide\r\n\r\n# search the lcodes by keyword\r\ndef locationSearch(conn, keyword):\r\n    #global conn, cur\r\n    cur = conn.cursor()\r\n    #find\r\n    findLoc = '''\r\n    SELECT lcode\r\n    FROM locations\r\n    WHERE lcode = '{0}' COLLATE NOCASE\r\n    OR city like '%{0}%' COLLATE NOCASE\r\n    OR prov like '%{0}%' COLLATE NOCASE\r\n    OR address like '%{0}%'\r\n    COLLATE NOCASE;\r\n    '''.format(keyword)\r\n    cur.execute(findLoc)\r\n\r\n    #get all the matches and return\r\n    return cur.fetchall()\r\n\r\n# show the searched rides and let the user make a selection\r\ndef displayAndSelect(results):\r\n    if len(results) == 0:\r\n        print('no results found')\r\n        return ''\r\n    #print title\r\n    print(\"ride no | price | ride date | seats | luggage Description | source | destination | driver | car no | car make | car model | year of car | seats of car | car owner\")\r\n    for i in range(0, len(results), 5):\r\n        # five or fewer results remain, print them all\r\n        if len(results) <= i+5:\r\n            for j in range(i, len(results)):\r\n                print(results[j])\r\n            while 1: #prompt input\r\n                print(\"To contact the ride publisher select one to send a message.\")\r\n                selection = input(\"select options: 1-{0} or 'q' to quit: \".format(len(results)-i))\r\n                if selection == 'q':\r\n                    return True\r\n                if re.match('^[1-{0}]$'.format(len(results)-i), selection):\r\n                    break\r\n                print('invalid selection')\r\n        # more than five results remain, only print the next 5\r\n        else:\r\n            for j in range(i, i+5):\r\n                print(results[j])\r\n            while 1:\r\n                print(\"To contact the ride publisher select one to send a message.\")\r\n                selection = input(\"select options: 1-5, 'y' to view more, 'q' to quit: \")\r\n                if selection == 'q':\r\n                    return True\r\n                if re.match('^[1-5y]$', selection):\r\n                    break\r\n                print('invalid selection')\r\n            if selection == 'y':\r\n                continue\r\n            else: break\r\n\r\n    return results[i+int(selection)-1]\r\n\r\n# send message to the selected driver\r\ndef sendMsg(conn, selection, email):\r\n    c = conn.cursor()\r\n    msg = input(\"Please enter your message or (quit) to go to the previous page: \")\r\n    # quit\r\n    if msg == \"(quit)\":\r\n        return\r\n    c.execute('INSERT into inbox values (?, datetime(\"now\", \"localtime\"), ?, ?, ?, ?);', (selection[7], email, msg, selection[0], 'n'))\r\n    conn.commit()\r\n","sub_path":"searchRides.py","file_name":"searchRides.py","file_ext":"py","file_size_in_byte":5907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"542560682","text":"def fib_mod(n, m):\n    a, b = 0, 1\n
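    # advance the Fibonacci pair n times; a then holds F(n)\n    for i 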
in range(0, n):\n        a, b = b, a + b\n    return a % m\n\n\ndef main():\n    n, m = map(int, input().split())\n    print(fib_mod(n, m))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"week1/fib_mod.py","file_name":"fib_mod.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"390890676","text":"def get_quadrant_number(x, y):\n    if x == 0 or y == 0:\n        raise ValueError\n\n    if x > 0 and y > 0:\n        return 1\n    elif x < 0 and y > 0:\n        return 2\n    elif x < 0 and y < 0:\n        return 3\n    elif x > 0 and y < 0:\n        return 4\n\nif __name__ == '__main__':\n    x = int(input())\n    y = int(input())\n    print('{}'.format(get_quadrant_number(x, y)))","sub_path":"task_exception_quadrant.py","file_name":"task_exception_quadrant.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527094163","text":"from random import randint\ni=str(input('Roll the dice (y/n): '))\nz=0\nwhile i == 'y':\n    a=randint(1, 6)\n    b=randint(1, 6)\n    c=randint(1, 6)\n    z=z+1\n    print('Your numbers are:',a,b,c)\n    if a == b and b == c:\n        print('You won, it only took you',z,'tries.')\n    else:\n        print('You lost')\n    i= str(input('Try again (y/n): '))\n","sub_path":"first/first/dado.py","file_name":"dado.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231393717","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom .base_dataset import Dataset as base_dataset\nfrom glob import glob\nfrom os.path import join\nimport torch\n\n\nclass Dataset(base_dataset):\n\n    @classmethod\n    def add_arguments(cls, parser):\n        parser.add_argument('--cache', action='store_true', help='cache the data into ram')\n        parser.add_argument('--subsample', action='store_true', help='subsample the video in time')\n        parser.add_argument('--track_id', default='train', type=str, help='the track id to load')\n        parser.add_argument('--overfit', action='store_true', help='overfit and see if things work')\n        parser.add_argument('--gaps', type=str, default='1,2,3,4', help='gaps for sequences')\n        parser.add_argument('--repeat', type=int, default=1, help='number of repetitions')\n        parser.add_argument('--select', action='store_true', help='pred')\n        return parser, set()\n\n    def __init__(self, opt, mode='train', model=None):\n        super().__init__(opt, mode, model)\n        self.mode = mode\n        assert mode in ('train', 'vali')\n\n        data_root = './datafiles/davis_processed'\n        # tracks = sorted(glob(join(data_root, 'frames_midas', '*')))\n        # tracks = [x.split('/')[-1] for x in tracks]\n        track_name = opt.track_id  # tracks[opt.track_id]\n        if model is None:\n            self.required = ['img', 'flow']\n            self.preproc = None\n        elif mode == 'train':\n            self.required = model.requires\n
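            # the model supplies both its required inputs and its preprocessing function\n            self.preproc 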
= model.preprocess\n else:\n self.required = ['img']\n self.preproc = model.preprocess\n\n frame_prefix = 'frames_midas'\n seq_prefix = 'sequences_select_pairs_midas'\n\n if mode == 'train':\n\n if self.opt.subsample:\n data_path = join(data_root, seq_prefix, track_name, 'subsample')\n else:\n data_path = join(data_root, seq_prefix, track_name, '%03d' % 1)\n\n gaps = opt.gaps.split(',')\n gaps = [int(x) for x in gaps]\n self.file_list = []\n for g in gaps:\n\n file_list = sorted(glob(join(data_path, f'shuffle_False_gap_{g:02d}_*.pt')))\n self.file_list += file_list\n\n frame_data_path = join(data_root, frame_prefix, track_name)\n self.n_frames = len(sorted(glob(join(frame_data_path, '*.npz')))) + 0.0\n\n else:\n data_path = join(data_root, frame_prefix, track_name)\n self.file_list = sorted(glob(join(data_path, '*.npz')))\n self.n_frames = len(self.file_list) + 0.0\n\n def __len__(self):\n if self.mode != 'train':\n return len(self.file_list)\n else:\n return len(self.file_list) * self.opt.repeat\n\n def __getitem__(self, idx):\n sample_loaded = {}\n if self.opt.overfit:\n idx = idx % self.opt.capat\n else:\n idx = idx % len(self.file_list)\n\n if self.opt.subsample:\n unit = 2.0\n else:\n unit = 1.0\n\n if self.mode == 'train':\n\n dataset = torch.load(self.file_list[idx])\n\n _, H, W, _ = dataset['img_1'].shape\n dataset['img_1'] = dataset['img_1'].permute([0, 3, 1, 2])\n dataset['img_2'] = dataset['img_2'].permute([0, 3, 1, 2])\n ts = dataset['fid_1'].reshape([-1, 1, 1, 1]).expand(-1, -1, H, W) / self.n_frames\n ts2 = dataset['fid_2'].reshape([-1, 1, 1, 1]).expand(-1, -1, H, W) / self.n_frames\n for k in dataset:\n if type(dataset[k]) == list:\n continue\n sample_loaded[k] = dataset[k].float()\n sample_loaded['time_step'] = unit / self.n_frames\n sample_loaded['time_stamp_1'] = ts.float()\n sample_loaded['time_stamp_2'] = ts2.float()\n sample_loaded['frame_id_1'] = np.asarray(dataset['fid_1'])\n sample_loaded['frame_id_2'] = np.asarray(dataset['fid_2'])\n\n else:\n dataset = np.load(self.file_list[idx])\n H, W, _ = dataset['img'].shape\n sample_loaded['time_stamp_1'] = np.ones([1, H, W]) * idx / self.n_frames\n sample_loaded['img'] = np.transpose(dataset['img'], [2, 0, 1])\n sample_loaded['frame_id_1'] = idx\n\n sample_loaded['time_step'] = unit / self.n_frames\n sample_loaded['depth_pred'] = dataset['depth_pred'][None, ...]\n sample_loaded['cam_c2w'] = dataset['pose_c2w']\n sample_loaded['K'] = dataset['intrinsics']\n sample_loaded['depth_mvs'] = dataset['depth_mvs'][None, ...]\n # add decomposed cam mat\n cam_pose_c2w_1 = dataset['pose_c2w']\n R_1 = cam_pose_c2w_1[:3, :3]\n t_1 = cam_pose_c2w_1[:3, 3]\n K = dataset['intrinsics']\n\n # for network use:\n R_1_tensor = np.zeros([1, 1, 3, 3])\n R_1_T_tensor = np.zeros([1, 1, 3, 3])\n t_1_tensor = np.zeros([1, 1, 1, 3])\n K_tensor = np.zeros([1, 1, 3, 3])\n K_inv_tensor = np.zeros([1, 1, 3, 3])\n R_1_tensor[..., :, :] = R_1.T\n R_1_T_tensor[..., :, :] = R_1\n t_1_tensor[..., :] = t_1\n K_tensor[..., :, :] = K.T\n K_inv_tensor[..., :, :] = np.linalg.inv(K).T\n\n sample_loaded['R_1'] = R_1_tensor\n sample_loaded['R_1_T'] = R_1_T_tensor\n sample_loaded['t_1'] = t_1_tensor\n sample_loaded['K'] = K_tensor\n sample_loaded['K_inv'] = K_inv_tensor\n sample_loaded['pair_path'] = self.file_list[idx]\n self.convert_to_float32(sample_loaded)\n return 
sample_loaded\n","sub_path":"datasets/davis_sequence.py","file_name":"davis_sequence.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519872713","text":"import sys\nimport numpy as np\nimport cv2\n\n\ndef extract_image(input_filename, output_filename):\n img = cv2.imread(input_filename, cv2.IMREAD_GRAYSCALE)\n img = np.asarray(img, dtype=int)\n img = img * 2\n my_file = open(output_filename, 'w')\n for x in np.nditer(np.asarray(img)):\n my_file.write(str(x) + '\\n')\n my_file.close()\n\n\nif __name__ == \"__main__\":\n extract_image(sys.argv[1], sys.argv[2])\n","sub_path":"python/extract_image.py","file_name":"extract_image.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191173207","text":"import Methods\nimport threading\n\nclass Function1 (threading.Thread):\n def __init__(self, sizeOfArrays, threadLock):\n self.size = sizeOfArrays\n self.threadLock = threadLock\n threading.Thread.__init__(self)\n\n def run(self):\n self.StartOfFunction()\n\n def StartOfFunction(self):\n methods = Methods.Methods(self.size)\n\n a = []\n b = []\n c = []\n d = []\n\n md = [[]]\n me =[[]]\n \n b = methods.fillVectorOfOne()\n c = methods.fillVectorOfOne()\n d = methods.fillVectorOfOne()\n md = methods.fillMatrixOfOne()\n me = methods.fillMatrixOfOne()\n\n a = methods.sumOfVectors(methods.sumOfVectors(b, c), methods.multOfVectorOnMatrix(d, methods.multOfMatrix(md, me)))\n\n \n #self.threadLock.acquire()\n print(\"Start Function1\")\n print(a)\n print(\"End Function1\\n\")\n # self.threadLock.release()\n \n","sub_path":"V семестр/Паралельне програмування/NoName/Lab7/Func1.py","file_name":"Func1.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166738492","text":"from datetime import datetime\nfrom util import Auth\nfrom app import db\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.ext.associationproxy import association_proxy\n\nfrom orm.entities import History\n\n\nclass HistoryVersionModel(db.Model):\n __tablename__ = 'history_version'\n historyVersionId = db.Column(db.Integer, primary_key=True, autoincrement=True)\n historyId = db.Column(db.Integer, db.ForeignKey(History.Model.__table__.c.historyId))\n createdBy = db.Column(db.Integer, nullable=False)\n createdAt = db.Column(db.DateTime, default=datetime.now, onupdate=datetime.now, nullable=False)\n\n def __init__(self, historyId):\n super(HistoryVersionModel, self).__init__(\n historyId=historyId,\n createdBy=Auth().get_user_id(),\n )\n\n db.session.add(self)\n db.session.flush()\n\n\nModel = HistoryVersionModel\n\n\ndef create(historyId):\n result = Model(historyId=historyId)\n\n return result\n","sub_path":"orm/entities/History/HistoryVersion.py","file_name":"HistoryVersion.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635796626","text":"import sqlite3 as sql\r\nimport os\r\n\r\nconn_user = sql.connect('users.db', check_same_thread=False)\r\nconn_dri = sql.connect('drivers.db', check_same_thread=False)\r\n\r\n\r\ndef user_get_new_id():\r\n c = conn_user.cursor()\r\n c.execute('select nid from sid')\r\n data = c.fetchall()\r\n nid = data[0][0]\r\n c.execute('update sid set nid = ? 
where s = 0;', (nid + 1, ))\r\n    conn_user.commit()\r\n    c.close()\r\n    return nid\r\n\r\n\r\ndef user_add(name, passwd, email):\r\n    c = conn_user.cursor()\r\n    c.execute('select * from users where name = ?;', (name,))\r\n    data = c.fetchall()\r\n    if len(data) != 0:\r\n        return 'Name used'\r\n    nid = user_get_new_id()\r\n    try:\r\n        c.execute('insert into users values (?, ?, ?, ?);', (name, passwd, email, nid))\r\n    except Exception as e:\r\n        conn_user.commit()\r\n        c.close()\r\n        return 'Fail, ' + str(e)\r\n    conn_user.commit()\r\n    c.close()\r\n    return 'Success'\r\n\r\n\r\ndef user_del(name, passwd):\r\n    c = conn_user.cursor()\r\n    c.execute('select * from users where name = ?;', (name, ))\r\n    data = c.fetchall()\r\n    if len(data) == 0:\r\n        conn_user.commit()\r\n        c.close()\r\n        return 'No such user'\r\n    if data[0][1] != passwd:\r\n        conn_user.commit()\r\n        c.close()\r\n        return 'Password error'\r\n    c.execute('delete from users where name = ?;', (name, ))\r\n    conn_user.commit()\r\n    c.close()\r\n    return 'Success'\r\n\r\n\r\ndef user_check(name, passwd):\r\n    c = conn_user.cursor()\r\n    c.execute('select * from users where name = ?;', (name, ))\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return 'No such user'\r\n    if data[0][1] != passwd:\r\n        return 'Password error'\r\n    return 'Success'\r\n\r\n\r\ndef user_get_email(name):\r\n    c = conn_user.cursor()\r\n    conn_user.commit()\r\n    c.execute('select email from users where name = ?;', (name, ))\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return 'Get None'\r\n    return data[0][0]\r\n\r\n\r\ndef user_all_name():\r\n    c = conn_user.cursor()\r\n    c.execute('select name from users')\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return []\r\n    return data\r\n\r\n\r\ndef user_get_name(uid):\r\n    c = conn_user.cursor()\r\n    c.execute('select name from users where uid = ?', (uid, ))\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return []\r\n    return data\r\n\r\n\r\ndef user_get_uid(name):\r\n    c = conn_user.cursor()\r\n    c.execute('select uid from users where name = ?', (name, ))\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return 0\r\n    return data[0][0]\r\n\r\n\r\ndef user_all():\r\n    c = conn_user.cursor()\r\n    c.execute('select name, email from users')\r\n    data = c.fetchall()\r\n    c.close()\r\n    if len(data) == 0:\r\n        return []\r\n    return data\r\n\r\n\r\ndef token_get_new_token(length=8):\r\n    return ''.join([('0'+hex(ord(os.urandom(1)))[2:])[-2:] for x in range(length)])\r\n\r\n\r\ndef driver_get_new_id():\r\n    c = conn_dri.cursor()\r\n    c.execute('select nid from sid')\r\n    data = c.fetchall()\r\n    nid = data[0][0]\r\n
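    # advance the stored next-id counter so following callers get a fresh id\r\n    c.execute('update sid set nid = ? 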
where s = 0;', (nid + 1, ))\r\n    conn_dri.commit()\r\n    c.close()\r\n    return nid\r\n\r\n\r\ndef driver_check(uid, token):\r\n    # assumed intent: look up this driver's stored token and compare it\r\n    c = conn_dri.cursor()\r\n    c.execute('select token from drivers where id = ?', (uid, ))\r\n    actoken = c.fetchall()[0][0]\r\n    c.close()\r\n    conn_dri.commit()\r\n    return actoken == token\r\n\r\n\r\ndef driver_add(uid, name, type_, ip='0.0.0.0'):\r\n    c = conn_dri.cursor()\r\n    nid = driver_get_new_id()\r\n    token = token_get_new_token()\r\n    try:\r\n        c.execute(\"insert into drivers (id, user, name, type, ip, token) values (?, ?, ?, ?, ?, ?)\",\r\n                  (nid, uid, name, type_, ip, token))\r\n    except Exception as e:\r\n        conn_dri.commit()\r\n        c.close()\r\n        return 'Fail, {0}'.format(str(e))\r\n    conn_dri.commit()\r\n    c.close()\r\n    return 'Success'\r\n\r\n\r\ndef driver_beat(uid, ip='0.0.0.0'):\r\n    c = conn_dri.cursor()\r\n    c.execute(\"select datetime('now', 'localtime')\")\r\n    datetime = c.fetchall()[0][0]\r\n    try:\r\n        c.execute('update drivers set last_beat = ?, ip = ? where id = ?', (datetime, ip, uid))\r\n    except Exception as e:\r\n        c.close()\r\n        conn_dri.commit()\r\n        return 'Fail to beat: {0}'.format(str(e))\r\n    c.close()\r\n    conn_dri.commit()\r\n    return 'Success'\r\n\r\n\r\ndef driver_get_all(did):\r\n    c = conn_dri.cursor()\r\n    c.execute('select * from drivers where id = ?', (did,))\r\n    data = c.fetchall()[0]\r\n    print(data)\r\n    c.close()\r\n    conn_dri.commit()\r\n    return data\r\n\r\n\r\ndef driver_del(did):\r\n    c = conn_dri.cursor()\r\n    c.execute('select * from drivers where id = ?', (did, ))\r\n    data = c.fetchall()\r\n    if len(data) == 0:\r\n        conn_dri.commit()\r\n        c.close()\r\n        return 'No such driver'\r\n    c.execute('delete from drivers where id = ?', (did, ))\r\n    conn_dri.commit()\r\n    c.close()\r\n    return 'Success'\r\n\r\n\r\ndef driver_get_user(uid):\r\n    c = conn_dri.cursor()\r\n    c.execute('select id from drivers where user = ?', (uid, ))\r\n    data = c.fetchall()\r\n    # print(data)\r\n    c.close()\r\n    conn_dri.commit()\r\n    return data\r\n\r\n\r\nif __name__ == '__main__':\r\n    # driver_add(1, 'Dri1', 'comm')\r\n    driver_beat(1)\r\n    driver_get_all(1)\r\n\r\n","sub_path":"IOT-Noodles/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90929456","text":"# -*- coding: utf-8 -*-\nfrom __future__ import annotations\n\nimport bpy\nfrom PyR3.shortcut.context import Objects\nfrom PyR3.shortcut.mesh import addCircle, fromPyData\nfrom PyR3.shortcut.transform import Transform\n\nfrom pygerber.mathclasses import Vector2D, angle_from_zero\nfrom pygerber.parser.blender.apertures.arc_mixin import ArcUtilMixinBlender\nfrom pygerber.parser.blender.apertures.flash_mixin import FlashUtilMixin\nfrom pygerber.renderer.aperture.circular import CircularAperture\nfrom pygerber.renderer.spec import ArcSpec, FlashSpec, LineSpec\n\n\nclass BlenderCircle(ArcUtilMixinBlender, FlashUtilMixin, CircularAperture):\n    @property\n    def RADIUS(self):\n        return self.DIAMETER / 2\n\n    def create_stamp_shape(self, spec: FlashSpec) -> bpy.types.Object:\n        return addCircle(\n            radius=self.RADIUS,\n            vertices=self.get_number_points_within_angle(radius=self.RADIUS),\n            location=spec.location.as_tuple_3D(),\n            fill_type=\"NGON\",\n        )\n\n    def all_joining_mesh(self, vertices: list) -> bpy.types.Object:\n        vertex_last_index = len(vertices) - 1\n        edges = [(i, i + 1) for i in range(vertex_last_index)] + [\n            (vertex_last_index, 0)\n        ]\n        edge_count = len(edges)\n        faces = [tuple(i for i in range(edge_count))]\n        return fromPyData(\n            vertices,\n            edges,\n            faces,\n        )\n\n
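    # a thick line segment is modeled as two capping arcs joined into a single face, then rotated into place\n    def 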
line(self, spec: LineSpec) -> None:\n end_point = spec.end - spec.begin\n length = end_point.length()\n left_arc_spec = ArcSpec(\n Vector2D(0.0, self.RADIUS),\n Vector2D(0.0, -self.RADIUS),\n Vector2D(0.0, 0.0),\n )\n vertices = [p.as_tuple_3D() for p in self.get_arc_points(left_arc_spec, True)]\n right_arc_spec = ArcSpec(\n Vector2D(length, -self.RADIUS),\n Vector2D(length, self.RADIUS),\n Vector2D(length, 0.0),\n )\n vertices.extend(\n [p.as_tuple_3D() for p in self.get_arc_points(right_arc_spec, True)]\n )\n line_mesh = self.all_joining_mesh(vertices)\n angle = angle_from_zero(end_point)\n Objects.select_only(line_mesh)\n override = bpy.context.copy()\n override[\"area\"] = [a for a in bpy.context.screen.areas if a.type == \"VIEW_3D\"][\n 0\n ]\n bpy.ops.transform.rotate(\n override,\n value=-angle,\n orient_axis=\"Z\",\n orient_type=\"GLOBAL\",\n center_override=(0.0, 0.0, 0.0),\n )\n Transform.move(spec.begin.as_tuple_3D())\n self.solidify(line_mesh, self.thickness)\n self.commit_mesh_to_root(line_mesh)\n\n def arc(self, spec: ArcSpec) -> None:\n pass\n","sub_path":"src/pygerber/parser/blender/apertures/circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460164776","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\ntraining_path = \"offenseval-gr-training-v1/offenseval-gr-training-v1.tsv\"\ntest_dir = \"offenseval-gr-testsetv1\"\n\nwith open(training_path, \"r\") as f:\n train_content = list(map(lambda s: s.strip().split(\"\\t\"), f.readlines()))\n train_content = train_content[1:]\n\ntrain_df = pd.DataFrame({\n \"content\": list(map(lambda l: l[1], train_content)),\n \"target\": list(map(lambda l: l[2], train_content))\n})\n\nwith open(os.path.join(test_dir, \"offenseval-gr-test-v1.tsv\"), \"r\") as f:\n test_text = list(map(lambda s: s.strip().split(\"\\t\"), f.readlines()))\n test_text = test_text[1:]\n\nwith open(os.path.join(test_dir, \"offenseval-gr-labela-v1.csv\"), \"r\") as f:\n test_labels = list(map(lambda s: s.strip().split(\",\"), f.readlines()))\n\ntest_df_txt = pd.DataFrame({\n \"content\": list(map(lambda l: l[1], test_text))\n})\n\ntest_df_lbl = pd.DataFrame({\n \"target\": list(map(lambda l: l[1], test_labels))\n})\n\ntest_df = pd.concat((test_df_txt, test_df_lbl), axis=1)\n\ntrain_df[\"content\"] = train_df[\"content\"].apply(lambda s: s.replace(\"\", \". \"))\ntest_df[\"content\"] = test_df[\"content\"].apply(lambda s: s.replace(\"\", \". 
\"))\n\ntrain_df[\"target\"] = train_df[\"target\"].apply(lambda str_label: {\"NOT\": 0, \"OFF\": 1}[str_label])\ntest_df[\"target\"] = test_df[\"target\"].apply(lambda str_label: {\"NOT\": 0, \"OFF\": 1}[str_label])\n\nindices = np.random.permutation(train_df.shape[0])\ntrain_indices = indices[: int(0.8 * indices.shape[0])]\ndev_indices = indices[int(0.8 * indices.shape[0]):]\n\ndev_df = train_df.iloc[dev_indices].reset_index(drop=True)\ntrain_df = train_df.iloc[train_indices].reset_index(drop=True)\n\nprint(f\"{train_df.shape[0]} train examples, \\n\"\n      f\"{dev_df.shape[0]} dev examples, \\n\"\n      f\"{test_df.shape[0]} test examples\")\n\ntrain_df.to_csv(\"train.csv\", sep=\",\", index=False)\ndev_df.to_csv(\"dev.csv\", sep=\",\", index=False)\ntest_df.to_csv(\"test.csv\", sep=\",\", index=False)\n","sub_path":"data/GRE/OffensEval2020/prepare_offenseval.py","file_name":"prepare_offenseval.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"210425121","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/Kamaelia/Util/OneShot.py\n# Compiled at: 2008-10-19 12:19:52\n\"\"\"=====================\nOne-shot sending data\n=====================\n\nOneShot and TriggeredOneShot send a single specified item to their \"outbox\"\noutbox and immediately terminate.\n\nTriggeredOneShot waits first for anything to arrive at its \"inbox\" inbox,\nwhereas OneShot acts as soon as it is activated.\n\nExample Usage\n-------------\n\nA way to create a component that writes data to a given filename, based on\n(filename,data) messages sent to its \"next\" inbox::\n\n    Carousel( lambda filename, data :\n                  Pipeline( OneShot(data),\n                            SimpleFileWriter(filename),\n                          ),\n            )\n\nA graphline that opens a TCP connection to myserver.com port 1500, and sends a\none-off message::\n\n    Pipeline( OneShot(\"data to send to server\"),\n              TCPClient(\"myserver.com\", 1500),\n            ).run()\n\nShutting down a connection to myserver.com port 1500 as soon as a reply is\nreceived from the server::\n\n    Graphline( NET = TCPClient(\"myserver.com\", 1500),\n               SPLIT = TwoWaySplitter(),\n               STOP = TriggeredOneShot(producerFinished()),\n               linkages = {\n                   (\"\", \"inbox\" ) : (\"NET\", \"inbox\"),\n                   (\"NET\", \"outbox\") : (\"SPLIT\", \"inbox\"),\n                   (\"SPLIT\", \"outbox\") : (\"\", \"outbox\"),\n\n                   (\"SPLIT\", \"outbox2\") : (\"STOP\", \"inbox\"),\n                   (\"STOP\", \"outbox\") : (\"NET\", \"control\"),\n                   (\"\", \"control\") : (\"NET\", \"control\"),\n                   (\"NET\", \"signal\") : (\"SPLIT\", \"control\"),\n                   (\"SPLIT\", \"signal\") : (\"\", \"signal\"),\n                   (\"SPLIT\", \"signal2\") : (\"STOP\", \"control\"),\n               },\n             )\n\nOneShot Behaviour\n-----------------\n\nAt initialisation, specify the message to be sent by OneShot.\n\nAs soon as OneShot is activated, the specified message is sent out of the\n\"outbox\" outbox. A producerFinished message is also sent out of the \"signal\"\noutbox. The component then immediately terminates.\n\nTriggeredOneShot Behaviour\n--------------------------\n\nAt initialisation, specify the message to be sent by TriggeredOneShot.\n\nSend anything to the \"inbox\" inbox and TriggeredOneShot will immediately send\nthe specified message out of the \"outbox\" outbox. A producerFinished message is\nalso sent out of the \"signal\" outbox. 
The component then immediately terminates.\n\nIf a producerFinished or shutdownMicroprocess message is received on the\n\"control\" inbox, it is immediately sent on out of the \"signal\" outbox and the\ncomponent then immediately terminates.\n\n\"\"\"\nfrom Axon.Component import component\nfrom Axon.Ipc import producerFinished, shutdownMicroprocess\n\nclass OneShot(component):\n    \"\"\" OneShot(msg) -> new OneShot component.\n\n    Immediately sends the specified message and terminates.\n\n    Keyword arguments::\n\n    - msg -- the message to send out\n    \"\"\"\n    Inboxes = {'inbox': 'NOT USED', 'control': 'Shutdown signalling'}\n    Outboxes = {'outbox': 'Item is sent out here', 'signal': 'Shutdown signalling'}\n\n    def __init__(self, msg=None):\n        \"\"\"x.__init__(...) initializes x; see x.__class__.__doc__ for signature\"\"\"\n        super(OneShot, self).__init__()\n        self.msg = msg\n\n    def main(self):\n        \"\"\"Main loop\"\"\"\n        self.send(self.msg, 'outbox')\n        yield 1\n        self.send(producerFinished(self), 'signal')\n\n\nclass TriggeredOneShot(component):\n    \"\"\" TriggeredOneShot(msg) -> new TriggeredOneShot component.\n\n    Waits for anything to arrive at its \"inbox\" inbox, then immediately sends\n    the specified message and terminates.\n\n    Keyword arguments::\n\n    - msg -- the message to send out\n    \"\"\"\n    Inboxes = {'inbox': 'Anything, as trigger', 'control': 'Shutdown signalling'}\n    Outboxes = {'outbox': 'Item is sent out here', 'signal': 'Shutdown signalling'}\n\n    def __init__(self, msg=None):\n        \"\"\"x.__init__(...) initializes x; see x.__class__.__doc__ for signature\"\"\"\n        super(TriggeredOneShot, self).__init__()\n        self.msg = msg\n\n    def main(self):\n        \"\"\"Main loop\"\"\"\n        while not self.dataReady('inbox'):\n            while self.dataReady('control'):\n                msg = self.recv('control')\n                self.send(msg, 'signal')\n                if isinstance(msg, (producerFinished, shutdownMicroprocess)):\n                    return\n\n            self.pause()\n            yield 1\n\n        self.recv('inbox')\n        self.send(self.msg, 'outbox')\n        yield 1\n        self.send(producerFinished(self), 'signal')\n\n\n__kamaelia_components__ = (\n    OneShot, TriggeredOneShot)","sub_path":"pycfiles/Kamaelia-0.6.0-py2.5/OneShot.py","file_name":"OneShot.py","file_ext":"py","file_size_in_byte":4880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"11091139","text":"import time\nstart_time = time.time()\nfrom pulp import *\nfrom openpyxl import load_workbook\n\n#For 9 scenarios#\nwb=load_workbook(filename= 'Compiled output.xlsx')\nws=wb['kmeans']\n\nwb2=load_workbook(filename= 'X_Sol values for 9 clusters.xlsx') #For 9 Clusters\nws2=wb2['Sheet1']\n\n\nM=10000000\nCap=1000000\ncin=305000\ncout=-457500\nd=1\nHorizon=24\nN=9 #For 9 Clusters\n\nT=list(range(Horizon))\nS=list(range(N))\n\np=[[0 for t in range(Horizon)] for s in range(N)]\nx_sol=[[0 for t in range(Horizon)] for s in range(N)]\n\nfor t in range(Horizon):\n    for s in range(N):\n        p[s][t]=ws.cell(row=t+6, column=s+38).value #For 9 scenarios\nfor t in range(Horizon):\n    for s in range(N):\n        x_sol[s][t]=ws2.cell(row=t+1, column=s+1).value #For 9 scenarios\nprob= LpProblem(\"EL-NR\", LpMinimize)\nx= LpVariable.matrix(\"x\", (T), None, None, LpContinuous)\nI= LpVariable.matrix(\"I\", (T), 0, None, LpContinuous)\nr= LpVariable('r', 0, None, LpContinuous)\n\n\nprob+= r\nfor s in S:\n    prob+=lpSum([-p[s][t]*x[t] for t in T]) + r >= lpSum([-p[s][t]*x_sol[s][t] for t in T])\n\nfor t in T:\n    prob+=cout<=x[t]\n    prob+=x[t]<=cin\n    prob+=I[t]<=Cap\n    if t>0:\n        prob+=I[t]==I[t-1]+x[t-1]\n\n# the cyclic inventory constraints and the solve are added once, outside the time loop\nprob+= I[Horizon-1]+x[Horizon-1]==I[0]\nprob+= I[0]==0\n\nprob.solve()\n\nprint(prob.objective.value())\n\nfor v in prob.variables():\n    if v.name[:1]==\"x\":\n        print(v.name, \"=\", v.varValue)\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"SLP with k=9 scenarios + MMR2.py","file_name":"SLP with k=9 scenarios + MMR2.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495161240","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom quadtree import Point, Rect, QuadTree\nfrom matplotlib import gridspec\n\nDPI = 72\nnp.random.seed(60)\n\nwidth, height = 600, 400\n\nN = 1500\ncoords = np.random.randn(N, 2) * height/3 + (width/2, height/2)\npoints = [Point(*coord) for coord in coords]\n\ndomain = Rect(width/2, height/2, width, height)\nqtree = QuadTree(domain, 3)\nfor point in points:\n    qtree.insert(point)\n\nprint('Number of points in the domain =', len(qtree))\n\nfig = plt.figure(figsize=(700/DPI, 500/DPI), dpi=DPI)\nax = plt.subplot()\nax.set_xlim(0, width)\nax.set_ylim(0, height)\nqtree.draw(ax)\n\nax.scatter([p.x for p in points], [p.y for p in points], s=4)\nax.set_xticks([])\nax.set_yticks([])\n\ncentre, radius = (width/2, height/2), 120\nfound_points = []\nqtree.query_radius(centre, radius, found_points)\nprint('Number of found points =', len(found_points))\n\nax.scatter([p.x for p in found_points], [p.y for p in found_points],\n           facecolors='none', edgecolors='r', s=32)\n\ncircle = plt.Circle(centre, radius, ec='r')\nRect(*centre, 2*radius, 2*radius).draw(ax, c='r')\n\nax.invert_yaxis()\nplt.tight_layout()\nplt.savefig('search-quadtree-circle.png', dpi=72)\nplt.show()","sub_path":"tests/qt_basic1.py","file_name":"qt_basic1.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"560746831","text":"from threading import Thread\nimport socket\nimport wx\n\nfrom NewMessageEvent import NewMessageEvent\nfrom MessageFormatter import MessageFormatter\n\nclass Client(Thread):\n\n    def __init__(self, gui):\n        Thread.__init__(self)\n        self.gui = gui\n\n    def run(self):\n        host = 'localhost'\n        port = 50000\n        size = 1024\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.connect((host, port))\n        self.s = s\n\n        while 1:\n            data = s.recv(size)\n            message = MessageFormatter.parse(data)\n            if message['id'] == '-1':\n                message['id'] = 'server'\n            event = NewMessageEvent(NewMessageEvent.NEW_MESSAGE_EVENT_TYPE, -1, message)\n            wx.PostEvent(self.gui, event)\n        s.close()\n\n    def send(self, data):\n        message = MessageFormatter.stringify(-1, data)\n        self.s.send(message)","sub_path":"Python/Chat/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199406220","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"\nExample script to export all elements.\n- follow the result and check the result is a valid zip file\n- use the exclude_trashed option\n- import an inconsistent import file and check the exception\n\"\"\"\n\n# Python Base Import\nimport logging\nimport zipfile\nimport smc.examples\n\n# Python SMC Import\nfrom smc import session\nfrom smc.administration.system import System\nfrom smc.api.exceptions import ActionCommandFailed\nfrom smc.core.engines import Layer2Firewall\nfrom smc_info import *\n\nif __name__ == \"__main__\":\n    try:\n        session.login(url=SMC_URL, api_key=API_KEY, verify=False, timeout=120, api_version=API_VERSION)\n\n        print(\"session OK\")\n\n        # try export all\n        system = System()\n        export_zip = \"/tmp/export_test.zip\"\n\n        # check trashed host is in export (default case)\n        # using the SMC Client need first to create a testHostTrashed Host and trash it\n        system.export_elements(export_zip, timeout=5, max_tries=50)\n        the_zip_file = zipfile.ZipFile(export_zip)\n        data_xml = the_zip_file.open('exported_data.xml').read()\n        assert data_xml.find('testHostTrashed'.encode()) > -1,\\\n            \"Host testHostTrashed not found in export\"\n\n        # use exclude_trashed=true parameter and check trashed host NOT in export\n        system.export_elements(export_zip, timeout=5, max_tries=50, exclude_trashed=True)\n        the_zip_file = zipfile.ZipFile(export_zip)\n        data_xml = the_zip_file.open('exported_data.xml').read()\n        assert data_xml.find('testHostTrashed'.encode()) == -1, \"Host testHostTrashed found in export\"\n\n        valid_zip = the_zip_file.testzip()\n\n        # check export all is valid\n        if valid_zip is not None:\n            logging.warning(\"Invalid zip file\")\n        else:\n            logging.info(\"Zip file is valid\")\n\n        logging.info(\"Export firewall\")\n        # try export firewall\n        l2FW = Layer2Firewall(\"Atlanta L2 FW\")\n        for interface in l2FW.interface:\n            logging.info(\"interface=\" + str(interface))\n        l2FW.export(\"/tmp/Atlantal2FW.zip\")\n\n        # try import corrupted file\n        logging.info(\"Import Corrupted file\")\n        try:\n            system.import_elements(\"/tmp/WRONG_Atlantal2FW.zip\")\n        except ActionCommandFailed as exception:\n            logging.warning(\"Import result: \" + str(exception))\n\n    except BaseException as e:\n        print(\"ex={}\".format(e))\n        exit(-1)\n    finally:\n        session.logout()\n","sub_path":"smc/examples/export_all.py","file_name":"export_all.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391906910","text":"# -*- coding: utf-8 -*-\n# @Author: Puffrora\n# @Date: 2020-08-19 22:32:37\n# @Last Modified by: Puffrora\n# @Last Modified time: 2020-08-19 23:25:51\n\n\nclass Solution:\n\tdef maxNumber(self, nums1, nums2, k):\n\n\t\tdef get_k_comb(arr, k):\n\t\t\tto_delete = len(arr) - k\n\t\t\tmono_stack = []\n\t\t\tfor i in arr:\n\t\t\t\twhile to_delete and mono_stack and mono_stack[-1] < i:\n\t\t\t\t\tto_delete -= 1\n\t\t\t\t\tmono_stack.pop()\n\t\t\t\tmono_stack.append(i)\n\t\t\tprint(mono_stack[:k])\n\t\t\treturn mono_stack[:k]\n\n\t\t# Note: naively merging [6] and [6, 7] by alternation\n\t\t# can produce either [6, 6, 7] or [6, 7, 6]\n\t\t# Python's list comparison is used to decide which side to pop from\n\t\tdef merge(arr1, arr2):\n\t\t\tres = []\n\t\t\twhile arr1 or arr2:\n\t\t\t\tbigger = arr1 if arr1 > arr2 else arr2\n\t\t\t\tres.append(bigger[0])\n\t\t\t\tbigger.pop(0)\n\t\t\treturn res\n\n\t\treturn max([merge(get_k_comb(nums1, i), get_k_comb(nums2, k-i)) for i in range(k+1) if i <= len(nums1) and k-i <= 
len(nums2)])\n\n'''\nprint(Solution().maxNumber([2,5,6,4,4,0],[7,3,8,0,6,5,7,6,2],15))\n'''\n","sub_path":"Leetcode/leetcode321 拼接最大数.py","file_name":"leetcode321 拼接最大数.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495233537","text":"# Copyright 2020 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nfrom __future__ import absolute_import\n\n\nNOT_CACHED = object()\n\n\nclass CachedProperty(object):\n \"\"\" Property that calls getter only the first time it is required\n\n It invokes property setter with the value returned by getter the first time\n it is required so that it is not requested again until del operator is not\n called again on top of target object.\n\n Implements default setter and deleter behaving as a regular attribute would\n by setting or removing named attribute of target object __dict__\n\n The name used for storing cached property value is got from getter function\n name if not passed as constructor attribute.\n\n Examples of use:\n\n class MyClass(object):\n\n @cached\n def my_property(self):\n return object()\n\n\n obj = MyClass()\n # my_property method not yet called\n assert 'my_property' not in obj.__dict__\n\n # my_property method is called\n first_value = obj.my_property\n assert obj.__dict__['my_property'] is first_value\n\n # my_property method is not called again\n assert obj.my_property is first_value\n\n # first value is removed from dictionary\n del obj.my_property\n assert 'my_property' not in obj.__dict__\n\n # my_property method is called\n second_value = obj.my_property\n assert obj.__dict__['my_property'] is second_value\n\n # value returned by second call of method can be different\n second_value is not first_value\n\n For more details about how Python properties works please refers to\n language documentation [1]\n\n [1] https://docs.python.org/3/howto/descriptor.html\n \"\"\"\n\n fget = None\n fset = None\n fdel = None\n __doc__ = None\n cached_id = None\n\n def __init__(self, fget=None, fset=None, fdel=None, doc=None,\n cached_id=None):\n if fget:\n self.getter(fget)\n if fset:\n self.setter(fset)\n if fdel:\n self.deleter(fdel)\n if doc:\n self.__doc__ = doc\n if cached_id:\n self.cached_id = cached_id\n elif self.cached_id is None:\n self.cached_id = '_cached_' + str(id(self))\n\n def getter(self, fget):\n assert callable(fget)\n self.fget = fget\n if self.__doc__ is None:\n self.__doc__ = fget.__doc__\n return fget\n\n def setter(self, fset):\n self.fset = fset\n return fset\n\n def deleter(self, fdel):\n self.fdel = fdel\n return fdel\n\n def __get__(self, obj, _objtype=None):\n if obj is None:\n return self\n\n value = self._get_cached(obj)\n if value is NOT_CACHED:\n if self.fget is None:\n raise AttributeError(\"Cached property has no getter method\")\n value = self.fget(obj)\n self.__set__(obj, value)\n\n return value\n\n def __set__(self, obj, value):\n if self.fset:\n self.fset(obj, value)\n self._set_cached(obj, value)\n\n def 
__delete__(self, obj):\n if self.fdel:\n self.fdel(obj)\n self._delete_cached(obj)\n\n def _get_cached(self, obj):\n return getattr(obj, self.cached_id, NOT_CACHED)\n\n def _set_cached(self, obj, value):\n setattr(obj, self.cached_id, value)\n\n def _delete_cached(self, obj):\n return obj.__dict__.pop(self.cached_id, NOT_CACHED)\n\n\ndef cached(*args, **kwargs):\n return CachedProperty(*args, **kwargs)\n","sub_path":"tobiko/common/_cached.py","file_name":"_cached.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"585472030","text":"\r\n\r\nimport numpy as np\r\nimport json\r\nimport pandas as pd\r\nfrom six import iteritems\r\nfrom sklearn.model_selection import ParameterGrid\r\n\r\nfrom utils.scoring import BIO_scorer\r\nfrom models.Base import Base\r\nfrom models.LSTM_CRF import LSTM_CRF\r\nimport os\r\nimport joblib\r\nimport re \r\nimport shutil\r\n\r\nclass NeuralSpanClassifier(Base):\r\n \r\n '''\r\n Sequence classifier\r\n '''\r\n \r\n def __init__(self, \\\r\n hyperparams, \\\r\n param_sweep, \\\r\n cv, \\\r\n metric, \\\r\n average, \\\r\n begin_prefix, \\\r\n in_prefix, \\\r\n outside_label, \\\r\n descrip, \\\r\n path, \\\r\n feat_params, \\\r\n name,\r\n ):\r\n\r\n self.hyperparams = hyperparams\r\n self.param_sweep = param_sweep\r\n self.cv = cv\r\n self.metric = metric\r\n self.average = average\r\n self.begin_prefix = begin_prefix\r\n self.in_prefix = in_prefix\r\n self.outside_label = outside_label\r\n self.descrip = descrip\r\n self.path = path\r\n self.feat_params = feat_params\r\n \r\n \r\n self.estimator_class = LSTM_CRF\r\n self.fit_method = 'fit'\r\n self.prob_method = 'predict_marginals'\r\n self.pred_method = 'predict'\r\n self.get_params_method = 'get_params'\r\n \r\n self.scorer = BIO_scorer(\\\r\n begin_prefix=self.begin_prefix, \\\r\n in_prefix=self.in_prefix, \\\r\n outside_label=self.outside_label, \\\r\n flatten=True, \\\r\n rm_start_padding=True,\r\n )\r\n self.hyperparams['scorer'] = self.scorer\r\n \r\n super().__init__( \\\r\n estimator_class = self.estimator_class, \\\r\n fit_method = self.fit_method, \\\r\n prob_method = self.prob_method, \\\r\n pred_method = self.pred_method, \\\r\n get_params_method = self.get_params_method, \\\r\n hyperparams = self.hyperparams, \\\r\n param_sweep = self.param_sweep, \\\r\n cv = self.cv, \\\r\n metric = self.metric, \\\r\n average = self.average, \\\r\n scorer = self.scorer, \\\r\n neg_label = self.outside_label, \\\r\n seq_of_seq = True, \\\r\n descrip = descrip, \\\r\n path = path, \\\r\n feat_params = feat_params, \\\r\n name = name,\r\n ) \r\n\r\n\r\n def dump(self, dest):\r\n '''\r\n Save classifier to directory, with default model name\r\n '''\r\n # Model path\r\n model_path = self.estimator.model_path\r\n\r\n # Model directory\r\n model_dir = os.path.dirname(model_path)\r\n model_base = os.path.basename(model_path)\r\n \r\n # All model files\r\n all_files = [os.path.join(model_dir, f) \\\r\n for f in os.listdir(model_dir)]\r\n model_files = [f for f in all_files if re.search(model_base, f)]\r\n \r\n # Copy files\r\n msg = 'Could not find tensorflow model files at {} matching pattern {}'.format( \\\r\n model_dir, model_base)\r\n assert len(model_files) > 0, msg\r\n for f in model_files:\r\n shutil.copy(f, dest)\r\n \r\n # Update model directory \r\n new_hyperparams = self.hyperparams.copy()\r\n new_hyperparams['model_dir'] = dest\r\n \r\n # Create classifier with copied TF model\r\n new_classifier = 
NeuralSpanClassifier( \\\r\n hyperparams = new_hyperparams, \\\r\n param_sweep = self.param_sweep, \\\r\n cv = self.cv, \\\r\n metric = self.metric, \\\r\n average = self.average, \\\r\n begin_prefix = self.begin_prefix, \\\r\n in_prefix = self.in_prefix, \\\r\n outside_label = self.outside_label, \\\r\n descrip = self.descrip, \\\r\n path = self.path, \\\r\n feat_params = self.feat_params, \\\r\n name = self.name,\r\n )\r\n\r\n # Save\r\n joblib.dump(new_classifier, os.path.join(dest, self.model_file))\r\n \r\n ","sub_path":"code/tensorflow_models/NeuralSpanClassifier.py","file_name":"NeuralSpanClassifier.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177593767","text":"import numpy as np\nfrom update import UpdateCell\n\n\nclass PD:\n cdic = {1: 'blue',\n 0: 'red'}\n\n cdic_hist = {(0, 0): 'red',\n (1, 1): 'blue',\n (0, 1): 'green',\n (1, 0): 'yellow'}\n\n def __init__(self, t):\n self.t_pay = t # temptation for defecting\n\n def create_init(self, size, prob=1):\n \"\"\"Create the initial array.\"\"\"\n if 0 < prob < 1:\n return init_random(prob, size)\n elif prob == 1:\n return init_mid(size)\n\n def run(self, ca, n=1, method='lazy'):\n \"\"\"Run PD game for 'n' timesteps and return the resulting\n arrays as a tuple.\"\"\"\n t = self.t_pay\n out = [ca]\n for _ in range(n):\n ca_next = run_once(out[-1], t, method)\n out.append(ca_next.copy())\n return tuple(out)\n\n\n# Class instance\nnbr = UpdateCell(8)\n\n\n#\n# Initial array\n#\ndef init_random(p, s):\n \"\"\"Generate a random array with 0s (defectors) and 1s\n (coorperators) with probability '1-p' and 'p',\n respectively.\"\"\"\n return np.random.choice(2, size=(s, s), p=[1-p, p])\n\n\ndef init_mid(s):\n \"\"\"Generate array with a single defector in the middle of the\n grid---or approximately in the middle when side length is an even\n number.\"\"\"\n m, arr = (s-1)//2, np.ones((s, s))\n arr[m, m] = 0\n return arr\n\n\n#\n# Functions used to run one time step\n#\n\ndef run_once(ca, t_pay, m):\n a_pay = nbr.cell_and_nbrs(payoff_array(ca, t_pay))\n a_str = nbr.cell_and_nbrs(ca)\n ca_new = np.empty(ca.shape)\n for index in np.ndindex(ca.shape):\n ca_new[index] = strategy(a_pay[index], a_str[index], m)\n return ca_new\n\n\ndef payoff_array(ca, t_pay):\n return nbr.update_cell(ca, lambda arr: payoff(t_pay, arr))\n\n\ndef payoff(t_pay, arr):\n \"\"\"Returns cell payoff from pairwise interactions with neighbors.\"\"\"\n total = np.sum(arr)\n return total if arr[0] else t_pay*total\n\n\ndef strategy(arr_pay, arr_strat, method):\n m = np.max(arr_pay)\n b = arr_pay == m\n if np.sum(b) == 1:\n return arr_strat[b][0]\n elif np.sum(b) > 1: # more than one max val\n return dic_funcs[method](arr_strat, b)\n\n\n#\n# Update rules for when defecting and cooperating yields the same payoff\n#\n\ndef pick_lazy(arr_strat, b):\n if b[0]: # if focal cell is one of the maxs...\n return arr_strat[0] # stick to it\n else: # Otherwise...\n a = arr_strat[b]\n return np.random.choice(a) # pick one of the strats randomly\n\n\ndef pick_coop_bias(arr_strat, b):\n a = arr_strat[b]\n if 1 in a:\n return 1\n else:\n return 0\n\n\ndef pick_defect_bias(arr_strat, b):\n a = arr_strat[b]\n if 0 in a:\n return 0\n else:\n return 1\n\n\ndef pick_indifferent(arr_strat, b):\n a = arr_strat[b]\n return np.random.choice(a)\n\n\ndic_funcs = {'lazy': pick_lazy,\n 'coop_bias': pick_coop_bias,\n 'defect_bias': pick_defect_bias,\n 'indifferent': 
pick_indifferent}\n","sub_path":"pd_game.py","file_name":"pd_game.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"225970532","text":"from flask import Flask, render_template, request, redirect, url_for, jsonify, abort\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport sys\n\n# Create a Flask application that obtains its name from the name of our file, which in this case is \"app\"\napp = Flask(__name__)\n\n# Create connection to local postgreSQL database \napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://Mireysa:password@localhost:5432/planner'\n\n# Define a database object which links SQLAlchemy to our application\ndb = SQLAlchemy(app)\n\n# Setup Flask-Migrate to allow us to upgrade, downgrade, perform migrations, etc. \nmigrate = Migrate(app, db)\n\n# Model for Guest column \nclass Guest(db.Model):\n __tablename__ = 'guest'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(), nullable=False)\n rsvp = db.Column(db.Boolean, nullable=False, default=False)\n table_id = db.Column(db.Integer, db.ForeignKey('tables.id'), nullable=False)\n\n def __repr__(self):\n return f''\n\n# Model for Tables\nclass Table(db.Model):\n __tablename__ = 'tables'\n id = db.Column(db.Integer, primary_key=True)\n number = db.Column(db.String(), nullable=False)\n guests = db.relationship('Guest', backref='tablenumber', lazy=True)\n\n\n# Creates tables from models if they do not exist in the given database\n# db.create_all() \n\n# route that allows us to delete a guest object\n@app.route('/guest//', methods=['DELETE'])\ndef delete_guest(guest_id):\n try:\n Guest.query.filter_by(id=guest_id).delete()\n db.session.commit()\n except:\n db.session.rollback()\n finally: \n db.session.close()\n return jsonify({'success': True})\n\n# route that listens for creation of a guest object\n@app.route('/guest/create', methods=['POST'])\ndef create_guest():\n error = False\n body = {}\n try:\n # Obtain response \n name = request.get_json()['name']\n table_id = request.get_json()['table_id']\n \n # Create Guest objects given the response\n guest = Guest(name=name)\n active_list = Table.query.get(table_id)\n guest.tablenumber = active_list\n db.session.add(guest)\n db.session.commit()\n body['name'] = guest.name\n except:\n error = True\n db.session.rollback()\n print(sys.exc_info())\n finally: \n db.session.close()\n if not error:\n return jsonify(body)\n else:\n abort(500)\n\n# route that allows us to update the guest's rsvp status\n@app.route('/guest//set-rsvp', methods=['POST'])\ndef set_rsvp_guest(guest_id):\n try:\n rsvp = request.get_json()['rsvp']\n guest = Guest.query.get(guest_id)\n guest.rsvp = rsvp\n db.session.commit()\n except:\n db.session.rollback()\n finally: \n db.session.close()\n return redirect(url_for('index')) \n\n# route that sets our home page to a particular table\n@app.route('/table/')\ndef get_table_guest(table_id):\n return render_template('index.html', \n tables=Table.query.all(),\n active_list=Table.query.get(table_id),\n guests=Guest.query.filter_by(table_id=table_id).order_by('id').all())\n\n# route that listens to our home page\n@app.route('/')\ndef index():\n return redirect(url_for('get_table_guest', table_id=1))\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154619547","text":"# encoding: 
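# A short driver for the spatial Prisoner's Dilemma module above: with the
# default prob=1, create_init() places a single defector in the middle of a
# lattice of cooperators (init_mid), and run() advances the cellular
# automaton. Grid size, temptation payoff and step count are illustrative.
import numpy as np

game = PD(t=1.9)                      # temptation payoff for defecting
grid = game.create_init(size=21)      # 21x21 lattice, lone central defector
history = game.run(grid, n=5, method='lazy')
for step, ca in enumerate(history):
    print(step, 'cooperators:', int(np.sum(ca)))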
utf-8\n'''\nCreated on 2016年5月11日\n\n@author: tongxy\n'''\n\nimport datetime\n\nfrom settings import logger\nfrom common.pgcomm import PGUtils\nfrom common.sqlcomm import formatSql\nfrom common.log_db import insertlog\nfrom common.time_utils import get_month_range\nfrom common.syscomm import SYS_LOG_TYPE_REPORT,SYS_LOG_RESULT_OK\n\n# 插入指定月份的数据\nsql_insert = '''\nINSERT INTO edw.rpt_salerate_subbranch_month(\nyear_id ,\nmonth_id ,\nyear_month_id ,\nechelon ,\nsubbranch_id ,\ntrans_amnt_total ,\nsales_amnt ,\nsales_rate \n) \nSELECT %s,%s,%s,b.echelon ,a.org_id ,COALESCE(trans_amnt_total,0.00),COALESCE(sales_amnt,0.00),CASE WHEN trans_amnt_total IS NULL THEN 0.00 ELSE ROUND(trans_amnt_total/sales_amnt,2) END AS sales_rate\nFROM (\n SELECT org_id,sales_amnt FROM edw.sales_goal_month where sales_year= %s and sales_month=%s) a LEFT JOIN edw.org_organization b\n ON a.org_id=b.org_id LEFT JOIN \n (SELECT subbranch_id,SUM(capital) AS trans_amnt_total FROM edw.trans_detail \n WHERE business_code='00' and status='0' AND cfm_date >= %s AND cfm_date < %s\n GROUP BY subbranch_id) c\n ON a.org_id=c.subbranch_id\nWHERE b.org_type='b' AND b.echelon IS NOT NULL\n'''\n\ndef insert_month(conn, year_month_id, year_month_next):\n '''插入指定月底时点分公司业绩状况数据'''\n d0 = datetime.datetime.now()\n year = year_month_id[0:4]\n month = year_month_id[5:7]\n year_month_str = year_month_id\n year_month = year + month\n year_month_next = year_month_next[0:4] + year_month_next[5:7]\n param_val = (\n year,month,year_month_str,year,month,\n year_month+'01',year_month_next+'01',\n )\n with conn.cursor() as cur:\n logger.debug(formatSql(sql_insert)%param_val)\n logger.debug(\"... param value: \" + str(param_val))\n cur.execute(formatSql(sql_insert), param_val)\n d1 = datetime.datetime.now()\n logger.info(\"月底时点分公司业绩状况完成. 月: %s; 耗时: %s 秒. \" % (year_month_id, (d1-d0).seconds))\n\n# 查询最近的年月\nsql_max_yearmonth = '''\nSELECT MAX(year_month_id) FROM edw.rpt_salerate_subbranch_month\n'''\n\ndef get_startDate(conn):\n '''确定开始日期'''\n with conn.cursor() as cur:\n logger.debug(formatSql(sql_max_yearmonth))\n cur.execute(formatSql(sql_max_yearmonth))\n rec = cur.fetchone()\n if rec and rec[0]:\n return rec[0]\n else:\n return '2012-01'\n\n# 删除指定月的数据\nsql_del = '''\nDELETE FROM edw.rpt_salerate_subbranch_month WHERE year_month_id=%s\n'''\n\ndef del_month(conn, year_month_id):\n '''删除指定月的数据'''\n d0 = datetime.datetime.now()\n with conn.cursor() as cur:\n logger.debug(formatSql(sql_del))\n logger.debug(\"... param value: \" + year_month_id)\n cur.execute(formatSql(sql_del), (year_month_id,))\n d1 = datetime.datetime.now()\n logger.info(\"删除数据完成, 月: %s, 耗时: %s 秒. 
\" % (year_month_id, (d1 - d0).seconds))\n\ndef deal(sys_date, batch_id, data_date):\n '''\n 月底时点分公司业绩状况统计\n '''\n d0 = datetime.datetime.now()\n logger.info('start...月底时点分公司业绩状况统计')\n \n pgUtils = PGUtils()\n conn = pgUtils.getConnection()\n try:\n with conn:\n startDate = get_startDate(conn)\n endDate = data_date[0:4] + '-' + data_date[4:6]\n logger.info(\"月范围: [%s, %s]\" % (startDate, endDate))\n month_ids = get_month_range()\n ilen = len(month_ids)\n for i in range(0, ilen):\n year_month_id = month_ids[i]\n year_month_next = month_ids[i+1]\n if year_month_id >= startDate and year_month_id <= endDate:\n del_month(conn, year_month_id)\n insert_month(conn, year_month_id, year_month_next)\n elif year_month_id > endDate:\n break\n d1 = datetime.datetime.now()\n log_content = \"月底时点分公司业绩状况统计完成.月范围: [%s, %s].\" % (startDate, endDate)\n insertlog(conn, sys_date, batch_id, SYS_LOG_TYPE_REPORT, SYS_LOG_RESULT_OK, log_content, d0, d1, (d1-d0).seconds)\n logger.info(\"end... 月底时点分公司业绩状况统计完成. 耗时: %s 秒. \" % (d1-d0).seconds)\n finally:\n conn.close()\n return True","sub_path":"etl/datadeal/scripts/rptmonth/rpt_salerate_subbranch_month.py","file_name":"rpt_salerate_subbranch_month.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111247574","text":"from markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nimport re\n\n\nclass ImagePathModifyTreeprocessor(Treeprocessor):\n def run(self, root):\n for img in root.getiterator('img'):\n img.set('src', re.sub(r'\\.\\.(/)', r'\\1', img.get('src')))\n\n\nclass ImagePathModifyExtension(Extension):\n def extendMarkdown(self, md):\n md.treeprocessors.register(\n ImagePathModifyTreeprocessor(md),\n 'image_path_modify',\n 15,\n )\n\n\ndef makeExtension(**kwargs):\n return ImagePathModifyExtension(**kwargs)\n","sub_path":"extensions/markdown/image_path_modify.py","file_name":"image_path_modify.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"505879840","text":"'''\nEasy balance checking\n\nYou are given a (small) check book as a - sometimes - cluttered (by non-alphanumeric characters) string:\n\n\"1000.00\n125 Market 125.45\n126 Hardware 34.95\n127 Video 7.45\n128 Book 14.32\n129 Gasoline 16.10\"\nThe first line shows the original balance. Each other line (when not blank) gives information: check number, category, check amount.\n\nFirst you have to clean the lines keeping only letters, digits, dots and spaces.\n\nThen return a report as a string (underscores show spaces -- don't put them in your solution. They are there so you can see them and how many of them you need):\n\n\"Original_Balance:_1000.00\n125_Market_125.45_Balance_874.55\n126_Hardware_34.95_Balance_839.60\n127_Video_7.45_Balance_832.15\n128_Book_14.32_Balance_817.83\n129_Gasoline_16.10_Balance_801.73\nTotal_expense__198.27\nAverage_expense__39.65\"\nOn each line of the report you have to add the new balance and then in the last two lines the total expense and the average expense. 
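# A usage sketch for the ImagePathModify extension above: the treeprocessor
# rewrites img src values such as '../static/logo.png' to '/static/logo.png'.
# One caveat: Element.getiterator() was removed in Python 3.9, so on newer
# interpreters the loop would need root.iter('img') instead.
import markdown

html = markdown.markdown('![logo](../static/logo.png)',
                         extensions=[ImagePathModifyExtension()])
print(html)  # <p><img alt="logo" src="/static/logo.png" /></p>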
So as not to have a too long result string we don't ask for a properly formatted result.\n'''\n\nimport re\nfrom decimal import Decimal\n\ndef balance(book):\n ## clean white space, then create list of entries in the checkbook\n\n book = re.sub(\"/(\\r\\n)(\\r\\n)+|\\r\\r+|\\n\\n+|\\t\\t+/i\",\"\\n\",book.lstrip().rstrip()) \n entries = book.splitlines()\n\n ## extract balance, then initialize result and expense (to 0.00)\n\n balance = Decimal((re.search(r'\\d+\\.?\\d?\\d?',book).group(0))).quantize(Decimal('0.01'))\n result = \"Original Balance: {}\\r\\n\".format(balance)\n expense = Decimal(0.00)\n\n ## iterate through the entries, extract everything up to price, then price\n ## then calculate balance and expenses and rewrite each entry\n \n for i in range(1,len(entries)):\n product = re.findall(r'\\d\\d\\d\\s.*\\s',entries[i])[0] \n price = Decimal(re.findall(r'\\d+\\.\\d\\d?',entries[i])[0]).quantize(Decimal('.01')) \n balance = Decimal(balance - price).quantize(Decimal('.01')) \n expense = Decimal(expense + price).quantize(Decimal('.01'))\n entries[i] = product + str(price) + ' Balance ' + str(balance) \n entries[i] = re.sub(r'[!@#$%^&*()_+{}:\"<>\\?\\-\\=\\;]','',entries[i])\n\n ## calculate avg expense, then prepare result (from entries) and return\n\n avg_expense = round(expense/(len(entries)-1),2) \n result = result + ('\\r\\n'.join(entries[1:])) + '\\r\\nTotal expense {}\\r\\n'.format(expense) + 'Average expense {}'.format(avg_expense)\n return result\n \nb1 = \"\"\"\n\n\n1000 !=\n125 Market !=:125.45\n\n\n\n126 Hardware =34.95\n127 Video! 7.45\n128 Book :14.32\n\n129 Gasoline ::16.10\n\"\"\"\nprint(balance(b1))","sub_path":"6kyu/6kyu_balance.py","file_name":"6kyu_balance.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345381047","text":"import os\r\n\r\nfrom django.db import models\r\n\r\nfrom actividades.models import Tipo_Actividad, Actividad\r\nfrom presupuesto.models import Categoria, Movimiento\r\nfrom proyectos_sociales.models import Tipo_Proyecto, Proyecto\r\nfrom .models import *\r\n# Create your models here.\r\nfrom django.utils.safestring import mark_safe\r\nfrom matplotlib import pyplot\r\nfrom os import remove\r\n\r\nclass Informes(models.Model):\r\n\r\n\r\n def eventosInforme(self):\r\n return mark_safe('Informe General de Actividades')\r\n\r\n def eventosPendientesInforme(self):\r\n return mark_safe(\r\n 'Informe de Actividades Pendientes')\r\n\r\n def eventosRealizadosInforme(self):\r\n return mark_safe(\r\n 'Informe de Actividades Realizadas')\r\n\r\n class Meta:\r\n db_table = 'informe_general'\r\n verbose_name= 'Informes_generales'\r\n verbose_name_plural= 'Informes de Eventos'\r\n\r\nclass InformePastores(models.Model):\r\n\r\n def pastoresInforme(self):\r\n return mark_safe('Informe de Pastores')\r\n\r\n class Meta:\r\n db_table = 'informe_pastor'\r\n verbose_name= 'Informe_pastores'\r\n verbose_name_plural= 'Informe Pastores'\r\n\r\n\r\nclass ProyectoPorRango(models.Model):\r\n nombre='Seleccione un rango de fechas'\r\n fecha_inicio = models.DateField('Fecha De Inicio')\r\n fecha_limite = models.DateField('Fecha De Limite')\r\n\r\n def proyectoInforme(self):\r\n return mark_safe('Proyectos por rango')\r\n\r\n def save(self, *args, **kwargs):\r\n\r\n # now = datetime.now()\r\n tipos = Tipo_Proyecto.objects.all()\r\n proyecto = Proyecto.objects.all()\r\n motivo = []\r\n slices = []\r\n colores = ('blue', 'green', 'red', '#30f8ff', '#59ff30', '#e8f00e', 
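# The Decimal bookkeeping in balance() above, in isolation: quantizing to
# Decimal('0.01') after every operation keeps exactly two decimal places,
# which is why the running balances match the docstring. (Aside: the cleanup
# pattern "/(\r\n)(\r\n)+|\r\r+|\n\n+|\t\t+/i" keeps JavaScript-style
# delimiters, so its outer alternatives expect literal '/' characters in
# Python's re; the inner \r\r+ and \n\n+ alternatives are what actually
# collapse the blank lines.)
from decimal import Decimal

bal = Decimal('1000.00')
for amount in ('125.45', '34.95', '7.45', '14.32', '16.10'):
    bal = (bal - Decimal(amount)).quantize(Decimal('0.01'))
print(bal)  # 801.73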
'#ff5405')\r\n contador = 0\r\n suma = 0\r\n\r\n for elemento in tipos:\r\n motivo.append(elemento.nombre)\r\n\r\n for e in proyecto:\r\n var1 = elemento.nombre\r\n var2 = e.tipo_proyecto.nombre\r\n if (var2 == var1 and e.fecha >= self.fecha_inicio and e.fecha <= self.fecha_limite):\r\n suma += e.donaciones\r\n print(suma)\r\n slices.append(suma)\r\n suma = 0\r\n print(slices)\r\n print(motivo)\r\n\r\n pyplot.rcParams['toolbar']\r\n _, _, texto = pyplot.pie(slices, labels=slices, colors=colores, autopct='%1.1f%%')\r\n\r\n for tex in texto:\r\n tex.set_color('white')\r\n\r\n pyplot.legend(labels=motivo)\r\n pyplot.axis('equal')\r\n pyplot.title('Proyectos Realizados')\r\n pyplot.savefig('media/graficas/rangoproyecto.png')\r\n pyplot.clf()\r\n super(ProyectoPorRango, self).save(*args, **kwargs)\r\n\r\n class Meta():\r\n db_table = 'proyecto_por_rango'\r\n verbose_name = 'Proyecto Por Rango de Fecha'\r\n verbose_name_plural = 'Proyectos Por Rango de Fecha'\r\n\r\n\r\nclass InformeTipoProyecto(models.Model):\r\n nombre = 'Seleccione un rango de fechas y un tipo de proyecto'\r\n fecha_inicio = models.DateField('Fecha De Inicio')\r\n fecha_limite = models.DateField('Fecha De Limite')\r\n tipo = models.ForeignKey(Tipo_Proyecto, on_delete=models.CASCADE)\r\n\r\n def proyectoInforme(self):\r\n return mark_safe('Proyectos por tipo')\r\n\r\n def save(self, *args, **kwargs):\r\n\r\n proyecto = Proyecto.objects.all()\r\n motivo = []\r\n slices = []\r\n colores = ('blue', 'green', 'red', '#30f8ff', '#59ff30', '#e8f00e', '#ff5405')\r\n contador = 0\r\n suma = 0\r\n meses = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\r\n inicio = self.fecha_inicio.month\r\n fin = self.fecha_limite.month\r\n print(inicio)\r\n print(fin)\r\n\r\n for e in meses:\r\n if (e >= inicio and e <= fin):\r\n if e == 1:\r\n motivo.append('Enero')\r\n if e == 2:\r\n motivo.append('Febrero')\r\n if e == 3:\r\n motivo.append('Marzo')\r\n if e == 4:\r\n motivo.append('Abril')\r\n if e == 5:\r\n motivo.append('Mayo')\r\n if e == 6:\r\n motivo.append('Junio')\r\n if e == 7:\r\n motivo.append('Julio')\r\n if e == 8:\r\n motivo.append('Agosto')\r\n if e == 9:\r\n motivo.append('Septiembre')\r\n if e == 10:\r\n motivo.append('Octubre')\r\n if e == 11:\r\n motivo.append('Noviembre')\r\n if e == 12:\r\n motivo.append('Dicimbre')\r\n\r\n for elemento in proyecto:\r\n if (self.tipo.nombre == elemento.tipo_proyecto.nombre and elemento.fecha.month == e):\r\n contador += elemento.donaciones\r\n slices.append(contador)\r\n contador = 0\r\n\r\n pyplot.rcParams['toolbar']\r\n _, _, texto = pyplot.pie(slices, labels=slices, colors=colores, autopct='%1.1f%%')\r\n for tex in texto:\r\n tex.set_color('white')\r\n\r\n pyplot.legend(labels=motivo)\r\n pyplot.axis('equal')\r\n pyplot.title(self.tipo.nombre)\r\n pyplot.savefig('media/graficas/tipo_proyecto_rango.png')\r\n pyplot.clf()\r\n\r\n super(InformeTipoProyecto, self).save()\r\n\r\n class Meta():\r\n db_table = 'informe_por_tipo'\r\n verbose_name = 'Informe Por Tipo'\r\n verbose_name_plural = 'Informes Tipo'\r\n\r\nclass ActividadPorRango(models.Model):\r\n nombre='Seleccione un rango de fechas'\r\n fecha_inicio = models.DateField('Fecha De Inicio')\r\n fecha_limite = models.DateField('Fecha De Limite')\r\n\r\n def actividadInforme(self):\r\n return mark_safe('Actividades por rango')\r\n\r\n def save(self, *args, **kwargs):\r\n\r\n # now = datetime.now()\r\n tipos = Tipo_Actividad.objects.all()\r\n proyecto = Actividad.objects.all()\r\n motivo = []\r\n slices = []\r\n colores = ('blue', 'green', 'red', 
'#30f8ff', '#59ff30', '#e8f00e', '#ff5405')\r\n contador = 0\r\n suma = 0\r\n\r\n for elemento in tipos:\r\n motivo.append(elemento.nombre)\r\n\r\n for e in proyecto:\r\n var1 = elemento.nombre\r\n var2 = e.tipo_actividad.nombre\r\n if (var2 == var1 and e.fecha >= self.fecha_inicio and e.fecha <= self.fecha_limite):\r\n contador += 1\r\n print(contador)\r\n slices.append(contador)\r\n contador = 0\r\n print(slices)\r\n print(motivo)\r\n\r\n pyplot.rcParams['toolbar']\r\n _, _, texto = pyplot.pie(slices, labels=slices, colors=colores, autopct='%1.1f%%')\r\n\r\n for tex in texto:\r\n tex.set_color('white')\r\n\r\n pyplot.legend(labels=motivo)\r\n pyplot.axis('equal')\r\n pyplot.title('Grafica de Actividades Realizadas')\r\n pyplot.savefig('media/graficas/rangoactividad.png')\r\n pyplot.clf()\r\n super(ActividadPorRango, self).save(*args, **kwargs)\r\n\r\n class Meta():\r\n db_table = 'actividad_por_rango'\r\n verbose_name = 'Actividad Por Rango de Fecha'\r\n verbose_name_plural = 'Actividades Por Rango de Fecha'\r\n\r\nclass PresupuestoPorRango(models.Model):\r\n nombre='Seleccione un rango de fechas'\r\n fecha_inicio = models.DateField('Fecha De Inicio')\r\n fecha_limite = models.DateField('Fecha De Limite')\r\n gasto=models.FloatField(default=0.00)\r\n\r\n def presupuestoInforme(self):\r\n return mark_safe('Presupuesto por rango')\r\n\r\n def save(self, *args, **kwargs):\r\n\r\n # now = datetime.now()\r\n tipos = Categoria.objects.all()\r\n mov = Movimiento.objects.all()\r\n motivo = [' ']\r\n slices = []\r\n colores = ('white', 'green', 'red', '#30f8ff', '#59ff30', '#e8f00e', '#ff5405')\r\n contador = 0\r\n suma = 0\r\n total=0\r\n for elemento in tipos:\r\n if elemento.tipo != \"Ingreso\":\r\n motivo.append(elemento.nombre)\r\n for e in mov:\r\n var1 = elemento.nombre\r\n var2 = e.motivo.nombre\r\n if (var2 == var1 and e.fecha >= self.fecha_inicio and e.fecha <= self.fecha_limite):\r\n if e.tipo != \"Ingreso\":\r\n suma += (e.cantidad)\r\n total+=(suma)\r\n slices.append(suma)\r\n suma = 0\r\n self.gasto=total\r\n print(slices)\r\n print(motivo)\r\n print(total)\r\n pyplot.rcParams['toolbar']\r\n _, _, texto = pyplot.pie(slices, labels=slices, colors=colores, autopct='%1.1f%%')\r\n\r\n for tex in texto:\r\n tex.set_color('white')\r\n\r\n pyplot.legend(labels=motivo)\r\n pyplot.axis('equal')\r\n pyplot.title('Grafica de Presupuesto')\r\n pyplot.savefig('media/graficas/rangopresupuesto.png')\r\n pyplot.clf()\r\n super(PresupuestoPorRango, self).save(*args, **kwargs)\r\n\r\n class Meta():\r\n db_table = 'presupuesto_por_rango'\r\n verbose_name = 'Presupuesto Por Rango de Fecha'\r\n verbose_name_plural = 'Presupuesto Por Rango de Fecha'","sub_path":"informes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484780718","text":"from PyQt5 import QtWidgets\n\nfrom point_spectra_gui.ui.RenameData import Ui_Form\nfrom point_spectra_gui.util.Modules import Modules\n\n\nclass RenameData(Ui_Form, Modules):\n def setupUi(self, Form):\n super().setupUi(Form)\n Modules.setupUi(self, Form)\n\n def get_widget(self):\n return self.groupBox\n\n def rename_data(self):\n new_data_name = self.toDataLineEdit.text()\n old_data_name = self.renameDataComboBox.currentText()\n if new_data_name != '':\n data_index = [i for i,x in enumerate(self.datakeys) if x==old_data_name][0] #assumes only one data set with the old name...\n self.list_amend(self.datakeys,data_index,new_data_name)\n 
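# The report models above all repeat one matplotlib recipe; here it is in
# isolation with sample data (the output path is illustrative). Note that
# the bare pyplot.rcParams['toolbar'] lines in the models only read the
# setting and have no effect.
from matplotlib import pyplot

slices = [3, 5, 2]
labels = ['Enero', 'Febrero', 'Marzo']
_, _, texto = pyplot.pie(slices, labels=slices, autopct='%1.1f%%')
for tex in texto:          # autopct texts drawn inside the wedges
    tex.set_color('white')
pyplot.legend(labels=labels)
pyplot.axis('equal')       # keep the pie circular
pyplot.title('Proyectos Realizados')
pyplot.savefig('media/graficas/ejemplo.png')
pyplot.clf()               # reset state for the next figure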
self.data[new_data_name] = self.data[old_data_name]\n del self.data[old_data_name]\n\n def connectWidgets(self):\n self.setComboBox(self.renameDataComboBox, self.datakeys)\n\n def run(self):\n self.rename_data()\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n Form = QtWidgets.QWidget()\n ui = RenameData()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n","sub_path":"point_spectra_gui/core/RenameData.py","file_name":"RenameData.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405375288","text":"import datetime\nimport django\nfrom django.forms.models import model_to_dict\nimport sys\nimport time\nimport tweepy\nimport os\nimport requests\n\n# need to point Django at the right settings to access pieces of app\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"twotebotapi.settings\"\ndjango.setup()\n\nfrom twotebotapp.models import Tweets, AppConfig\nfrom twotebotapp.serializers import TweetSerializer\nfrom twotebotapp.secrets import listener, sender \n\n\nclass TwitterBot:\n \"\"\"\n Class that helps run the actions in an end to end test of the twitter bot.\n \"\"\"\n def __init__(self, account):\n # tw_api is access point to all Tweepy methods\n self.tw_api = self.setup_tw_api(account)\n self.user_info = self.get_user_info()\n\n def setup_tw_api(self, account):\n auth = tweepy.OAuthHandler(account[\"CONSUMER_KEY\"], account[\"CONSUMER_SECRET\"])\n auth.set_access_token(account[\"ACCESS_TOKEN\"], account[\"ACCESS_TOKEN_SECRET\"])\n return tweepy.API(auth)\n\n def get_user_info(self):\n user_obj = self.tw_api.me()\n\n result = {}\n result[\"id\"] = user_obj._json[\"id\"]\n result[\"screen_name\"] = user_obj._json[\"screen_name\"]\n return result\n\n def get_tweets(self):\n return self.tw_api.user_timeline(self.user_info[\"id\"])\n\n def clean_tweets(self):\n \"\"\"\n Get 20 most recent tweets from user and delete all.\n \"\"\"\n tweets = self.tw_api.user_timeline(self.user_info[\"id\"])\n tweet_ids = [status._json[\"id\"] for status in tweets]\n\n for tw_id in tweet_ids:\n self.tw_api.destroy_status(tw_id)\n\n\ndef test_correct_keyword_no_time_room(l_bot, s_bot, keyword):\n \"\"\"\n Send a tweet with keyword stream is listening for, but not including \n a time and room. Should not get @ mention.\n \"\"\"\n # user sends a tweet containing the correct keyword but not \n s_tweet = \"test 1: {}\".format(keyword)\n s_bot.tw_api.update_status(s_tweet)\n time.sleep(5)\n\n # no action should be taken by l_bot, checking that no retweets sent\n l_tweets = l_bot.get_tweets()\n assert (len(l_tweets) == 0), \"tweet where there shouldn't be\"\n\n s_bot.clean_tweets()\n\ndef test_correct_keyword_with_room_time(l_bot, s_bot, keyword):\n \"\"\"\n Send a tweet with room and time that should get @ mention. \n \"\"\"\n s_tweet = \"test 2: {} @ 6pm room H112\".format(keyword)\n s_bot.tw_api.update_status(s_tweet)\n time.sleep(5)\n\n l_tweets = l_bot.get_tweets()\n assert (len(l_tweets) == 1),\"one tweet expected: {} found\".format(len(l_tweets))\n\n mention = \"@{}\".format(s_bot.user_info[\"screen_name\"])\n assert (mention in l_tweets[0]._json[\"text\"]), \"correct user not mentioned\"\n\n s_bot.clean_tweets()\n l_bot.clean_tweets()\n\ndef test_adding_bot_to_ignore_list_works_as_expected(l_bot, s_bot, keyword):\n \"\"\"\n Update ignore list with sending bot's id and then send tweet that should \n be retweeted, test that listener correctly ignores tweet. 
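# The copy-then-delete in rename_data() above can be a single atomic step
# with dict.pop, which also avoids leaving both keys behind if execution
# stops between the two statements:
data = {'old_name': [1, 2, 3]}
data['new_name'] = data.pop('old_name')
print(data)  # {'new_name': [1, 2, 3]}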
\n \"\"\"\n # create a new AppConifg model instance with s_bot id in ignore_users\n # app_config should still be set to auto_send: false\n start_conf = model_to_dict(AppConfig.objects.latest(\"id\"))\n test_conf = {\n \"auto_send\": start_conf[\"auto_send\"],\n \"default_send_interval\": start_conf[\"default_send_interval\"],\n \"ignore_users\": [s_bot.user_info[\"id\"]]\n }\n AppConfig.objects.create(**test_conf)\n\n # send a tweet from s_bot that should get an @ mention\n s_tweet = \"test 3: {} @ 6pm room H112\".format(keyword)\n s_bot.tw_api.update_status(s_tweet)\n time.sleep(5)\n\n # check that tweet wasn't sent form listen bot\n l_tweets = l_bot.get_tweets()\n assert (len(l_tweets) == 0), \"tweet where there shouldn't be\"\n\n # set AppConfig latest record back to starting state with only id changed\n test_conf[\"ignore_users\"] = []\n AppConfig.objects.create(**test_conf)\n\n # clean twitter accounts \n s_bot.clean_tweets()\n l_bot.clean_tweets()\n\ndef test_valid_tweet_causes_bot_to_send_retweet_about_event(l_bot, s_bot, keyword):\n \"\"\"\n This test requires the celery and rabbitmq to be running in the background\n and will send a retweet after the delay time. \n \"\"\"\n # turn auto_send on, this will cause a valid tweet to be retweeted\n # a minute after it is recived. \n test_conf = {\n \"auto_send\": 1,\n \"default_send_interval\": 1,\n \"ignore_users\": []\n }\n AppConfig.objects.create(**test_conf)\n\n # send a tweet from s_bot that should get an @ mention and retweet 1 min later\n s_tweet = \"test 4: {} @ 6pm room H112\".format(keyword)\n s_bot.tw_api.update_status(s_tweet)\n print(\"sleeping to wait for retweet\")\n time.sleep(80)\n\n # check that two tweets have been sent from bot's account\n l_tweets = l_bot.get_tweets()\n assert (len(l_tweets) == 2),\"two tweet expected: {} found\".format(len(l_tweets))\n\n s_bot.clean_tweets()\n l_bot.clean_tweets()\n\ndef interface(keyword):\n \"\"\"\n In order for these tests to run correctly they will clear the test\n twitter accounts of all messages at the start and inbetween tests.\n\n The twitter bot must be running and listening for a unique keyword.\n This keyword should be passed in as a command line arg to this script.\n\n Celery and Rabbitmq should also be running as normal. \n\n The bot maintains the state of ignored users so it must be \n restarted if running this script more than once. 
\n \"\"\"\n listen_bot = TwitterBot(listener)\n send_bot = TwitterBot(sender)\n\n # clean both twitter accounts\n listen_bot.clean_tweets()\n send_bot.clean_tweets()\n\n # setup latest Appconfig object to not auto_send tweets unitl needed\n test_conf = {\n \"auto_send\": 0,\n \"default_send_interval\": 1,\n \"ignore_users\": []\n }\n AppConfig.objects.create(**test_conf)\n\n #test 1:\n test_correct_keyword_no_time_room(listen_bot, send_bot, keyword)\n\n #test 2: \n test_correct_keyword_with_room_time(listen_bot, send_bot, keyword)\n\n #test 3:\n test_adding_bot_to_ignore_list_works_as_expected(listen_bot, send_bot, keyword)\n\n #test 4: \n test_valid_tweet_causes_bot_to_send_retweet_about_event(listen_bot, send_bot, keyword)\n\ndef cli_interface():\n \"\"\"\n wrapper_cli method that interfaces from commandline to function space\n call the script with: \n python end_to_end.py \n \"\"\"\n try:\n keyword = sys.argv[1]\n\n except:\n print(\"usage: {} \".format(sys.argv[0]))\n sys.exit(1)\n interface(keyword)\n\n\nif __name__ == \"__main__\":\n cli_interface()","sub_path":"twotebotapi/end_to_end.py","file_name":"end_to_end.py","file_ext":"py","file_size_in_byte":6680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"146915398","text":"# --------------------------------------------------------------------------------------------\r\n# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License. See License.txt in the project root for license information.\r\n# --------------------------------------------------------------------------------------------\r\n# Generated file, DO NOT EDIT\r\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\r\n# --------------------------------------------------------------------------------------------\r\n\r\nfrom msrest.serialization import Model\r\n\r\n\r\nclass Package(Model):\r\n \"\"\"Package.\r\n\r\n :param _links:\r\n :type _links: :class:`ReferenceLinks `\r\n :param deleted_date: If and when the package was deleted\r\n :type deleted_date: datetime\r\n :param id:\r\n :type id: str\r\n :param name: The display name of the package\r\n :type name: str\r\n :param permanently_deleted_date: If and when the package was permanently deleted.\r\n :type permanently_deleted_date: datetime\r\n :param version: The version of the package\r\n :type version: str\r\n \"\"\"\r\n\r\n _attribute_map = {\r\n '_links': {'key': '_links', 'type': 'ReferenceLinks'},\r\n 'deleted_date': {'key': 'deletedDate', 'type': 'iso-8601'},\r\n 'id': {'key': 'id', 'type': 'str'},\r\n 'name': {'key': 'name', 'type': 'str'},\r\n 'permanently_deleted_date': {'key': 'permanentlyDeletedDate', 'type': 'iso-8601'},\r\n 'version': {'key': 'version', 'type': 'str'}\r\n }\r\n\r\n def __init__(self, _links=None, deleted_date=None, id=None, name=None, permanently_deleted_date=None, version=None):\r\n super(Package, self).__init__()\r\n self._links = _links\r\n self.deleted_date = deleted_date\r\n self.id = id\r\n self.name = name\r\n self.permanently_deleted_date = permanently_deleted_date\r\n self.version = version\r\n","sub_path":"flask_api/venv/lib/python3.7/site-packages/vsts/maven/v4_1/models/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554813725","text":"from flask import Flask, render_template, request, jsonify\nimport sqlite3\napp = 
Flask(__name__)\n\nconnection = sqlite3.connect('database.db')\ncursor = connection.cursor()\n\n@app.route('/')\ndef home():\n\treturn render_template('home.html')\n\n@app.route('/new', methods = ['GET'])\ndef new():\n\treturn render_template('new.html')\n\n@app.route('/movie', methods = ['POST'])\ndef movie():\t\n\tconnection = sqlite3.connect(\"database.db\")\n\tcursor = connection.cursor()\n\tif request.method == \"POST\":\n\t\ttry:\n\t\t\t# print(\"try block\")\n\t\t\ttitle = request.form[\"title\"]\n\t\t\t# print(title)\t\n\t\t\tquery = \"INSERT INTO movies values (\\\"{}\\\")\".format( title)\n\t\t\t# print (query)\n\t\t\tcursor.execute( 'INSERT INTO movies values(\\\"{}\\\")'.format( title) )\n\t\t\tconnection.commit()\n\t\t\tmessage = \"good\"\n\t\texcept:\n\t\t\tconnection.rollback()\n\t\t\tmessage = \"no good\"\n\t\tfinally:\t\t\n\t\t\tconnection.close()\n\t\treturn render_template('result.html', message = message)\n\n@app.route('/movies', methods = ['GET'])\ndef movies():\n\tconnection = sqlite3.connect(\"database.db\")\n\tcursor = connection.cursor()\n\tif request.method == \"GET\":\n\t\ttry:\n\t\t\tall_movies_cursor = cursor.execute('SELECT * FROM movies')\n\t\t\tresults = all_movies_cursor.fetchall()\n\t\texcept:\n\t\t\treturn \"something wrong\"\n\t\tfinally:\n\t\t\tconnection.close()\n\t\treturn jsonify( results)\n\n@app.route('/search', methods = ['GET'])\ndef search():\n\tprint( \"hit search\" )\n\ttitle = request.args.get(\"title\")\n\tprint( title )\n\tconnection = sqlite3.connect(\"database.db\")\n\tcursor = connection.cursor()\n\tif request.method == \"GET\":\n\t\ttry:\n\t\t\ttitle_query = \"SELECT * FROM movies WHERE title = \\\"{}\\\"\".format( title )\n\t\t\tquery_result_cursor = cursor.execute( title_query )\t\n\t\t\tprint( title_query )\n\t\t\tresult = query_result_cursor.fetchall()\t\n\t\t\tprint (result)\n\t\t\tmessage = \"found\"\n\t\texcept:\n\t\t\tmessage = \"something wrong\"\n\t\t\treturn message\n\t\tfinally:\n\t\t\tconnection.close()\n\treturn jsonify( result )\n","sub_path":"movie_server/movie_server.py","file_name":"movie_server.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613857056","text":"#!/usr/bin/env python\n\nimport sys\n\ntry:\n import urwid\nexcept ImportError as error:\n print >> sys.stderr, \"Failed to import urwid: %s\" % (error)\n sys.exit(1)\n\n\n# color palette\npalette = [\n ('titlebar', 'dark red', 'black'),\n ('body', 'white', 'default'),\n ('statusbar', 'white', 'dark blue')\n]\n\n\nclass TitleBar (urwid.WidgetWrap):\n \"\"\"titlebar widget\"\"\"\n\n def __init__(self, text):\n text_widget = urwid.Text(text)\n map_widget = urwid.AttrWrap(text_widget, 'titlebar')\n super(urwid.WidgetWrap, self).__init__(map_widget)\n\n\nclass Body (urwid.WidgetWrap):\n \"\"\"body widget\"\"\"\n\n def __init__(self, text):\n self.text_widget = urwid.Text(text)\n self._w = urwid.Filler(self.text_widget)\n self._w = urwid.AttrWrap(self._w, 'body')\n\n def update(self, text):\n self.text_widget.set_text(text)\n\n\nclass Prompt(urwid.Edit):\n \"\"\"prompt widget\"\"\"\n\n signals = ['update']\n\n def keypress(self, size, key):\n \"\"\"process prompt input\"\"\"\n\n # quit on esc\n if key == 'esc':\n sys.exit(0)\n\n # normal behavior as the input is an enter\n elif key != 'enter':\n urwid.Edit.keypress(self, size, key)\n\n # when we press enter, clear the prompt and send a signal to\n # update the body widget text\n else:\n text = self.get_edit_text()\n 
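# The movie_server routes above splice user input into SQL with str.format,
# which breaks on titles containing quotes and is injectable. sqlite3
# placeholders fix both; a sketch against the same one-column schema:
import sqlite3

conn = sqlite3.connect('database.db')
cur = conn.cursor()
title = 'Alien: "Director\'s Cut"'   # would break the format() version
cur.execute('INSERT INTO movies VALUES (?)', (title,))
cur.execute('SELECT * FROM movies WHERE title = ?', (title,))
print(cur.fetchall())
conn.commit()
conn.close()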
self.set_edit_text('')\n urwid.emit_signal(self, 'update', text)\n return\n\n\nclass Window (urwid.Frame):\n \"\"\"window widget\"\"\"\n\n pass\n\n\nif __name__ == '__main__':\n \"\"\"main\"\"\"\n\n titlebar = TitleBar('titlebar: enter text in prompt and press enter. ' +\n 'press esc to quit')\n\n body = Body('body')\n prompt = Prompt('> ')\n\n window = Window(body,\n header=titlebar,\n footer=prompt,\n focus_part='footer')\n\n # create the event loop\n loop = urwid.MainLoop(\n window,\n palette,\n unhandled_input=input,\n handle_mouse=False\n )\n\n # signals - the prompt box emits the signal from the keypress function and\n # the signal calls body's update method\n urwid.connect_signal(prompt, 'update', body.update)\n\n # start the event loop\n loop.run()\n","sub_path":"python/third-party/urwid/ui5.py","file_name":"ui5.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"438289477","text":"class No:\n def __init__(self, info=None, ante=None, prox=None):\n self.ante=ante\n self.prox=prox\n self.info=info\n\n\nclass ListaDuplamenteEncadeada:\n def __init__(self):\n self.cabeca = None\n self.rabo = None\n\n\n def inserir(self, info):\n\n novo = No(info, None, None)\n\n if(self.cabeca == None):\n self.cabeca = novo\n self.rabo = novo\n else:\n self.rabo.prox = novo\n novo.ante = self.rabo\n self.rabo = novo\n\n def remover(self, info):\n aux=self.cabeca\n\n while(aux != None and aux.info != info):\n aux = aux.prox\n\n return aux\n\n def exibir(self):\n aux = self.cabeca\n while(aux != None):\n print(aux.info, end=' ')\n aux=aux.prox\n\n\nif __name__ == '__main__':\n lista = ListaDuplamenteEncadeada()\n\n\n for i in [10,30,5,4,29]:\n lista.inserir(i)\n\n lista.exibir()\n","sub_path":"estrutura_dados_udemy/listas/ListaDuplamenteEncadeada.py","file_name":"ListaDuplamenteEncadeada.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95364652","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport re\nimport sys\nimport time\nimport json\nimport socket\nimport logging\nimport argparse\nfrom functools import partial\nfrom logging.handlers import RotatingFileHandler\nfrom datetime import datetime, timedelta, date\nfrom xml.sax.saxutils import escape, unescape\n\n#\n# default variables\n#\n__version__ = '1.2.12'\ndebug = False\ntoday = date.today()\nua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'\nreq_timeout = 5\nreq_sleep = 1\nloglevel = logging.DEBUG if debug else logging.INFO\n\n#\n# logging\n#\nlog = logging.getLogger(__name__)\nlog.setLevel(loglevel)\n\nlog_fmt = \"%(asctime)-15s %(levelname)-8s %(lineno)03d %(message)s\"\nformatter = logging.Formatter(log_fmt, datefmt='%Y/%m/%d %H:%M:%S')\n\n# logging to file\nfilehandler = RotatingFileHandler(\n __file__ + '.log',\n maxBytes=1024 * 1000, backupCount=10, encoding='utf-8'\n)\nfilehandler.setLevel(loglevel)\nfilehandler.setFormatter(formatter)\nlog.addHandler(filehandler)\n\n# logging to console, stderr by default\nconsolehandler = logging.StreamHandler()\nconsolehandler.setLevel(loglevel)\nconsolehandler.setFormatter(formatter)\nlog.addHandler(consolehandler)\n\n#\n# import third-parties\n#\ntry:\n from bs4 import BeautifulSoup, SoupStrainer\nexcept ImportError:\n log.error(\"BeautifulSoup 모듈이 설치되지 않았습니다.\")\n 
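# remover() in ListaDuplamenteEncadeada above only locates the node and
# returns it; nothing is unlinked. A version that actually removes (sketch,
# keeping the same attribute names):
def remover(self, info):
    aux = self.cabeca
    while aux is not None and aux.info != info:
        aux = aux.prox
    if aux is None:
        return None                  # value not found
    if aux.ante is not None:
        aux.ante.prox = aux.prox
    else:
        self.cabeca = aux.prox       # removing the head
    if aux.prox is not None:
        aux.prox.ante = aux.ante
    else:
        self.rabo = aux.ante         # removing the tail
    return aux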
sys.exit(1)\ntry:\n import lxml\n htmlparser = 'lxml'\nexcept ImportError:\n log.warning(\"lxml 모듈이 설치되지 않아 html.parser로 동작합니다. 속도가 느립니다.\")\n htmlparser = 'html.parser'\ntry:\n import requests\nexcept ImportError:\n log.error(\"requests 모듈이 설치되지 않았습니다.\")\n sys.exit(1)\n\nif list(sys.version_info[:2]) < [3, 5]:\n log.error(\"python 3.5+에서 실행하세요.\")\n sys.exit(1)\n\n\n# Get epg data\ndef getEpg():\n # XML 헤더 시작\n print('')\n print('\\n')\n print('')\n\n # My Channel 정의\n if debug:\n MyChannelInfo = [str(ch) for ch in range(500)] # debug\n else:\n MyChannelInfo = [ch.strip() for ch in MyChannels.split(',') if ch]\n\n ChannelInfos = []\n for Channeldata in Channeldatajson: # Get Channel & Print Channel info\n if (Channeldata['Source'] in ['KT', 'LG', 'SK', 'SKB', 'NAVER']) and (str(Channeldata['Id']) in MyChannelInfo):\n ChannelId = Channeldata['Id']\n ChannelName = escape(Channeldata['Name'])\n ChannelSource = Channeldata['Source']\n ChannelServiceId = Channeldata['ServiceId']\n ChannelIconUrl = escape(Channeldata['Icon_url'])\n ChannelInfos.append([ChannelId, ChannelName, ChannelSource, ChannelServiceId])\n print(' ' % ChannelId)\n if MyISP != \"ALL\" and Channeldata[MyISP+'Ch'] is not None:\n ChannelNumber = str(Channeldata[MyISP+'Ch'])\n ChannelISPName = escape(Channeldata[MyISP+' Name'])\n print(' %s' % ChannelName)\n print(' %s' % ChannelISPName)\n print(' %s' % ChannelNumber)\n print(' %s' % (ChannelNumber+' '+ChannelISPName))\n elif MyISP == \"ALL\":\n print(' %s' % ChannelName)\n if IconUrl:\n print(' ' % (IconUrl, ChannelId))\n else:\n print(' ' % ChannelIconUrl)\n print(' ')\n\n # Print Program Information\n GetEPGFromKT([info for info in ChannelInfos if info[2] == 'KT'])\n GetEPGFromLG([info for info in ChannelInfos if info[2] == 'LG'])\n GetEPGFromSK([info for info in ChannelInfos if info[2] == 'SK'])\n GetEPGFromSKB([info for info in ChannelInfos if info[2] == 'SKB'])\n GetEPGFromNaver([info for info in ChannelInfos if info[2] == 'NAVER'])\n\n # 여기서부터는 기존의 채널 필터(My Channel)를 사용하지 않음\n GetEPGFromWAVVE([c for c in Channeldatajson if c['Source'] == 'POOQ' or c['Source'] == 'WAVVE'])\n\n print('')\n log.info('종료합니다.')\n\n\ndef GetEPGFromKT(ChannelInfos):\n if ChannelInfos:\n log.info('KT EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n url = 'https://tv.kt.com/tv/channel/pSchedule.asp'\n referer = 'https://tv.kt.com/'\n params = {\n 'ch_type': '1',\n 'view_type': '1',\n 'service_ch_no': 'SVCID',\n 'seldate': 'EPGDATE',\n }\n\n sess = requests.session()\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n for ChannelInfo in ChannelInfos:\n epginfo = []\n for k in range(period):\n day = today + timedelta(days=k)\n params.update({'service_ch_no': ChannelInfo[3], 'seldate': day.strftime('%Y%m%d')})\n try:\n response = sess.post(url, data=params, timeout=req_timeout)\n response.raise_for_status()\n soup = BeautifulSoup(response.text, htmlparser, parse_only=SoupStrainer('tbody'))\n html = soup.find_all('tr') if soup.find('tbody') else ''\n if html:\n for row in html:\n for cell in [row.find_all('td')]:\n startTime = endTime = programName = subprogramName = desc = actors = producers = category = episode = ''\n rebroadcast = False\n for minute, program, category in zip(cell[1].find_all('p'), cell[2].find_all('p'), cell[3].find_all('p')):\n startTime = str(day) + ' ' + cell[0].text.strip() + ':' + minute.text.strip()\n startTime = datetime.strptime(startTime, '%Y-%m-%d %H:%M')\n startTime = startTime.strftime('%Y%m%d%H%M%S')\n programName = program.text.replace('방송중 ', '').strip()\n 
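# getEpg() above expects Channel.json records shaped roughly like this; the
# field names come from the lookups in the code, the values are illustrative:
channel_entry = {
    'Id': 1,
    'Name': 'KBS1',
    'Source': 'KT',            # KT / LG / SK / SKB / NAVER / POOQ / WAVVE
    'ServiceId': '9',          # provider-side id used in the EPG requests
    'Icon_url': 'http://example.com/logo/kbs1.png',
    'KTCh': 9,  'KT Name': 'KBS1',
    'LGCh': 9,  'LG Name': 'KBS1',
    'SKCh': 9,  'SK Name': 'KBS1',
}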
category = category.text.strip()\n for image in [program.find_all('img', alt=True)]:\n grade = re.match('([\\d,]+)', image[0]['alt'])\n rating = int(grade.group(1)) if grade else 0\n # ChannelId, startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating\n epginfo.append([ChannelInfo[0], startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating])\n else:\n log.info('EPG 정보가 없거나 없는 채널입니다: %s' % ChannelInfo)\n # 오늘 없으면 내일도 없는 채널로 간주\n break\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s: %s' % (ChannelInfo, str(e)))\n\n # req_sleep\n time.sleep(req_sleep)\n\n if epginfo:\n epgzip(epginfo)\n\n\ndef GetEPGFromLG(ChannelInfos):\n if ChannelInfos:\n log.info('LG EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n url = 'http://www.uplus.co.kr/css/chgi/chgi/RetrieveTvSchedule.hpi'\n referer = 'http://www.uplus.co.kr/css/chgi/chgi/RetrieveTvContentsMFamily.hpi'\n params = {'chnlCd': 'SVCID', 'evntCmpYmd': 'EPGDATE'}\n\n sess = requests.session()\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n for ChannelInfo in ChannelInfos:\n epginfo = []\n for k in range(period):\n day = today + timedelta(days=k)\n params.update({'chnlCd': ChannelInfo[3], 'evntCmpYmd': day.strftime('%Y%m%d')})\n try:\n response = sess.post(url, data=params, timeout=req_timeout)\n response.raise_for_status()\n data = response.text\n data = data.replace('<재>', '<재>').replace(' [..', '').replace(' (..', '')\n soup = BeautifulSoup(data, htmlparser, parse_only=SoupStrainer('table'))\n html = soup.find('table').tbody.find_all('tr') if soup.find('table') else ''\n if html:\n for row in html:\n for cell in [row.find_all('td')]:\n startTime = endTime = programName = subprogramName = desc = actors = producers = category = episode = ''\n rebroadcast = False\n startTime = str(day) + ' ' + cell[0].text\n startTime = datetime.strptime(startTime, '%Y-%m-%d %H:%M')\n startTime = startTime.strftime('%Y%m%d%H%M%S')\n rating_str = cell[1].find('span', {'class': 'tag cte_all'}).text.strip()\n rating = 0 if rating_str == 'All' else int(rating_str)\n cell[1].find('span', {'class': 'tagGroup'}).decompose()\n pattern = '(<재>)?\\s?(?:\\[.*?\\])?(.*?)(?:\\[(.*)\\])?\\s?(?:\\(([\\d,]+)회\\))?$'\n matches = re.match(pattern, cell[1].text.strip())\n if matches:\n programName = matches.group(2).strip() if matches.group(2) else ''\n subprogramName = matches.group(3).strip() if matches.group(3) else ''\n episode = matches.group(4) if matches.group(4) else ''\n rebroadcast = True if matches.group(1) else False\n category = cell[2].text.strip()\n # ChannelId, startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating\n epginfo.append([ChannelInfo[0], startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating])\n else:\n log.info('EPG 정보가 없거나 없는 채널입니다: %s' % ChannelInfo)\n # 오늘 없으면 내일도 없는 채널로 간주\n break\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s: %s' % (ChannelInfo, str(e)))\n\n # req_sleep\n time.sleep(req_sleep)\n\n if epginfo:\n epgzip(epginfo)\n\n\ndef GetEPGFromSK(ChannelInfos):\n if ChannelInfos:\n log.info('SK EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n url = 'http://mapp.btvplus.co.kr/sideMenu/live/IFGetData.do'\n referer = 'http://mapp.btvplus.co.kr/channelFavor.do'\n icon_url = 'http://mapp.btvplus.co.kr/data/btvplus/admobd/channelLogo/nsepg_{}.png'\n\n sess = requests.session()\n 
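# How the LG title pattern above takes a raw schedule cell apart; a quick
# check with a made-up listing title:
import re

pattern = r'(<재>)?\s?(?:\[.*?\])?(.*?)(?:\[(.*)\])?\s?(?:\(([\d,]+)회\))?$'
m = re.match(pattern, '<재> 무한도전 [설특집] (601회)')
print(m.group(1))  # '<재>'       -> rebroadcast flag
print(m.group(2))  # '무한도전 '   -> programName (caller strips whitespace)
print(m.group(3))  # '설특집'      -> subprogramName
print(m.group(4))  # '601'        -> episode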
sess.headers.update({'User-Agent': ua, 'Referer': referer})\n \n def request_json(form_data):\n ret = []\n try:\n response = sess.post(url, data=form_data, timeout=req_timeout)\n response.raise_for_status()\n data = response.json()\n if data['result'].lower() == 'ok':\n ret = data['ServiceInfoArray']\n else:\n log.error('유효한 응답이 아닙니다: %s' % data['reason'])\n except ValueError as e:\n log.error(str(e))\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s' % str(e))\n return ret\n\n # dump all available channels to json\n json_fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Channel_SK.json')\n json_rawdata = request_json({'variable': 'IF_LIVECHART_ALL'})\n all_channels = [{'SK Name': x['NM_CH'], 'SKCh': int(x['NO_CH']), 'Icon_url': icon_url.format(x['ID_SVC']), 'Source': 'SK', 'ServiceId': x['ID_SVC']} for x in json_rawdata]\n notice = [{'last update': datetime.now().strftime('%Y/%m/%d %H:%M:%S'), 'total': len(all_channels)}]\n dump_json(json_fname, notice + all_channels)\n\n # remove unavailable channels in advance\n all_services = [x['ID_SVC'] for x in json_rawdata]\n newChannelInfos = []\n for ChannelInfo in ChannelInfos:\n ServiceId = ChannelInfo[3]\n if ServiceId in all_services:\n newChannelInfos.append(ChannelInfo)\n else:\n log.warning('없는 서비스 아이디입니다: %s', ChannelInfo)\n\n params = {\n 'variable': 'IF_LIVECHART_DETAIL',\n 'o_date': 'EPGDATE',\n 'svc_ids': '|'.join([info[3].strip() for info in newChannelInfos]),\n }\n\n for k in range(period):\n day = today + timedelta(days=k)\n params.update({'o_date': day.strftime('%Y%m%d')})\n channels = {x['ID_SVC']: x['EventInfoArray'] for x in request_json(params)}\n time.sleep(req_sleep) # request sleep\n\n for ChannelInfo in newChannelInfos:\n ServiceId = ChannelInfo[3]\n if ServiceId in channels:\n programs = channels[ServiceId]\n writeSKPrograms(ChannelInfo, programs)\n else:\n log.warning('해당 날짜에 EPG 정보가 없거나 없는 채널입니다: %s %s' % (day.strftime('%Y%m%d'), ChannelInfo))\n\n log.info('SK EPG 완료: {}/{}개 채널'.format(len(newChannelInfos), len(ChannelInfos)))\n\n\ndef GetEPGFromSKB(ChannelInfos):\n if ChannelInfos:\n log.info('SKB EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n def replacement(match, tag):\n if match:\n tag = tag.strip()\n programName = unescape(match.group(1)).replace('<', '<').replace('>', '>').strip()\n programName = '<' + tag + ' class=\"cont\">' + programName\n return programName\n else:\n return ''\n\n url = 'http://m.skbroadband.com/content/realtime/Channel_List.do'\n referer = 'http://m.skbroadband.com/content/realtime/Channel_List.do'\n params = {'key_depth2': 'SVCID', 'key_depth3': 'EPGDATE'}\n\n sess = requests.session()\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n for ChannelInfo in ChannelInfos:\n epginfo = []\n for k in range(period):\n day = today + timedelta(days=k)\n params.update({'key_depth2': ChannelInfo[3], 'key_depth3': day.strftime('%Y%m%d')})\n try:\n response = sess.get(url, params=params, timeout=req_timeout)\n response.raise_for_status()\n data = response.text\n data = re.sub('EUC-KR', 'utf-8', data)\n data = re.sub('', '', data, 0, re.I | re.S)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('(.*?)', '', data)\n data = re.sub('프로그램 안내', '', data)\n data = re.sub('
<p class="cont">(.*)', partial(replacement, tag='p'), data)\n                data = re.sub('<p class="cont">
    (.*)', partial(replacement, tag='p'), data)\n strainer = SoupStrainer('div', {'id': 'uiScheduleTabContent'})\n soup = BeautifulSoup(data, htmlparser, parse_only=strainer)\n html = soup.find_all('li', {'class': 'list'}) if soup.find_all('li') else ''\n if html:\n for row in html:\n startTime = endTime = programName = subprogramName = desc = actors = producers = category = episode = ''\n rebroadcast = False\n rating = 0\n startTime = str(day) + ' ' + row.find('p', {'class': 'time'}).text\n startTime = datetime.strptime(startTime, '%Y-%m-%d %H:%M')\n startTime = startTime.strftime('%Y%m%d%H%M%S')\n cell = row.find('p', {'class': 'cont'})\n grade = row.find('i', {'class': 'hide'})\n if grade is not None:\n rating = int(grade.text.replace('세 이상', '').strip())\n\n if cell:\n if cell.find('span'):\n cell.span.decompose()\n cell = cell.text.strip()\n pattern = \"^(.*?)(\\(([\\d,]+)회\\))?(<(.*)>)?(\\((재)\\))?$\"\n matches = re.match(pattern, cell)\n\n if matches:\n programName = matches.group(1) if matches.group(1) else ''\n subprogramName = matches.group(5) if matches.group(5) else ''\n rebroadcast = True if matches.group(7) else False\n episode = matches.group(3) if matches.group(3) else ''\n\n # ChannelId, startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating\n epginfo.append([ChannelInfo[0], startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating])\n else:\n log.info('EPG 정보가 없거나 없는 채널입니다: %s' % ChannelInfo)\n # 오늘 ��으면 내일도 없는 채널로 간주\n break\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s: %s' % (ChannelInfo, str(e)))\n\n # req_sleep\n time.sleep(req_sleep)\n\n if epginfo:\n epgzip(epginfo)\n\n\ndef GetEPGFromNaver(ChannelInfos):\n if ChannelInfos:\n log.info('NAVER EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n url = 'https://m.search.naver.com/p/csearch/content/nqapirender.nhn'\n referer = 'https://m.search.naver.com/search.naver?where=m&query=%ED%8E%B8%EC%84%B1%ED%91%9C'\n params = {\n 'callback': 'epg',\n 'key': 'SingleChannelDailySchedule',\n 'where': 'm',\n 'pkid': '66',\n 'u1': 'SVCID',\n 'u2': 'EPGDATE'\n }\n\n sess = requests.session()\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n for ChannelInfo in ChannelInfos:\n epginfo = []\n for k in range(period):\n day = today + timedelta(days=k)\n params.update({'u1': ChannelInfo[3], 'u2': day.strftime('%Y%m%d')})\n try:\n response = sess.get(url, params=params, timeout=req_timeout)\n response.raise_for_status()\n json_data = re.sub(re.compile(\"/\\*.*?\\*/\", re.DOTALL), \"\", response.text.split(\"epg(\")[1].strip(\");\").strip())\n try:\n data = json.loads(json_data, encoding='utf-8')\n if data['statusCode'].lower() != 'success':\n log.error('유효한 응답이 아닙니다: %s %s' % (ChannelInfo, data['statusCode']))\n break\n\n for ul in data['dataHtml']:\n strainer = SoupStrainer('ul', {'class': 'ind_list'})\n soup = BeautifulSoup(ul, htmlparser, parse_only=strainer)\n html = soup.find_all('li', {'class': 'list'}) if soup.find('ul', {'class': 'ind_list'}) else ''\n if html:\n for row in html:\n for cell in [row.find_all('div')]:\n startTime = endTime = programName = subprogramName = desc = actors = producers = category = episode = ''\n rating = 0\n programName = unescape(cell[4].text.strip())\n startTime = str(day) + ' ' + cell[1].text.strip()\n startTime = datetime.strptime(startTime, '%Y-%m-%d %H:%M')\n startTime = startTime.strftime('%Y%m%d%H%M%S')\n rebroadcast = True if cell[3].find('span', {'class': 're'}) 
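# The Naver endpoint above answers with JSONP ('epg({...});' plus /* ... */
# comments); the unwrapping done in GetEPGFromNaver, isolated on a made-up
# payload:
import json
import re

raw = '/*u1=1*/epg({"statusCode": "success", "dataHtml": []});'
body = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '',
              raw.split('epg(')[1].strip(');').strip())
print(json.loads(body))  # {'statusCode': 'success', 'dataHtml': []}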
else False\n try:\n subprogramName = cell[5].text.strip()\n except:\n subprogramName = ''\n epginfo.append([ChannelInfo[0], startTime, programName, subprogramName, desc, actors, producers, category, episode, rebroadcast, rating])\n else:\n log.info('EPG 정보가 없거나 없는 채널입니다: %s %s' % (day.strftime('%Y%m%d'), ChannelInfo))\n\n except ValueError as e:\n log.error(str(e))\n except requests.RequestException as e:\n log.error('요청 중 에러: %s: %s' % (ChannelInfo, str(e)))\n\n # req_sleep\n time.sleep(req_sleep)\n\n if epginfo:\n epgzip(epginfo)\n\n\ndef GetEPGFromWAVVE(reqChannels):\n if reqChannels:\n log.info('WAVVE EPG 데이터를 가져오고 있습니다.')\n else:\n return\n\n '''\n 개별채널: https://apis.pooq.co.kr/live/epgs/channels/{ServideId}\n 전체채널: https://apis.pooq.co.kr/live/epgs\n 정보량은 거의 비슷\n '''\n\n url = 'https://apis.pooq.co.kr/live/epgs'\n referer = 'https://www.wavve.com/schedule/index.html'\n params = {\n 'enddatetime': '2020-01-20 24:00',\n 'genre': 'all',\n 'limit': 100,\n 'offset': 0,\n 'startdatetime': '2020-01-20 21:00',\n 'apikey': 'E5F3E0D30947AA5440556471321BB6D9',\n 'credential': 'none',\n 'device': 'pc',\n 'drm': 'wm',\n 'partner': 'pooq',\n 'pooqzone': 'none',\n 'region': 'kor',\n 'targetage': 'auto',\n }\n\n sess = requests.session()\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n # update parameters for requests\n params.update({\n 'startdatetime': today.strftime('%Y-%m-%d') + ' 00:00',\n 'enddatetime': (today + timedelta(days=period-1)).strftime('%Y-%m-%d') + ' 24:00',\n })\n\n try:\n response = sess.get(url, params=params, timeout=req_timeout)\n response.raise_for_status()\n channellist = response.json()['list']\n channeldict = {x['channelid']: x for x in channellist}\n\n # dump all available channels to json\n json_fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Channel_WAVVE.json')\n all_channels = [{'WAVVE Name': x['channelname'], 'Icon_url': 'https://' + x['channelimage'], 'Source': 'WAVVE',\n 'ServiceId': x['channelid']} for x in channellist]\n notice = [{'last update': datetime.now().strftime('%Y/%m/%d %H:%M:%S'), 'total': len(all_channels)}]\n dump_json(json_fname, notice + all_channels)\n\n # remove unavailable channels in advance\n all_services = [x['channelid'] for x in channellist]\n tmpChannels = []\n for reqChannel in reqChannels:\n if reqChannel['ServiceId'] in all_services:\n tmpChannels.append(reqChannel)\n else:\n log.warning('없는 서비스 아이디입니다: %s', reqChannel)\n\n if debug:\n reqChannels = all_channels # request all channels\n else:\n reqChannels = tmpChannels\n\n # for caching program details\n programcache = {}\n\n for reqChannel in reqChannels:\n if 'ServiceId' in reqChannel and reqChannel['ServiceId'] in channeldict:\n # 채널이름은 그대로 들어오고 프로그램 제목은 escape되어 들어옴\n srcChannel = channeldict[reqChannel['ServiceId']]\n channelid = reqChannel['Id'] if 'Id' in reqChannel else 'pooq|%s' % srcChannel['channelid']\n channelname = reqChannel['Name'] if 'Name' in reqChannel else srcChannel['channelname'].strip()\n channelicon = reqChannel['Icon_url'] if 'Icon_url' in reqChannel else 'https://' + srcChannel['channelimage']\n # channelliveimg = \"https://wchimg.pooq.co.kr/pooqlive/thumbnail/%s.jpg\" % reqChannel['ServiceId']\n print(' ' % channelid)\n print(' ' % escape(channelicon))\n print(' %s' % escape(channelname))\n print(' ')\n\n for program in srcChannel['list']:\n startTime = endTime = programName = subprogramName = desc = ''\n actors = producers = category = episode = iconurl = ''\n rebroadcast = False\n startTime = 
datetime.strptime(program['starttime'], '%Y-%m-%d %H:%M').strftime('%Y%m%d%H%M%S')\n endTime = datetime.strptime(program['endtime'], '%Y-%m-%d %H:%M').strftime('%Y%m%d%H%M%S')\n\n # TODO: 제목 너무 지저분/부실하네\n # TODO: python3에서 re.match에 더 많이 잡힘. 왜?\n programName = unescape(program['title'])\n pattern = '^(.*?)(?:\\s*[\\(<]([\\d,회]+)[\\)>])?(?:\\s*<([^<]*?)>)?(\\((재)\\))?$'\n matches = re.match(pattern, programName)\n if matches:\n programName = matches.group(1).strip() if matches.group(1) else ''\n subprogramName = matches.group(3).strip() if matches.group(3) else ''\n episode = matches.group(2).replace('회', '') if matches.group(2) else ''\n episode = '' if episode == '0' else episode\n rebroadcast = True if matches.group(5) else False\n\n rating = 0 if program['targetage'] == 'n' else int(program['targetage'])\n\n # 추가 정보 가져오기\n programid = program['programid'].strip()\n if programid and (programid not in programcache):\n # 개별 programid가 없는 경우도 있으니 체크해야함\n programdetail = getWAVVEProgramDetails(programid, sess)\n if programdetail is not None:\n programdetail[u'hit'] = 0 # to know cache hit rate\n programcache[programid] = programdetail\n\n if (programid in programcache) and bool(programcache[programid]):\n programcache[programid][u'hit'] += 1\n programdetail = programcache[programid]\n # TODO: 추가 제목 정보 활용\n # programtitle = programdetail['programtitle']\n # log.info('%s / %s' % (programName, programtitle))\n desc = '\\n'.join([x.replace('
<br>', '\n').strip() for x in programdetail['programsynopsis'].splitlines()]) # carriage return(\r) 제거, <br>
    제거\n category = programdetail['genretext'].strip()\n iconurl = 'https://' + programdetail['programposterimage'].strip()\n # tags = programdetail['tags']['list'][0]['text']\n if programdetail['actors']['list']:\n actors = ','.join([x['text'] for x in programdetail['actors']['list']])\n\n writeProgram({\n 'channelId': channelid,\n 'startTime': startTime,\n 'endTime': endTime,\n 'programName': programName,\n 'subprogramName': subprogramName,\n 'desc': desc,\n 'actors': actors,\n 'producers': producers,\n 'category': category,\n 'episode': episode,\n 'rebroadcast': rebroadcast,\n 'rating': rating,\n 'iconurl': iconurl\n })\n else:\n log.info('EPG 정보가 없거나 없는 채널입니다: %s' % reqChannel)\n log.info('WAVVE EPG 완료: {}개 채널'.format(len(reqChannels)))\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s' % str(e))\n except Exception as e:\n log.error('알 수 없는 에러: %s' % str(e))\n\n\ndef getWAVVEProgramDetails(programid, sess):\n url = 'https://apis.pooq.co.kr/vod/programs-contentid/' + programid\n referer = 'https://www.wavve.com/player/vod?programid=' + programid\n param = {\n \"apikey\": \"E5F3E0D30947AA5440556471321BB6D9\",\n \"credential\": \"none\",\n \"device\": \"pc\",\n \"drm\": \"wm\",\n \"partner\": \"pooq\",\n \"pooqzone\": \"none\",\n \"region\": \"kor\",\n \"targetage\": \"auto\"\n }\n sess.headers.update({'User-Agent': ua, 'Referer': referer})\n\n ret = None\n try:\n res = sess.get(url, params=param, timeout=req_timeout)\n res.raise_for_status()\n contentid = res.json()['contentid'].strip()\n\n time.sleep(req_sleep) # request sleep\n\n # url2 = 'https://apis.pooq.co.kr/cf/vod/contents/' + contentid\n url2 = 'https://apis.pooq.co.kr/vod/contents/' + contentid # 같은 주소지만 이게 더 안정적인듯\n res2 = sess.get(url2, params=param, timeout=req_timeout)\n res2.raise_for_status()\n ret = res2.json()\n except requests.exceptions.RequestException as e:\n log.error('요청 중 에러: %s' % str(e))\n except Exception as e:\n log.error('알 수 없는 에러: %s' % str(e))\n time.sleep(req_sleep) # request sleep\n return ret\n\n\ndef epgzip(epginfo):\n epginfo = iter(epginfo)\n epg1 = next(epginfo)\n for epg2 in epginfo:\n ChannelId = epg1[0]\n startTime = epg1[1] if epg1[1] else ''\n endTime = epg2[1] if epg2[1] else ''\n programName = epg1[2] if epg1[2] else ''\n subprogramName = epg1[3] if epg1[3] else ''\n desc = epg1[4] if epg1[4] else ''\n actors = epg1[5] if epg1[5] else ''\n producers = epg1[6] if epg1[6] else ''\n category = epg1[7] if epg1[7] else ''\n episode = epg1[8] if epg1[8] else ''\n rebroadcast = True if epg1[9] else False\n rating = int(epg1[10]) if epg1[10] else 0\n programdata = {\n 'channelId': ChannelId,\n 'startTime': startTime,\n 'endTime': endTime,\n 'programName': programName,\n 'subprogramName': subprogramName,\n 'desc': desc,\n 'actors': actors,\n 'producers': producers,\n 'category': category,\n 'episode': episode,\n 'rebroadcast': rebroadcast,\n 'rating': rating\n }\n writeProgram(programdata)\n epg1 = epg2\n\n\ndef writeProgram(programdata):\n ChannelId = programdata['channelId']\n startTime = programdata['startTime']\n endTime = programdata['endTime']\n programName = escape(programdata['programName']).strip()\n subprogramName = escape(programdata['subprogramName']).strip()\n matches = re.match('(.*) \\(?(\\d+부)\\)?', unescape(programName))\n if matches:\n programName = escape(matches.group(1)).strip()\n subprogramName = escape(matches.group(2)) + ' ' + subprogramName\n subprogramName = subprogramName.strip()\n if programName is None:\n programName = subprogramName\n actors = 
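# epgzip() above derives each programme's end time from the next entry's
# start time by walking the list pairwise; the core idiom in isolation:
entries = [('20200120060000', 'News'),
           ('20200120070000', 'Drama'),
           ('20200120080000', 'Movie')]
for (start, name), (end, _) in zip(entries, entries[1:]):
    print(name, start, '->', end)
# News 20200120060000 -> 20200120070000
# Drama 20200120070000 -> 20200120080000
# The final entry has no successor and is dropped, just as in epgzip().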
escape(programdata['actors'])\n    producers = escape(programdata['producers'])\n    category = escape(programdata['category'])\n    episode = programdata['episode']\n    if episode:\n        try:\n            episode_ns = int(episode) - 1\n        except ValueError:\n            episode_ns = int(episode.split(',', 1)[0]) - 1\n        episode_ns = '0' + '.' + str(episode_ns) + '.' + '0' + '/' + '0'\n        episode_on = episode\n    rebroadcast = programdata['rebroadcast']\n    if episode and addepisode == 'y':\n        programName = programName + ' (' + str(episode) + '회)'\n    if rebroadcast and (addrebroadcast == 'y'):\n        programName = programName + ' (재)'\n    if programdata['rating'] == 0:\n        rating = '전체 관람가'\n    else:\n        rating = '%s세 이상 관람가' % (programdata['rating'])\n    if addverbose == 'y':\n        desc = programName\n        if subprogramName:\n            desc += '\\n부제 : ' + subprogramName\n        if rebroadcast and (addrebroadcast == 'y'):\n            desc += '\\n방송 : 재방송'\n        if episode:\n            desc += '\\n회차 : ' + str(episode) + '회'\n        if category:\n            desc += '\\n장르 : ' + category\n        if actors:\n            desc += '\\n출연 : ' + actors.strip()\n        if producers:\n            desc += '\\n제작 : ' + producers.strip()\n        desc += '\\n등급 : ' + rating\n    else:\n        desc = ''\n    if programdata['desc']:\n        desc += '\\n' + escape(programdata['desc'])\n    desc = re.sub(' +', ' ', desc)\n    contentTypeDict = {\n        '교양': 'Arts / Culture (without music)',\n        '만화': 'Cartoons / Puppets',\n        '교육': 'Education / Science / Factual topics',\n        '취미': 'Leisure hobbies',\n        '드라마': 'Movie / Drama',\n        '영화': 'Movie / Drama',\n        '음악': 'Music / Ballet / Dance',\n        '뉴스': 'News / Current affairs',\n        '다큐': 'Documentary',\n        '라이프': 'Documentary',\n        '시사/다큐': 'Documentary',\n        '연예': 'Show / Game show',\n        '스포츠': 'Sports',\n        '홈쇼핑': 'Advertisement / Shopping'\n    }\n    contentType = ''\n    for key, value in contentTypeDict.items():\n        if key in category:\n            contentType = value\n    print('  <programme start=\"%s +0900\" stop=\"%s +0900\" channel=\"%s\">' % (startTime, endTime, ChannelId))\n    print('    <title lang=\"kr\">%s</title>' % programName)\n    if subprogramName:\n        print('    <sub-title lang=\"kr\">%s</sub-title>' % subprogramName)\n    if addverbose == 'y':\n        print('    <desc lang=\"kr\">%s</desc>' % desc)\n    if actors or producers:\n        print('    <credits>')\n        if actors:\n            for actor in actors.split(','):\n                if actor.strip():\n                    print('      <actor>%s</actor>' % actor.strip())\n        if producers:\n            for producer in producers.split(','):\n                if producer.strip():\n                    print('      <director>%s</director>' % producer.strip())\n        print('    </credits>')\n    if category:\n        print('    <category lang=\"kr\">%s</category>' % category)\n    if contentType:\n        print('    <category lang=\"en\">%s</category>' % contentType)\n    if episode and addxmltvns == 'y':\n        print('    <episode-num system=\"xmltv_ns\">%s</episode-num>' % episode_ns)\n    if episode and addxmltvns != 'y':\n        print('    <episode-num system=\"onscreen\">%s</episode-num>' % episode_on)\n    if rebroadcast:\n        print('    <previously-shown />')\n    if rating:\n        print('    <rating system=\"VCHIP\">')\n        print('      <value>%s</value>' % rating)\n        print('    </rating>')\n    if ('iconurl' in programdata) and programdata['iconurl']:\n        print('    <icon src=\"%s\" />' % escape(programdata['iconurl']))\n    print('  </programme>')\n\n\ndef writeSKPrograms(ChannelInfo, programs):\n    genre_code = {\n        '1': '드라마',\n        '2': '영화',\n        '4': '만화',\n        '8': '스포츠',\n        '9': '교육',\n        '11': '홈쇼핑',\n        '13': '예능',\n        '14': '시사/다큐',\n        '15': '음악',\n        '16': '라이프',\n        '17': '교양',\n        '18': '뉴스',\n    }\n    for program in programs:\n        startTime = endTime = programName = subprogramName = desc = actors = producers = category = episode = ''\n        rebroadcast = False\n        programName = program['NM_TITLE'].replace('...', '>')\n        pattern = '^(.*?)(?:\s*[\(<]([\d,회]+)[\)>])?(?:\s*<([^<]*?)>)?(\((재)\))?$'\n        matches = re.match(pattern, programName)\n        if matches:\n            programName = matches.group(1).strip() if matches.group(1) else ''\n            subprogramName = matches.group(3).strip() if matches.group(3) else ''\n            episode = matches.group(2).replace('회', '') if matches.group(2) else ''\n            episode = '' if episode == '0' else episode\n            rebroadcast = True if 
matches.group(5) else False\n startTime = program['DT_EVNT_START']\n endTime = program['DT_EVNT_END']\n desc = program['NM_SYNOP'] if program['NM_SYNOP'] else ''\n if 'AdditionalInfoArray' in program:\n info_array = program['AdditionalInfoArray'][0]\n actors = info_array['NM_ACT'].replace('...', '').strip(', ') if info_array['NM_ACT'] else ''\n producers = info_array['NM_DIRECTOR'].replace('...', '').strip(', ') if info_array['NM_DIRECTOR'] else ''\n if program['CD_GENRE'] and (program['CD_GENRE'] in genre_code):\n category = genre_code[program['CD_GENRE']]\n else:\n category = ''\n rating = int(program['CD_RATING']) if program['CD_RATING'] else 0\n programdata = {\n 'channelId': ChannelInfo[0],\n 'startTime': startTime,\n 'endTime': endTime,\n 'programName': programName,\n 'subprogramName': subprogramName,\n 'desc': desc,\n 'actors': actors,\n 'producers': producers,\n 'category': category,\n 'episode': episode,\n 'rebroadcast': rebroadcast,\n 'rating': rating\n }\n writeProgram(programdata)\n\n\ndef load_json(file_path):\n try:\n with open(file_path, 'r', encoding='utf-8') as f:\n return json.load(f)\n except EnvironmentError as e:\n log.error(\"파일을 읽을 수 없습니다: %s\", str(e))\n sys.exit(1)\n except ValueError as e:\n log.error(\"파일 형식이 잘못되었습니다: %s\", str(e))\n sys.exit(1)\n\n\ndef dump_json(file_path, data):\n try:\n with open(file_path, 'w', encoding='utf-8') as f:\n json.dump(data, f, ensure_ascii=False, indent=4)\n except Exception as e:\n log.error(\"파일 저장 중 에러: %s\", str(e))\n\n\nChannelfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Channel.json')\nChanneldatajson = load_json(Channelfile)\n\nSettingfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'epg2xml.json')\nSettings = load_json(Settingfile)\n\nMyISP = Settings['MyISP'] if 'MyISP' in Settings else 'ALL'\nMyChannels = Settings['MyChannels'] if 'MyChannels' in Settings else ''\ndefault_output = Settings['output'] if 'output' in Settings else 'd'\ndefault_xml_file = Settings['default_xml_file'] if 'default_xml_file' in Settings else 'xmltv.xml'\ndefault_xml_socket = Settings['default_xml_socket'] if 'default_xml_socket' in Settings else 'xmltv.sock'\ndefault_icon_url = Settings['default_icon_url'] if 'default_icon_url' in Settings else None\ndefault_fetch_limit = Settings['default_fetch_limit'] if 'default_fetch_limit' in Settings else '2'\ndefault_rebroadcast = Settings['default_rebroadcast'] if 'default_rebroadcast' in Settings else 'y'\ndefault_episode = Settings['default_episode'] if 'default_episode' in Settings else 'y'\ndefault_verbose = Settings['default_verbose'] if 'default_verbose' in Settings else 'n'\ndefault_xmltvns = Settings['default_xmltvns'] if 'default_xmltvns' in Settings else 'n'\n\nparser = argparse.ArgumentParser(description='EPG 정보를 출력하는 방법을 선택한다')\nargu1 = parser.add_argument_group(description='IPTV 선택')\nargu1.add_argument('-i', dest='MyISP', choices=['ALL', 'KT', 'LG', 'SK'], help='사용하는 IPTV : ALL, KT, LG, SK', default=MyISP)\nargu2 = parser.add_mutually_exclusive_group()\nargu2.add_argument('-v', '--version', action='version', version='%(prog)s version : ' + __version__)\nargu2.add_argument('-d', '--display', action='store_true', help='EPG 정보 화면출력')\nargu2.add_argument('-o', '--outfile', metavar=default_xml_file, nargs='?', const=default_xml_file, help='EPG 정보 저장')\nargu2.add_argument('-s', '--socket', metavar=default_xml_socket, nargs='?', const=default_xml_socket, help='xmltv.sock(External: XMLTV)로 EPG정보 전송')\nargu3 = 
parser.add_argument_group('추가옵션')\nargu3.add_argument('--icon', dest='icon', metavar=\"http://www.example.com/icon\", help='채널 아이콘 URL, 기본값: ' + str(default_icon_url), default=default_icon_url)\nargu3.add_argument('-l', '--limit', dest='limit', type=int, metavar=\"1-7\", choices=range(1, 8), help='EPG 정보를 가져올 기간, 기본값: ' + str(default_fetch_limit), default=default_fetch_limit)\nargu3.add_argument('--rebroadcast', dest='rebroadcast', metavar='y, n', choices='yn', help='제목에 재방송 정보 출력', default=default_rebroadcast)\nargu3.add_argument('--episode', dest='episode', metavar='y, n', choices='yn', help='제목에 회차 정보 출력', default=default_episode)\nargu3.add_argument('--verbose', dest='verbose', metavar='y, n', choices='yn', help='EPG 정보 추가 출력', default=default_verbose)\n\nargs = parser.parse_args()\nif args.MyISP:\n    MyISP = args.MyISP\nif args.display:\n    default_output = \"d\"\nelif args.outfile:\n    default_output = \"o\"\n    default_xml_file = args.outfile\nelif args.socket:\n    default_output = \"s\"\n    default_xml_socket = args.socket\nif args.icon:\n    default_icon_url = args.icon\nif args.limit:\n    default_fetch_limit = args.limit\nif args.rebroadcast:\n    default_rebroadcast = args.rebroadcast\nif args.episode:\n    default_episode = args.episode\nif args.verbose:\n    default_verbose = args.verbose\n\nif MyISP:\n    if not any(MyISP in s for s in ['ALL', 'KT', 'LG', 'SK']):\n        log.error(\"MyISP는 ALL, KT, LG, SK만 가능합니다.\")\n        sys.exit(1)\nelse:\n    log.error(\"epg2xml.json 파일의 MyISP항목이 없습니다.\")\n    sys.exit(1)\n\nif default_output:\n    if any(default_output in s for s in ['d', 'o', 's']):\n        if default_output == \"d\":\n            output = \"display\"\n        elif default_output == \"o\":\n            output = \"file\"\n        elif default_output == 's':\n            output = \"socket\"\n    else:\n        log.error(\"default_output는 d, o, s만 가능합니다.\")\n        sys.exit(1)\nelse:\n    log.error(\"epg2xml.json 파일의 output항목이 없습니다.\")\n    sys.exit(1)\n\nIconUrl = default_icon_url\n\nif default_rebroadcast:\n    if not any(default_rebroadcast in s for s in ['y', 'n']):\n        log.error(\"default_rebroadcast는 y, n만 가능합니다.\")\n        sys.exit(1)\n    else:\n        addrebroadcast = default_rebroadcast\nelse:\n    log.error(\"epg2xml.json 파일의 default_rebroadcast항목이 없습니다.\")\n    sys.exit(1)\n\nif default_episode:\n    if not any(default_episode in s for s in ['y', 'n']):\n        log.error(\"default_episode는 y, n만 가능합니다.\")\n        sys.exit(1)\n    else:\n        addepisode = default_episode\nelse:\n    log.error(\"epg2xml.json 파일의 default_episode항목이 없습니다.\")\n    sys.exit(1)\n\nif default_verbose:\n    if not any(default_verbose in s for s in ['y', 'n']):\n        log.error(\"default_verbose는 y, n만 가능합니다.\")\n        sys.exit(1)\n    else:\n        addverbose = default_verbose\nelse:\n    log.error(\"epg2xml.json 파일의 default_verbose항목이 없습니다.\")\n    sys.exit(1)\n\nif default_xmltvns:\n    if not any(default_xmltvns in s for s in ['y', 'n']):\n        log.error(\"default_xmltvns는 y, n만 가능합니다.\")\n        sys.exit(1)\n    else:\n        addxmltvns = default_xmltvns\nelse:\n    log.error(\"epg2xml.json 파일의 default_xmltvns항목이 없습니다.\")\n    sys.exit(1)\n\nif default_fetch_limit:\n    if not any(str(default_fetch_limit) in s for s in ['1', '2', '3', '4', '5', '6', '7']):\n        log.error(\"default_fetch_limit 는 1, 2, 3, 4, 5, 6, 7만 가능합니다.\")\n        sys.exit(1)\n    else:\n        period = int(default_fetch_limit)\nelse:\n    log.error(\"epg2xml.json 파일의 default_fetch_limit항목이 없습니다.\")\n    sys.exit(1)\n\nif output == \"file\":\n    if default_xml_file:\n        sys.stdout = open(default_xml_file, 'w', encoding='utf-8')\n    else:\n        log.error(\"epg2xml.json 파일의 default_xml_file항목이 없습니다.\")\n        sys.exit(1)\nelif output == \"socket\":\n    if default_xml_socket:\n        try:\n            sock = 
socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n            sock.connect(default_xml_socket)\n            sockfile = sock.makefile('w')\n            sys.stdout = sockfile\n        except socket.error:\n            sys.exit('xmltv.sock 파일을 찾을 수 없습니다.')\n    else:\n        log.error(\"epg2xml.json 파일의 default_xml_socket항목이 없습니다.\")\n        sys.exit(1)\ngetEpg()\n","sub_path":"epg2xml.py","file_name":"epg2xml.py","file_ext":"py","file_size_in_byte":45505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"479933604","text":"import pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\nimport json\nimport numpy as np\n\ndf = pd.read_json(\"netflix.json\")\n\np = re.compile(r'\d+')\ndef parser(body):\n    bs = BeautifulSoup(body,'html.parser')\n    user_name = bs.find('span',class_='X43Kjb').text\n    date = bs.find('span', class_='p2TkOb').text\n    rating = bs.find('div', {'role': 'img'})['aria-label']\n    rating = p.findall(rating)[-1]\n    review_text = bs.find('span', {'jsname': 'bN97Pc'}).text\n    return user_name, date, rating, review_text\n\ndf['user_name'], df['date'], df['rating'], df['review_text'] = zip(*df['body'].map(parser))\n\ndel df['body']\n\n\nhighrate= df[df['rating'] >'3']['review_text']\nlowrate= df[df['rating'] <='3']['review_text']\n\nhighrate.to_csv('highrate.csv')\nlowrate.to_csv('lowrate.csv')\n\n\n\n","sub_path":"data_Preprocessing.py","file_name":"data_Preprocessing.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"194289920","text":"from __future__ import absolute_import\n\nimport scadsui\nimport pytest\nfrom webtest import TestApp\n\n\n@pytest.yield_fixture\ndef app():\n    app = scadsui.app\n    ctx = app.test_request_context()\n    ctx.push()\n    yield app\n    ctx.pop()\n\n\n@pytest.fixture\ndef testapp(app):\n    return TestApp(app)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"203600672","text":"from blog import views\nfrom django.urls import path\n\nurlpatterns = [\n    path(\n        \"publication//\",\n        views.publication_page,\n        name=\"publicationpage\",\n    ),\n    path(\"category//\", views.catg_page, name=\"catgpage\"),\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"65735906","text":"import os\nimport bs4\nfrom post import PostRequest\n\ndef get_content(switch_proxy): #获得网页内容\n    if switch_proxy is 'y':\n        return PostRequest(url, 1).getContent()\n    if switch_proxy is 'n':\n        return PostRequest(url).getContent()\n\ndef deal_content(content, num): # 处理网页内容\n    \n    soup = bs4.BeautifulSoup(content)\n    tlistnames = soup.findAll('td', {'class': 'tlistname'})\n    tlistdns = soup.findAll('td', {'class': 'tlistdn'})\n    download_urls = soup.findAll('td', {'class': 'tlistdownload'})\n\n    list_of_name = []\n\n    for name, download, d_url in zip(tlistnames, tlistdns, download_urls):\n        download = download.contents[0]\n        name = name.a.contents[0]\n        d_url = d_url.a['href']\n        if int(download) >= num: #过滤下载数小于20000的\n            list_of_name.append(download + \" \" + name + \" \" + d_url +'\\n')\n        #if '2D.G.F.' 
in name: #过滤非2DJ\n #list_of_name.append(download + \" \" + name + \" \" + d_url +'\\n')\n return list_of_name\n\nif __name__ == '__main__':\n files_with_path = os.getcwd() + os.sep + 'sort_file_game.txt' #保存到txt文件 路径和名称\n #url = input(\"Please input the url: \") #http://www.nyaa.se/?page=search&cats=1_0&filter=2&offset=2\n #http://sukebei.nyaa.se/?user=102122&offset=\n switch_proxy = input('Please input y/n to enable or disable proxy: ')\n start_page = input('请输入开始页数:')\n offsets = input('请输入页数: ')\n num_filter = int(input('Please input num of filter: ')) #设置过滤数\n \n num_of_animes = 0\n global url\n \n for offset in range(int(start_page), int(offsets)+1, 1):\n #url = \"http://sukebei.nyaa.se/?cats=7_27&offset=\"\n url = \"http://sukebei.nyaa.se/?user=102122&offset=\" #mikocon\n #url = \"http://sukebei.nyaa.se/?user=57200&offset=\" #girlcelly\n page = '%d' % offset\n url += page\n print('打开的nyaa链接是:', url)\n \n content = get_content(switch_proxy)\n name_lists = deal_content(content, num_filter)\n num_of_animes += len(name_lists)\n with open(files_with_path, 'a+', encoding='utf8') as saved_file:\n print('共有%d部,写入信息中,请稍等' % len(name_lists))\n saved_file.writelines(name_lists)\n \n print('过滤的游戏列表以及其下载链接已保存在%s, 共有%d部' % (files_with_path, num_of_animes))\n","sub_path":"scrapers/nyaa_game_modi.py","file_name":"nyaa_game_modi.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"214780973","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# ylgongPw @ 2020-02-15 15:58:39\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def isSymmetric(self, root: TreeNode) -> bool:\n \"\"\"\n 检查左子树和右子树的数目是否相等\n DFS: 自顶向下\n \"\"\"\n if not root:\n return True\n\n def dfs(left,right):\n if (left == None and right == None):\n return True\n if (left == None or right == None):\n return False\n if left.val != right.val:\n return False\n return dfs(left.left,right.right) and dfs(left.right,right.left)\n\n return dfs(root.left,root.right)\n\n\ndef build_tree(root,alist,i):\n if i= 60:\n default1_items[6] += 1\n elif int(data) >= 50:\n default1_items[5] += 1\n elif int(data) >= 40:\n default1_items[4] += 1\n elif int(data) >= 30:\n default1_items[3] += 1\n elif int(data) >= 20:\n default1_items[2] += 1\n elif int(data) >= 10:\n default1_items[1] += 1\n else:\n default1_items[0] += 1\n\n ###############line###########################\n labels_thard = ['요식업','숙박업','레저','쇼핑']\n \n \n default2_items = [0,0,0,0]\n\n \n for li in range(4):\n \n object_money = (ChartStat.objects.filter(Q(store__location=location) & Q(store__category = li+1)).aggregate(Sum('amount')))['amount__sum']\n if object_money != None:\n default2_items[li] = object_money\n \n\n\n data = {\n\n \"all_labels\" : all_labels,\n \"all_default\" : default,\n \n \"labels\": labels,\n \"default\": default_items,\n\n \"labels_second\" : labels_second,\n \"default1\": default1_items,\n\n \"labels_thard\" : labels_thard,\n \"default2\": default2_items,\n \n }\n return Response(data)\n\n#-----------------------------------------------------------------#\n\n## utils\ndef check_length(string, max_len):\n result = \"\"\n if len(string) > max_len:\n result = str(string)[0:max_len] + \"...\"\n else:\n result = str(string)\n return result\n# host = 'http://127.0.0.1:3000/'\nhost = \"http://210.107.78.166:3000/\"\n\n## query\ndef get_notices():\n notice = 
Notice.objects.order_by('-id')\n notice_list = []\n for li in list(notice)[0:4]:\n temp = {}\n temp['id'] = li.id\n temp['title'] = check_length(str(li.title), 5)\n temp['writer'] = check_length(str(li.writer), 5)\n temp['status'] = li.status\n temp['create_date'] = str(li.create_date)[0:10]\n temp['modify_date'] = str(li.modify_date)[0:10]\n notice_list.append(temp)\n return notice_list\n\ndef get_publish_amount():\n get_publish_url = host + \"get_total_publish\"\n publish_data = {}\n\n publish_amout_url = host + \"get_account\"\n params = {'user_id' : \"admin\"}\n response = requests.get(publish_amout_url, params=params)\n res = response.json()\n publish_amount = (int(res['value']))\n\n try:\n response = requests.get(get_publish_url)\n json_format = json.loads(response.text)\n json_format.reverse()\n data_list = []\n for datas in json_format:\n data = {}\n data['tx_id'] = str(datas['tx_id'])\n data['amount'] = datas['amount']\n data['person'] = datas['trader']\n data['date'] = datas['date']\n data_list.append(data)\n \n publish_data['total_publish'] = publish_amount\n publish_data['publish_list'] = data_list\n except Exception as e:\n print(e)\n publish_data['total_publish'] = 0\n publish_data['publish_list'] = \"\"\n return publish_data\n\n\ndef get_account_cnt():\n users = User.objects.all()\n user_cnt = len(users)\n return user_cnt\n\ndef get_tx_cnt():\n tx_height = 0\n get_block_url = host + \"get_tx_cnt\"\n try:\n response = requests.get(get_block_url)\n json_format = json.loads(response.text)\n tx_height = json_format['block_height']\n except Exception as e:\n print(e)\n return tx_height\n\ndef get_store_cnt():\n store = Store.objects.all()\n store_cnt = len(store)\n return store_cnt\n\ndef get_waiting_store():\n store = Store.objects.filter(status=1).order_by('-modified_date')[0:4]\n store_list = []\n for li in store:\n stores = {}\n stores['name'] = li.name\n stores['modified_date'] = li.modified_date\n stores['corporate_number'] = li.corporate_number\n if str(li.category).strip() == \"쇼핑\":\n stores['category'] = 1\n if str(li.category).strip() == \"레저\":\n stores['category'] = 2\n if str(li.category).strip() == \"숙박업\":\n stores['category'] = 3\n if str(li.category).strip() == \"요식업\":\n stores['category'] = 4\n store_list.append(stores)\n return store_list\n\ndef get_total_location_tx(location):\n return ChartStat.objects.filter(Q(store__location=location)).aggregate(Sum('amount'))['amount__sum']\n\ndef get_default_block():\n get_block_url = host + \"get_default_block\"\n response = requests.get(get_block_url)\n json_format = json.loads(response.text)\n print(\"json_format: \", json_format[0])\n return json_format\n ","sub_path":"RC/rccoin/wallet/operate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9294484","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/runner/work/PyTplot/PyTplot/pytplot/QtPlotter/TVarFigureMap.py\n# Compiled at: 2020-05-12 15:08:28\n# Size of source mod 2**32: 18290 bytes\nimport pyqtgraph as pg, numpy as np, os, pytplot\nfrom pyqtgraph.Qt import QtCore\nimport CustomImage.ColorbarImage as ColorbarImage\nimport CustomAxis.BlankAxis as BlankAxis\nfrom CustomLegend.CustomLegend import CustomLegendItem\nimport CustomAxis.AxisItem as AxisItem\nimport CustomViewBox.NoPaddingPlot as NoPaddingPlot\n\nclass 
TVarFigureMap(pg.GraphicsLayout):\n\n def __init__(self, tvar_name, show_xaxis=False):\n self.tvar_name = tvar_name\n self.show_xaxis = show_xaxis\n self.crosshair = pytplot.tplot_opt_glob['crosshair']\n pg.GraphicsLayout.__init__(self)\n self.layout.setHorizontalSpacing(50)\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.xaxis = pg.AxisItem(orientation='bottom')\n self.xaxis.setHeight(35)\n self.xaxis.enableAutoSIPrefix(enable=False)\n self.yaxis = AxisItem('left')\n self.yaxis.setWidth(100)\n vb = NoPaddingPlot()\n self.plotwindow = self.addPlot(row=0, col=0, axisItems={'bottom':self.xaxis, 'left':self.yaxis}, viewBox=vb)\n self.plotwindow.vb.setLimits(xMin=0, xMax=360, yMin=(-90), yMax=90)\n self.legendvb = pg.ViewBox(enableMouse=False)\n self.legendvb.setMaximumWidth(100)\n self.legendvb.setXRange(0, 1, padding=0)\n self.legendvb.setYRange(0, 1, padding=0)\n self.addItem(self.legendvb, 0, 1)\n self.curves = []\n self.colors = self._setcolors()\n self.colormap = self._setcolormap()\n if pytplot.tplot_opt_glob['black_background']:\n self.labelStyle = {'font-size':str(pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['char_size']) + 'pt', \n 'color':'#FFF'}\n else:\n self.labelStyle = {'font-size':str(pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['char_size']) + 'pt', 'color':'#000'}\n if show_xaxis:\n self.plotwindow.showAxis('bottom')\n else:\n self.plotwindow.hideAxis('bottom')\n self.label = pg.LabelItem(justify='left')\n self.addItem((self.label), row=1, col=0)\n self.hoverlegend = CustomLegendItem(offset=(0, 0))\n self.hoverlegend.setItem('Date: ', '0')\n self.hoverlegend.setItem('Time: ', '0')\n self.hoverlegend.setItem('Latitude:', '0')\n self.hoverlegend.setItem('Longitude:', '0')\n self.hoverlegend.setVisible(False)\n self.hoverlegend.setParentItem(self.plotwindow.vb)\n\n def buildfigure(self):\n self._setxrange()\n self._setyrange()\n self._setyaxistype()\n self._setzaxistype()\n self._setzrange()\n self._setbackground()\n self._visdata()\n self._setxaxislabel()\n self._setyaxislabel()\n self._addlegend()\n self._addtimebars()\n self._addtimelistener()\n if self.crosshair:\n self._set_crosshairs()\n self._addmouseevents()\n\n def _setxaxislabel(self):\n (self.xaxis.setLabel)(*('Longitude', ), **self.labelStyle)\n\n def _setyaxislabel(self):\n if 'axis_subtitle' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']:\n label = 'Latitude'\n sublabel = pytplot.data_quants[self.tvar_name].attrs['plot_options']['yaxis_opt']['axis_subtitle']\n (self.yaxis.setLabel)(f\"
<center> {label} <br> {sublabel} <\\center>\", **self.labelStyle)\n        else:\n            (self.yaxis.setLabel)(*('Latitude', ), **self.labelStyle)\n\n    def getfig(self):\n        return self\n\n    def _visdata(self):\n        datasets = [\n         pytplot.data_quants[self.tvar_name]]\n        for oplot_name in pytplot.data_quants[self.tvar_name].attrs['plot_options']['overplots']:\n            datasets.append(pytplot.data_quants[oplot_name])\n\n        cm_index = 0\n        for dataset_xr in datasets:\n            dataset = pytplot.tplot_utilities.convert_tplotxarray_to_pandas_dataframe(dataset_xr.name)\n            coords = pytplot.tplot_utilities.return_interpolated_link_dict(dataset_xr, ['lat', 'lon'])\n            t_link = coords['lat'].coords['time'].values\n            lat = coords['lat'].values\n            t_tvar = dataset.index.values\n            data = dataset[0].values\n            while t_tvar[(-1)] > t_link[(-1)]:\n                t_tvar = np.delete(t_tvar, -1)\n                data = np.delete(data, -1)\n\n            while t_tvar[0] < t_link[0]:\n                t_tvar = np.delete(t_tvar, 0)\n                data = np.delete(data, 0)\n\n            t_link = coords['lon'].coords['time'].values\n            lon = coords['lon'].values\n            while t_tvar[(-1)] > t_link[(-1)]:\n                t_tvar = np.delete(t_tvar, -1)\n                data = np.delete(data, -1)\n\n            while t_tvar[0] < t_link[0]:\n                t_tvar = np.delete(t_tvar, 0)\n                data = np.delete(data, 0)\n\n            for column_name in dataset.columns:\n                values = data.tolist()\n                colors = pytplot.tplot_utilities.get_heatmap_color(color_map=(self.colormap[(cm_index % len(self.colormap))]),\n                  min_val=(self.zmin),\n                  max_val=(self.zmax),\n                  values=values,\n                  zscale=(self.zscale))\n                brushes = []\n                for color in colors:\n                    brushes.append(pg.mkBrush(color))\n\n                self.curves.append(self.plotwindow.scatterPlot((lon.tolist()), (lat.tolist()), pen=(pg.mkPen(None)),\n                  brush=brushes,\n                  size=4))\n                cm_index += 1\n\n    def _setyaxistype(self):\n        if self._getyaxistype() == 'log':\n            self.plotwindow.setLogMode(y=True)\n        else:\n            self.plotwindow.setLogMode(y=False)\n\n    def _addlegend(self):\n        zaxis = AxisItem('right')\n        if 'axis_subtitle' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']:\n            label = pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['axis_label']\n            sublabel = pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['axis_subtitle']\n            (zaxis.setLabel)(f\"<center> {label} <br> {sublabel} <\\\\center>\", **self.labelStyle)\n        else:\n            (zaxis.setLabel)((pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['axis_label']), **self.labelStyle)\n        if self.show_xaxis:\n            emptyaxis = BlankAxis('bottom')\n            emptyaxis.setHeight(35)\n            p2 = self.addPlot(row=0, col=1, axisItems={'right':zaxis, 'bottom':emptyaxis}, enableMenu=False, viewBox=(self.legendvb))\n        else:\n            p2 = self.addPlot(row=0, col=1, axisItems={'right': zaxis}, enableMenu=False, viewBox=(self.legendvb))\n            p2.hideAxis('bottom')\n        p2.buttonsHidden = True\n        p2.setMaximumWidth(100)\n        p2.showAxis('right')\n        p2.hideAxis('left')\n        colorbar = ColorbarImage()\n        colorbar.setImage(np.array([np.linspace(1, 2, 200)]).T)\n        p2.addItem(colorbar)\n        p2.setLogMode(y=(self.zscale == 'log'))\n        p2.setXRange(0, 1, padding=0)\n        if self.zscale == 'log':\n            colorbar.setRect(QtCore.QRectF(0, np.log10(self.zmin), 1, np.log10(self.zmax) - np.log10(self.zmin)))\n            p2.setYRange((np.log10(self.zmin)), (np.log10(self.zmax)), padding=0)\n            p2.setYRange((np.log10(self.zmin)), (np.log10(self.zmax)), padding=0)\n        else:\n            colorbar.setRect(QtCore.QRectF(0, self.zmin, 1, self.zmax - self.zmin))\n            p2.setYRange((self.zmin), (self.zmax), padding=0)\n        colorbar.setLookupTable(self.colormap[0])\n\n    def _addmouseevents(self):\n        if self.plotwindow.scene() is not None:\n            self.plotwindow.scene().sigMouseMoved.connect(self._mousemoved)\n\n    def _mousemoved(self, evt):\n        pos = evt\n        if self.plotwindow.sceneBoundingRect().contains(pos):\n            mousepoint = self.plotwindow.vb.mapSceneToView(pos)\n            index_x = round(float(mousepoint.x()), 2)\n            index_y = round(float(mousepoint.y()), 2)\n            time = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].coords['time'].values\n            latitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].values\n            longitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lon']].values\n            radius = np.sqrt((latitude - index_y) ** 2 + (longitude - index_x) ** 2).argmin()\n            time_point = time[radius]\n            date = pytplot.tplot_utilities.int_to_str(time_point)[0:10]\n            time = pytplot.tplot_utilities.int_to_str(time_point)[11:19]\n            pytplot.hover_time.change_hover_time(time_point, name=(self.tvar_name))\n            self.vLine.setVisible(True)\n            self.hLine.setVisible(True)\n            self.vLine.setPos(mousepoint.x())\n            self.hLine.setPos(mousepoint.y())\n            self.hoverlegend.setVisible(True)\n            self.hoverlegend.setItem('Date: ', date)\n            self.hoverlegend.setItem('Time: ', time)\n            self.hoverlegend.setItem('Longitude:', str(index_x))\n            self.hoverlegend.setItem('Latitude:', str(index_y))\n        else:\n            self.hoverlegend.setVisible(False)\n            self.vLine.setVisible(False)\n            self.hLine.setVisible(False)\n\n    def _getyaxistype(self):\n        return 'linear'\n\n    def _setzaxistype(self):\n        if self._getzaxistype() == 'log':\n            self.zscale = 'log'\n        else:\n            self.zscale = 'linear'\n\n    def _getzaxistype(self):\n        if 'z_axis_type' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']:\n            return pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['z_axis_type']\n        return 'linear'\n\n    def _setcolors(self):\n        if 'line_color' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']:\n            return pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['line_color']\n        return pytplot.tplot_utilities.rgb_color(['k', 'r', 'seagreen', 'b', 'darkturquoise', 'm', 'goldenrod'])\n\n    def _setcolormap(self):\n        colors = []\n        if 'colormap' 
in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']:\n for cm in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['colormap']:\n colors.append(pytplot.tplot_utilities.return_lut(cm))\n\n return colors\n return [pytplot.tplot_utilities.return_lut('inferno')]\n\n @staticmethod\n def getaxistype():\n axis_type = 'lat'\n link_y_axis = True\n return (axis_type, link_y_axis)\n\n def _setxrange(self):\n if 'map_x_range' in pytplot.tplot_opt_glob:\n self.plotwindow.setXRange(pytplot.tplot_opt_glob['map_x_range'][0], pytplot.tplot_opt_glob['map_x_range'][1])\n else:\n self.plotwindow.setXRange(0, 360)\n\n def _setyrange(self):\n if 'map_y_range' in pytplot.tplot_opt_glob:\n self.plotwindow.setYRange(pytplot.tplot_opt_glob['map_y_range'][0], pytplot.tplot_opt_glob['map_y_range'][1])\n else:\n self.plotwindow.vb.setYRange(-90, 90)\n\n def _setzrange(self):\n if 'z_range' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']:\n self.zmin = pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['z_range'][0]\n self.zmax = pytplot.data_quants[self.tvar_name].attrs['plot_options']['zaxis_opt']['z_range'][1]\n else:\n dataset_temp = pytplot.data_quants[self.tvar_name].where(pytplot.data_quants[self.tvar_name] != np.inf)\n dataset_temp = dataset_temp.where(dataset_temp != -np.inf)\n if self.zscale == 'log':\n dataset_temp = dataset_temp.where(dataset_temp > 0)\n self.zmax = dataset_temp.max().max().values\n self.zmin = dataset_temp.min().min().values\n\n def _addtimebars(self):\n tbardict = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar']\n ltbar = len(tbardict)\n datasets = [\n pytplot.data_quants[self.tvar_name]]\n for oplot_name in pytplot.data_quants[self.tvar_name].attrs['plot_options']['overplots']:\n datasets.append(pytplot.data_quants[oplot_name])\n\n for dataset in datasets:\n dataset = pytplot.tplot_utilities.convert_tplotxarray_to_pandas_dataframe(dataset.name)\n for i in range(ltbar):\n test_time = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['location']\n color = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['line_color']\n pointsize = pytplot.data_quants[self.tvar_name].attrs['plot_options']['time_bar'][i]['line_width']\n time = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].coords['time']\n latitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].values\n longitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lon']].values\n nearest_time_index = np.abs(time - test_time).argmin()\n lat_point = latitude[nearest_time_index]\n lon_point = longitude[nearest_time_index]\n self.plotwindow.scatterPlot([lon_point], [lat_point], size=pointsize, pen=(pg.mkPen(None)), brush=color)\n\n def _setbackground(self):\n if 'alpha' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']:\n alpha = pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['alpha']\n else:\n alpha = 1\n if 'basemap' in pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']:\n if os.path.isfile(pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['basemap']):\n from matplotlib.pyplot import imread\n img = imread((pytplot.data_quants[self.tvar_name].attrs['plot_options']['extras']['basemap']), format='RGBA')\n img = img[::-1]\n bm = ColorbarImage(image=img, opacity=alpha)\n 
bm.setRect(QtCore.QRect(0, -90, 360, 180))\n self.plotwindow.addItem(bm)\n\n def _set_crosshairs(self):\n self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=(pg.mkPen('k')))\n self.hLine = pg.InfiniteLine(angle=0, movable=False, pen=(pg.mkPen('k')))\n self.plotwindow.addItem((self.vLine), ignoreBounds=True)\n self.plotwindow.addItem((self.hLine), ignoreBounds=True)\n self.vLine.setVisible(False)\n self.hLine.setVisible(False)\n\n def _addtimelistener(self):\n self.spacecraft_position = self.plotwindow.scatterPlot([], [], size=14, pen=(pg.mkPen(None)), brush='b')\n pytplot.hover_time.register_listener(self._time_mover)\n\n def _time_mover(self, time, name):\n if name != self.tvar_name:\n hover_time = time\n time = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].coords['time']\n latitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lat']].values\n longitude = pytplot.data_quants[pytplot.data_quants[self.tvar_name].attrs['plot_options']['links']['lon']].values\n nearest_time_index = np.abs(time - hover_time).argmin()\n lat_point = latitude[nearest_time_index]\n lon_point = longitude[nearest_time_index]\n self.spacecraft_position.setData([lon_point], [lat_point])","sub_path":"pycfiles/pytplot-1.6.8.tar/TVarFigureMap.cpython-37.py","file_name":"TVarFigureMap.cpython-37.py","file_ext":"py","file_size_in_byte":16057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643477221","text":"#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 09:57, 17/03/2020 %\n# %\n# Email: nguyenthieu2102@gmail.com %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom random import random, randint\nfrom numpy import maximum, minimum\nfrom copy import deepcopy\nfrom mealpy.root import Root\n\n\nclass BaseABC(Root):\n \"\"\"\n Taken from book: Clever Algorithms\n - Improved: _create_neigh_bee__\n \"\"\"\n ID_POS = 0\n ID_FIT = 1\n\n def __init__(self, root_paras=None, epoch=750, pop_size=100, couple_bees=(16, 4), patch_variables=(5.0, 0.985), sites=(3, 1)):\n Root.__init__(self, root_paras)\n self.epoch = epoch\n self.pop_size = pop_size\n self.e_bees = couple_bees[0] # number of bees which provided for good location and other location\n self.o_bees = couple_bees[1]\n self.patch_size = patch_variables[0] # patch_variables = patch_variables * patch_factor (0.985)\n self.patch_factor = patch_variables[1]\n self.num_sites = sites[0] # 3 bees (employed bees, onlookers and scouts), 1 good partition\n self.elite_sites = sites[1]\n\n def _create_neigh_bee__(self, individual=None, patch_size=None):\n t1 = randint(0, len(individual) - 1)\n new_bee = deepcopy(individual)\n new_bee[t1] = (individual[t1] + random() * patch_size) if random() < 0.5 else (individual[t1] - random() * patch_size)\n new_bee[t1] = maximum(self.domain_range[0], minimum(self.domain_range[1], new_bee[t1]))\n return [new_bee, self._fitness_model__(new_bee)]\n\n\n def _search_neigh__(self, parent=None, neigh_size=None): # parent: [ vector_individual, fitness ]\n \"\"\"\n Search 1 best solution in neigh_size solution\n \"\"\"\n neigh = [self._create_neigh_bee__(parent[self.ID_POS], self.patch_size) for _ in range(0, 
neigh_size)]\n return self._get_global_best__(neigh, self.ID_FIT, self.ID_MIN_PROB)\n\n def _create_scout_bees__(self, num_scouts=None):\n return [self._create_solution__() for _ in range(0, num_scouts)]\n\n def _train__(self):\n pop = [self._create_solution__() for _ in range(0, self.pop_size)]\n pop, g_best = self._sort_pop_and_get_global_best__(pop, self.ID_FIT, self.ID_MIN_PROB)\n\n for epoch in range(0, self.epoch):\n next_gen = []\n for i in range(0, self.num_sites):\n if i < self.elite_sites:\n neigh_size = self.e_bees\n else:\n neigh_size = self.o_bees\n next_gen.append(self._search_neigh__(pop[i], neigh_size))\n\n scouts = self._create_scout_bees__(self.pop_size - self.num_sites)\n pop = next_gen + scouts\n\n ## sort pop and update global best\n g_best, pop = self._sort_pop_and_update_global_best__(pop, self.ID_MIN_PROB, g_best)\n self.patch_size = self.patch_size * self.patch_factor\n self.loss_train.append(g_best[self.ID_FIT])\n if self.print_train:\n print(\"> Epoch: {}, patch_size: {}, Best fit: {}\".format(epoch + 1, self.patch_size, g_best[self.ID_FIT]))\n\n return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train","sub_path":"mealpy/swarm_based/ABC.py","file_name":"ABC.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504223755","text":"\"\"\"\nUpdated on FRI DEC 29 13:56:39 2018\n\n@author: Zeyuan Feng\n\n@contributor: fahah & Lianxin Zhang\n\n*Get the command from sailboat object.\n*Execute the command\n*Update the global values\n\"\"\"\n\nimport time\nimport globalvar as gl\nfrom sailboat_v3 import sailboat\nimport math\nimport serial \nimport re\n\n\n\ndef send(ser,rudder,sail,heading_angle):\n rudder_output=75-rudder*69\n # print(rudder_output)\n sail_output=int(sail*50+20)\n # if math.sin(heading_angle-math.pi/2)>0:\n # sail_output=35+(sail_output-35)*9/8\n command=rudder_output//1*100+sail_output\n # print(sail_output,command)\n command=(',,'+str(command)+',').encode(encoding='utf-8')\n # ser.write(command)\n\n#-------------Receiving Commands-----------------\ndef run(ser):\n # print('!')\n #-----------ESC Configuration----------------------\n \n # ser=gl.get_value('ser')\n # gl.set_value('x',2)\n # gl.set_value('y',2)\n gl.set_value('flag',False)\n rudder=0\n sail=0\n\n \n \n my_boat=sailboat(runtimes=3000,target=[3.2,5.5],area=[1.4,2.6],position=[0.5,1.5,0,0])\n target=my_boat.target\n gl.set_value('target',target)\n times=0\n last_rudder_value=0\n last_sail_value=0\n while True:\n times=(times+1)%10\n # get_message(ser)\n if my_boat.flag==True:\n gl.set_value('flag',True)\n print('Program stops!')\n break\n\n ## change the frequency of communication when the sailboat arrives at its target area\n # if my_boat.if_keeping==True:\n # gl.set_value('frequency',20)\n # else:\n # gl.set_value('frequency',10)\n\n # frequency=gl.get_value('frequency')\n frequency=10\n ##get information of sailboat\n x=gl.get_value('ob_x')\n y=gl.get_value('ob_y')\n heading_angle=gl.get_value('heading_angle')\n roll=gl.get_value('roll')\n my_boat.frequency=frequency\n # print(gl.get_value('true_wind'))\n rudder,sail,desired_angle,point_list=my_boat.update_state(gl.get_value('true_wind'),[x,y,roll,heading_angle])\n # print('sail',sail)\n \n if gl.get_value('keyboard_flag'):\n rudder=gl.get_value('rudder')\n sail=gl.get_value('sail')\n v=my_boat.velocity[0]\n u=my_boat.velocity[1]\n p=my_boat.velocity[2]\n w=my_boat.velocity[3]\n # tacking_angle=my_boat.tacking_angle\n 
keeping_state=my_boat.keeping_state\n        \n        ##control the rudder and sail\n        \n        rudder= float('{0:.2f}'.format(rudder))\n        sail= float('{0:.2f}'.format(sail))\n        send(ser,rudder,sail,heading_angle)\n\n        # print(rudder)\n        last_rudder_value=rudder\n        last_sail_value=sail\n\n        #change the global variables\n        gl.set_value('tacking_angle',my_boat.tacking_angle)\n        # gl.set_value('v',v)\n        # gl.set_value('u',u)\n        # gl.set_value('p',p)\n        # gl.set_value('w',w)\n        gl.set_value('target_v',my_boat.target_v)\n        # print(my_boat.target_v)\n        # print(rudder,sail)\n        # print(u,v,w)\n        if gl.get_value('keyboard_flag')==False:\n            gl.set_value('rudder',rudder) \n            gl.set_value('sail',sail) \n            # print(rudder,sail,'2')\n        gl.set_value('desired_angle',desired_angle)\n        gl.set_value('keeping_state',keeping_state)\n        gl.set_value('point_list',point_list)\n        \n        time.sleep(1/frequency)\n    \n    # End the program \n    \n    # send(ser,0,0,heading_angle)\n    print('Motors Stopped \\n')\n    time.sleep(0.1)\n    \n    \n\ndef sign(x):\n    if x>0:\n        return 1\n    elif x==0:\n        return 0\n    else:\n        return -1\n\ndef get_message(ser):\n    mess=0\n    \n    mess=ser.readline()\n    mess=bytes.decode(mess)\n    mess=str(mess)\n    # print(mess)\n    if mess!=0:\n        \n        mess=mess.split(',')\n        # print(mess)\n        a=mess[0]\n        a=re.sub('\D','',a)\n        voltage=mess[1]\n        current=mess[2]\n\n        \n        try: \n            b=int(a)\n        except:\n            b=b\n        \n        heading_angle=b/57.32\n        if heading_angle>math.pi:\n            heading_angle-=math.pi*2\n        \n        gl.set_value('heading_angle',heading_angle)\n        gl.set_value('current',current)\n        gl.set_value('voltage',voltage)\n\n        frequency=gl.get_value('frequency')\n    ser.flushInput()\n#        conn.close()\n#        time.sleep(1)\n#        print('Connection closed!')\n\n#------------------------------------------------\n","sub_path":"controller_4_DoF.py","file_name":"controller_4_DoF.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49322078","text":"t=int(input())\r\nwhile t>0:\r\n    n=int(input())\r\n    ar=list(map(int,input().split()))\r\n    start=0\r\n    end=n-1\r\n    count=0\r\n    while start<end:\r\n        if ar[start]==ar[end]:\r\n            start=start+1\r\n            end=end-1\r\n        elif ar[start]>ar[end]:\r\n            ar[end-1]=ar[end]+ar[end-1]\r\n            end=end-1\r\n            count=count+1\r\n        else:\r\n            ar[start+1]=ar[start]+ar[start+1]\r\n            start=start+1\r\n            count=count+1\r\n    print(count)\r\n    t-=1\r\n","sub_path":"ZZZ/Palindromic_Array.py","file_name":"Palindromic_Array.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230048123","text":"#-*- coding: utf-8 -*-\nimport datetime\nimport requests\nimport logging\nimport base64\nfrom odoo import models, fields, api, exceptions, tools, _\n\nfrom itertools import chain\n\nfrom odoo.exceptions import UserError, ValidationError\nfrom odoo.tools import float_repr\n\nfrom odoo.addons import decimal_precision as dp\n\nfrom odoo.tools import pycompat\n\nclass product_pricelist(models.Model):\n    _inherit = 'product.pricelist';\n\n    unisyn_priceClassID = fields.Integer(\"UniSyn Price Class ID\", help=\"The ID used by UniSyn to store this price list as a UniSyn price class.\", store=True)\n    \n    # doing this to change default from formula to percentage\n    def _get_default_item_ids(self):\n        ProductPricelistItem = self.env['product.pricelist.item']\n        vals = ProductPricelistItem.default_get(list(ProductPricelistItem._fields))\n        vals.update(compute_price='percentage')\n        return [[0, False, vals]]\n    \n    item_ids = fields.One2many(default=_get_default_item_ids)\n    \n    @api.model\n    def create(self, values):\n        # Grab the UniSynAPIClient model which 
contains our integration library\n# UniSynAPIClient = self.env['unisyn.apiclient']\n# UniSynAPIClientValidSettings = UniSynAPIClient.validSettings()\n # Do the actual create method like it normally would\n recordDetails = super(product_pricelist, self).create(values)\n\n# if UniSynAPIClientValidSettings:\n# # Format the record\n# unisynData = self.formatPricelist(recordDetails, values)\n\n# response = UniSynAPIClient.doCall('POST', 'OdooReceiver/addPricelist', {'pricelistDetails': unisynData})\n\n# error = None\n# unisynPriceClassID = 0\n# try:\n# unisynPriceClassID = response['response']['id']\n# except:\n# try:\n# error = response['error']\n# except:\n# error = 'UniSyn add request failed'\n\n# if not error:\n# try:\n# unisynValues = {\n# 'unisyn_priceClassID': unisynPriceClassID,\n# }\n# super(product_pricelist, recordDetails).write(unisynValues)\n# except Exception as e:\n# UniSynAPIClient.doCall('DELETE', 'OdooReceiver/deletePricelist/odooID/' + str(recordDetails.id)) # failed to update Odoo so remove the record from our system\n# error = e\n\n# if error:\n# raise exceptions.UserError(error)\n\n return recordDetails\n \n \n @api.multi\n def write(self, values):\n # Grab the UniSynAPIClient model which contains our integration library\n# UniSynAPIClient = self.env['unisyn.apiclient']\n# UniSynAPIClientValidSettings = UniSynAPIClient.validSettings()\n for record in self:\n # Do the actual write method like it normally would\n super(product_pricelist, record).write(values)\n \n changingName = False\n deleting = False\n try:\n if values['name']:\n changingName = True\n except:\n # not modifying active status\n pass\n try:\n if values['active'] == False:\n deleting = True\n except:\n # not modifying active status\n pass\n if record.id == 1 and (changingName or deleting):\n raise exceptions.UserError('You should not rename or deactive the public price list. 
You should make a new pricelist and assign your customer to that pricelist instead.')\n \n# if UniSynAPIClientValidSettings:\n# deleteProductPricelist = False\n# try:\n# if values['active'] == False:\n# deleteProductPricelist = True\n# except:\n# # not modifying active status\n# pass\n\n# if deleteProductPricelist:\n# # pricelist deactivated so call our API delete method on this pricelist to deactivate it in our system\n# response = UniSynAPIClient.doCall('DELETE', 'OdooReceiver/deletePricelist/odooID/' + str(record.id))\n# failedDelete = True\n# try:\n# if response['response'] == True:\n# failedDelete = False\n# except:\n# pass\n\n# if failedDelete:\n# try:\n# error = response['error']\n# except:\n# error = 'UniSyn update request failed'\n# raise exceptions.UserError(error)\n# return True # we call it done here because we no longer sync this information\n\n# switchToAdd = False\n# try:\n# if values['active'] == True:\n# # price list was inactive and is now active so call our API add method instead of update\n# switchToAdd = True\n# except:\n# # not modifying active status\n# pass\n\n# recordDetails = record.browse(record.id)\n\n# if switchToAdd:\n# self.unisynAddPricelist(UniSynAPIClient, record, recordDetails)\n# else:\n# # Format the record\n# unisynData = self.formatPricelist(recordDetails, values)\n\n# response = UniSynAPIClient.doCall('POST', 'OdooReceiver/updatePricelist', {'pricelistDetails': unisynData})\n# try:\n# unisynPriceClassID = response['response']['id']\n# unisynValues = {\n# 'unisyn_priceClassID': unisynPriceClassID,\n# }\n# super(product_pricelist, record).write(unisynValues)\n# except:\n# try:\n# error = response['error']\n# except:\n# error = 'UniSyn update request failed'\n\n# if error == 'Pricelist not added from Odoo yet':\n# # We need to get all the pricelist details and call the add method instead\n# self.unisynAddPricelist(UniSynAPIClient, record, recordDetails)\n# else:\n# raise exceptions.UserError(error)\n \n return True\n \n \n @api.multi\n def unlink(self):\n # Grab the UniSynAPIClient model which contains our integration library\n# UniSynAPIClient = self.env['unisyn.apiclient']\n# UniSynAPIClientValidSettings = UniSynAPIClient.validSettings()\n for record in self:\n if record.id == 1:\n raise exceptions.UserError('You cannot delete the Public Pricelist')\n \n super(product_pricelist, record).unlink()\n \n# if UniSynAPIClientValidSettings:\n# response = UniSynAPIClient.doCall('DELETE', 'OdooReceiver/deletePricelist/odooID/' + str(record.id))\n\n# failedDelete = True\n# try:\n# if response['response'] == True:\n# failedDelete = False\n# except:\n# pass\n\n# if failedDelete:\n# try:\n# error = response['error']\n# except:\n# error = 'UniSyn update request failed'\n# raise exceptions.UserError(error)\n\n return True\n \n \n @api.multi\n def unisynAddPricelist(self, UniSynAPIClient, record, recordDetails):\n # Format the record\n unisynData = self.formatPricelist(recordDetails, record.read()[0]) # pass the full record details as values param so that our system gets all pricelist info to add\n \n response = UniSynAPIClient.doCall('POST', 'OdooReceiver/addPricelist', {'pricelistDetails': unisynData})\n try:\n unisynPriceClassID = response['response']['id']\n unisynValues = {\n 'unisyn_priceClassID': unisynPriceClassID,\n }\n super(product_pricelist, record).write(unisynValues)\n except:\n try:\n error = response['error']\n except:\n error = 'UniSyn update request failed'\n raise exceptions.UserError(error)\n \n \n @api.multi\n def formatPricelist(self, 
recordDetails, values):\n propertiesChanged = values.keys()\n# for key, value in values.items():\n# logging.warning({key: value})\n \n # initialize properties we always send for matching purposes\n unisynData = {\n \"odooID\": recordDetails.id,\n \"unisynPriceClassID\": recordDetails.unisyn_priceClassID,\n }\n \n if 'name' in propertiesChanged:\n unisynData['name'] = recordDetails.name\n \n if 'item_ids' in propertiesChanged:\n unisynData['pricelistItems'] = []\n for priceListItem in recordDetails.item_ids:\n thisPriceListItem = {\n \"min_quantity\": priceListItem.min_quantity,\n \"compute_price\": priceListItem.compute_price,\n \"fixed_price\": priceListItem.fixed_price,\n \"percent_price\": priceListItem.percent_price,\n \"base\": priceListItem.base,\n \"price_discount\": priceListItem.price_discount,\n \"price_surcharge\": priceListItem.price_surcharge,\n }\n \n if priceListItem.applied_on == '3_global':\n thisPriceListItem['applied_on'] = 'global'\n if priceListItem.applied_on == '1_product':\n thisPriceListItem['applied_on'] = 'product'\n if priceListItem.applied_on == '0_product_variant':\n thisPriceListItem['applied_on'] = 'product variant'\n \n if thisPriceListItem['applied_on'] == 'product' and priceListItem.product_tmpl_id:\n thisPriceListItem['productOdooID'] = priceListItem.product_tmpl_id.id\n if thisPriceListItem['applied_on'] == 'product variant' and priceListItem.product_id:\n thisPriceListItem['productOdooID'] = priceListItem.product_id.id\n \n unisynData['pricelistItems'].append(thisPriceListItem)\n \n return unisynData\n \n \nclass product_pricelist_item(models.Model):\n _inherit = 'product.pricelist.item';\n \n applied_on = fields.Selection(selection=[('3_global', 'Global'), ('1_product', 'Product'), ('0_product_variant', 'Product Variant')])\n# compute_price = fields.Selection(selection=[('fixed', 'Fix Price'), ('percentage', 'Percentage (discount)')], default='percentage')\n \n base = fields.Selection([\n ('list_price', 'List Price'),\n ('standard_price', 'Cost')], \"Based on\",\n default='list_price', required=True,\n help='Base price for computation.\\n'\n 'List Price: The base price will be the Sale/public Price.\\n'\n 'Cost Price : The base price will be the cost price.')\n\n unisyn_costMarkup = fields.Float('Cost Markup', default=0, digits=(16, 2))\n\n @api.onchange('unisyn_costMarkup')\n def _update_price_discount(self):\n if self.base == 'standard_price':\n unisyn_costMarkup = self.unisyn_costMarkup or 0\n self.price_discount = unisyn_costMarkup * -1\n \n \n @api.one\n @api.depends('categ_id', 'product_tmpl_id', 'product_id', 'compute_price', 'fixed_price', \\\n 'pricelist_id', 'percent_price', 'price_discount', 'price_surcharge')\n def _get_pricelist_item_name_price(self):\n if self.categ_id:\n self.name = _(\"Category: %s\") % (self.categ_id.name)\n elif self.product_tmpl_id:\n self.name = self.product_tmpl_id.name\n elif self.product_id:\n self.name = self.product_id.display_name.replace('[%s]' % self.product_id.code, '')\n else:\n self.name = _(\"All Products\")\n\n if self.compute_price == 'fixed':\n self.price = (\"%s %s\") % (\n float_repr(\n self.fixed_price,\n self.pricelist_id.currency_id.decimal_places,\n ),\n self.pricelist_id.currency_id.name\n )\n elif self.compute_price == 'percentage':\n self.price = _(\"%s %% discount\") % (self.percent_price)\n else:\n if self.base == 'standard_price':\n self.price = _(\"%s %% cost markup and %s surcharge\") % (self.unisyn_costMarkup, self.price_surcharge)\n else:\n self.price = _(\"%s %% discount and %s 
surcharge\") % (self.price_discount, self.price_surcharge)","sub_path":"unisyn_integration_app/models/.ipynb_checkpoints/unisyn_pricelist-checkpoint.py","file_name":"unisyn_pricelist-checkpoint.py","file_ext":"py","file_size_in_byte":13223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343479380","text":"# ---\n# jupyter:\n#   jupytext:\n#     formats: ipynb,py:light\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.4'\n#     jupytext_version: 1.2.4\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\nimport numpy as np\nimport holoviews as hv\n\nhv.extension('bokeh')\n\nfrom holoviews import streams, opts, dim\n\n# ## 2D plots with interactive slicing\n\nls = np.linspace(0, 10, 200)\nxx, yy = np.meshgrid(ls, ls)\nbounds=(0,0,10,10)   # Coordinate system: (left, bottom, right, top)\n\nenergy = hv.Dimension('energy', label='E', unit='MeV')\ndistance = hv.Dimension('distance', label='d', unit='m')\ncharge = hv.Dimension('charge', label='Q', unit='pC')\n\n# +\nimage = hv.Image(np.sin(xx)*np.cos(yy), bounds=bounds, kdims=[energy, distance], vdims=charge)\npointer = streams.PointerXY(x=5,y=5, source=image)\n\n\ndmap = hv.DynamicMap(lambda x, y: hv.VLine(x) * hv.HLine(y), streams=[pointer])\nx_sample = hv.DynamicMap(lambda x, y: image.sample(energy=x).opts(color='darkred'), streams=[pointer])\ny_sample = hv.DynamicMap(lambda x, y: image.sample(distance=y).opts(color='lightsalmon'), streams=[pointer])\n\npointer_dmap = hv.DynamicMap(lambda x, y: hv.Points([(x, y)]), streams=[pointer])\npointer_x_sample = hv.DynamicMap(lambda x, y: hv.Points([(y, image[x,y])]), streams=[pointer])\npointer_y_sample = hv.DynamicMap(lambda x, y: hv.Points([(x, image[x,y])]), streams=[pointer])\n\nlayout = (image * dmap * pointer_dmap) + ((x_sample * pointer_x_sample) + (y_sample * pointer_y_sample))\n\nlayout.opts(\n    opts.Image(cmap='Viridis', aspect='square', frame_width=300, colorbar=True, tools=['hover']),\n    opts.Curve(framewise=False, ylim=(-1, 1)),\n    opts.VLine(color='darkred'),\n    opts.HLine(color='lightsalmon'),\n    opts.Points(color='red', marker='o', size=10)\n).cols(3)\n# -\n\n# ## Interactive integration\n\nxs = np.linspace(-3, 3, 400)\n\n\ndef function(xs, time):\n    \"Some time varying function\"\n    return np.exp(np.sin(xs+np.pi/time))\n\n\ndef integral(limit_a, limit_b, y, time):\n    limit_a = -3 if limit_a is None else np.clip(limit_a,-3,3)\n    limit_b = 3 if limit_b is None else np.clip(limit_b,-3,3)\n    curve = hv.Curve((xs, function(xs, time)))\n    area = hv.Area ((xs, function(xs, time)))[limit_a:limit_b]\n    summed = area.dimension_values('y').sum() * 0.015 # Numeric approximation\n    return (area * curve * hv.VLine(limit_a) * hv.VLine(limit_b) * hv.Text(limit_b - 0.8, 2.0, '%.2f' % summed))\n\n\nintegral_streams = [\n    streams.Stream.define('Time', time=1.0)(),\n    streams.PointerX().rename(x='limit_b'),\n    streams.Tap().rename(x='limit_a')\n]\n\nintegral_dmap = hv.DynamicMap(integral, streams=integral_streams)\n\nintegral_dmap.opts(\n    opts.Area(color='#fff8dc', line_width=2),\n    opts.Curve(color='black'),\n    opts.VLine(color='red'))\n\nimage.dimensions()\n\ndim('charge').min().apply(image)\n\ndim('charge').max().apply(image)\n\n\n","sub_path":"jupyter/holoviews_examples.py","file_name":"holoviews_examples.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327531642","text":"from django.shortcuts import 
render\nfrom django.utils import timezone\nfrom .models import Habitacion\nfrom django.shortcuts import render, get_object_or_404\nfrom .forms import HabForm\nfrom django.shortcuts import redirect\nfrom .models import Post\nfrom .forms import PostForm\nfrom django.contrib.auth.decorators import login_required\n\n\ndef hab_list(request):\n\thabs = Habitacion.objects.all()\n\treturn render(request, 'blog/hab_list.html', {'habs': habs})\n\n\ndef hab_detail (request, pk):\n\tpkh=get_object_or_404(Habitacion, pk=pk)\n\treturn render(request, 'blog/hab_detail.html',{'pkh':pkh})\n\ndef hab_new(request):\n\tif request.method == \"POST\":\n\t\tform = HabForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpkh = form.save(commit=False)\n\t\t\tpkh.save()\n\t\t\treturn redirect('hab_detail', pk=pkh.pk)\n\telse:\n\t\tform = HabForm()\n\treturn render(request, 'blog/hab_edit.html', {'form': form})\n\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')\n\treturn render(request, 'blog/post_list.html', {'posts':posts})\n\ndef post_detail (request, pk):\n\tpkh=get_object_or_404(Post, pk=pk)\n\treturn render(request, 'blog/post_detail.html',{'pkh':pkh})\n\n@login_required\ndef post_new(request):\n if request.method == \"POST\":\n form = PostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm()\n return render(request, 'blog/post_edit.html', {'form': form})\n\n@login_required\ndef post_edit(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = request.user\n post.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, 'blog/post_edit.html', {'form': form})\n@login_required\ndef post_draft_list(request):\n posts = Post.objects.filter(published_date__isnull=True).order_by('Create_date')\n return render(request, 'blog/post_draft_list.html', {'posts': posts})\n\n@login_required\ndef post_publish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.publish()\n return redirect('post_detail', pk=pk)\n\n@login_required\ndef post_remove(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.delete()\n return redirect('post_list')\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"603198283","text":"import argparse\nimport http.client as httplib\nimport os\nimport sys\nfrom dataclasses import dataclass\nfrom datetime import datetime, timedelta\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom urllib.parse import urlparse, urlunparse\n\nimport requests\nfrom click import echo\nfrom tabulate import tabulate\n\n\nBASE_ENV_VAR_NAME = 'JEN_COMPARE_DEFAULT_BASE'\nDEFAULT_TIMEOUT = 5 # Seconds\n\n\n@dataclass\nclass BuildInfo:\n\n number: Optional[int]\n building: Optional[bool]\n timestamp: Optional[datetime]\n duration: Optional[int]\n revision: Optional[str]\n branch_name: Optional[str]\n\n @property\n def number_str(self) -> Optional[str]:\n if self.number is None:\n return None\n number_str = str(self.number)\n if self.building:\n number_str = '* ' + number_str\n return number_str\n\n @property\n def duration_str(self) -> Optional[str]:\n 
estimated_remaining = self.estimated_time_remaining\n if estimated_remaining:\n estimated_remaining_str = self._secs_to_mins_secs_str(estimated_remaining)\n prefix = '-' if estimated_remaining >= 0 else '+'\n return prefix + estimated_remaining_str\n if self.duration is None:\n return None\n seconds = self.duration // 1000\n duration_str = self._secs_to_mins_secs_str(seconds)\n if self.building:\n # Building but can't estimate time remaining for some\n # reason; show estimated duration.\n duration_str = '?' + duration_str\n return duration_str\n\n @property\n def estimated_end_time(self) -> Optional[datetime]:\n if not self.building:\n return None\n if self.duration is None or self.timestamp is None:\n return None\n seconds: int = self.duration // 1000\n return self.timestamp + timedelta(seconds=seconds)\n\n @property\n def estimated_time_remaining(self) -> Optional[int]:\n end_time = self.estimated_end_time\n if end_time is None:\n return None\n return int((end_time - datetime.utcnow()).total_seconds())\n\n @staticmethod\n def _secs_to_mins_secs_str(secs: int) -> str:\n mins, secs = divmod(abs(secs), 60)\n return '{}m{:02}s'.format(mins, secs)\n\n @property\n def timestamp_str(self) -> Optional[str]:\n if self.timestamp is None:\n return None\n timestamp_str = self.timestamp.strftime('%Y-%m-%d %H:%M:%S')\n return timestamp_str\n\n @property\n def tabulation_line(self) -> Tuple[Optional[str], ...]:\n return (\n self.number_str,\n self.timestamp_str,\n self.duration_str,\n self.revision[:8] if self.revision else None,\n self.branch_name,\n )\n\n\ndef main():\n args = parse_args()\n try:\n get_and_report_build_history(args.base, args.job, args.timeout)\n except KeyboardInterrupt:\n print()\n\n\ndef get_and_report_build_history(base: str, job: str, timeout: int):\n build_history = get_build_history(base, job, timeout)\n if not build_history:\n return\n report_build_history(build_history)\n\n\ndef get_build_history(base: str, job: str, timeout: int) -> List[BuildInfo]:\n api_url = get_job_api_url(base, job)\n result = get_url(api_url, timeout)\n if not result:\n return []\n builds = [parse_build(build) for build in result['builds']]\n return builds\n\n\ndef get_job_api_url(base: str, job: str) -> str:\n tree = (\n 'builds[number,building,timestamp,duration,estimatedDuration,'\n 'actions[lastBuiltRevision[SHA1,branch[name]]]]'\n )\n url = '{}/job/{}/api/json?depth=1&pretty=true&tree={}'.format(base, job, tree)\n return url\n\n\ndef parse_build(build: Dict[str, Any]) -> BuildInfo:\n number: Optional[int] = build.get('number')\n building: Optional[bool] = build.get('building')\n timestamp: Optional[datetime]\n try:\n raw_timestamp: int = build['timestamp']\n except KeyError:\n timestamp = None\n else:\n timestamp = parse_build_timestamp(raw_timestamp)\n duration: Optional[int]\n duration = build.get('duration')\n if building and duration == 0:\n duration = build.get('estimatedDuration')\n revision, branch_name = get_revision_and_branch_name(build)\n return BuildInfo(number, building, timestamp, duration, revision, branch_name)\n\n\ndef parse_build_timestamp(raw_timestamp: int) -> datetime:\n timestamp = datetime.utcfromtimestamp(raw_timestamp / 1000)\n return timestamp\n\n\ndef get_revision_and_branch_name(\n build: Dict[str, Any]\n) -> Tuple[Optional[str], Optional[str]]:\n trigger_data = get_build_trigger_data(build)\n revision = trigger_data.get('SHA1')\n try:\n branch_name = trigger_data['branch'][0]['name']\n except (IndexError, KeyError):\n branch_name = None\n else:\n branch_name = 
normalise_branch_name(branch_name)\n    return revision, branch_name\n\n\ndef get_build_trigger_data(build):\n    for action in build.get('actions', []):\n        if action.get('_class') == 'hudson.plugins.git.util.BuildData':\n            return action.get('lastBuiltRevision', {})\n    return {}\n\n\ndef normalise_branch_name(branch_name):\n    branch_name = branch_name.replace('refs/', '')\n    branch_name = branch_name.replace('remotes/', '')\n    branch_name = branch_name.replace('origin/', '')\n    if branch_name == 'detached':\n        return '** detached **'\n    return branch_name\n\n\ndef report_build_history(build_history: List[BuildInfo]):\n    headers = ('Build', 'Timestamp', 'Time', 'Revision', 'Branch')\n    print(\n        # Ignoring spurious mypy error on the next line\n        tabulate(  # type:ignore\n            [build.tabulation_line for build in build_history],\n            headers=headers,\n            tablefmt='plain',\n            colalign=('right', 'left', 'right'),\n        )\n    )\n\n\ndef get_url(url: str, timeout: int) -> Any:\n    try:\n        response = requests.get(url, timeout=timeout)\n    except requests.exceptions.ConnectTimeout:\n        echo('Timed out: {}'.format(url))\n        return {}\n    code = response.status_code\n    if code != 200:\n        url_without_query = urlunparse(urlparse(url)[:4] + (None, None))  # type: ignore\n        echo('{} {}: {}'.format(code, httplib.responses[code], url_without_query))\n        return {}\n    return response.json()\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n\n    def check_positive(value):\n        try:\n            ivalue = int(value)\n        except ValueError:\n            ivalue = 0\n        if ivalue <= 0:\n            raise argparse.ArgumentTypeError(\n                \"{} is not a positive integer\".format(value)\n            )\n        return ivalue\n\n    default_base = os.environ.get(BASE_ENV_VAR_NAME)\n    parser.add_argument(\n        'base',\n        metavar='BASE_URL',\n        nargs='?',\n        help=(\n            'Jenkins URL base (e.g. http://localhost:8000). '\n            'If not specified, the default value is taken from the env var '\n            '{base_env_var_name} if it is set '\n            '(current value: {base_env_var_value}).'\n        ).format(\n            base_env_var_name=BASE_ENV_VAR_NAME,\n            base_env_var_value=default_base or 'none set',\n        ),\n    )\n    parser.add_argument('job', metavar='JOB', help='job name (e.g. main)')\n    parser.add_argument(\n        '-t',\n        '--timeout',\n        metavar='N',\n        type=check_positive,\n        help='HTTP timeout in seconds (default: {})'.format(DEFAULT_TIMEOUT),\n    )\n\n    args = parser.parse_args()\n\n    if not args.base:\n        if default_base:\n            args.base = default_base\n        else:\n            echo(\n                'No URL base specified (either on command line or env var); '\n                'giving up.'\n            )\n            sys.exit(1)\n\n    if args.timeout is None:\n        args.timeout = DEFAULT_TIMEOUT\n\n    return args\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/gentle_jenkins_tools/jen_job_history.py","file_name":"jen_job_history.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"185059219","text":"#!/usr/bin/env python3\n\n# The prime factors of 13195 are 5, 7, 13 and 29.\n\n# What is the largest prime factor of the number 600851475143?\n\nTARGET = 600851475143\n\nPRIME_FACTORS = []\n\n# O(sqrt n): trial division only needs to test divisors up to sqrt(num_to_check).\ndef is_prime_number_test(num_to_check):\n\n\tif num_to_check <= 1:\n\t\treturn False\n\n    # If num_to_check == 2 OR 3. 
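Both are prime, so we can return True right away. A quick trace for 29: 29 % 2 and 29 % 3 are nonzero, the loop starts at i = 5 (25 <= 29, but 29 % 5 and 29 % 7 are nonzero), then i = 11 with 121 > 29, so 29 is reported prime.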
\n\tif num_to_check <= 3:\n\t\treturn True\n\n\tif num_to_check % 2 == 0 or num_to_check % 3 == 0:\n\t\treturn False\n\n\ti = 5\n\twhile i * i <= num_to_check:\n\t\tif num_to_check % i == 0 or num_to_check % (i + 2) == 0:\n\t\t\treturn False\n\t\t# Why 6?\n\t\t# Our starting value for this control flow is 5; after we validate that i is\n\t\t# not a factor of num_to_check, we advance to 11, then 17, then 23, then 29.\n\t\t# 6 is simply an efficient increment that skips multiples of 2 and 3, so we do\n\t\t# not have to test every integer as a potential factor of num_to_check.\n\t\ti = i + 6\n\n\treturn True\n\n# O(sqrt n + sqrt n). Why? The loop determining the factors of \"target\" only tests divisors up to sqrt(target), \n# as does the algorithm above that checks whether a value is a prime number.\ndef find_largest_prime_factor(target):\n\tfactors = []\n\n\tthis_factor = 2\n\n\tif is_prime_number_test(target):\n\t\treturn target\n\t\n\t# Looping while this_factor * this_factor <= target bounds the search at sqrt(target)\n\t# (especially helpful in cases where the target is extremely large).\n\twhile this_factor * this_factor <= target:\n\t\tif target % this_factor == 0:\n\t\t\t# this_factor is a factor of our target.\n\t\t\t# divide target by this factor and assign the value to target.\n\t\t\ttarget //= this_factor\n\t\t\tprint('target after floor division and assignment: {}'.format(target))\n\t\t\tfactors.append(this_factor)\n\t\t\t\n\t\telse:\n\t\t\t# move to the next potential factor.\n\t\t\tthis_factor += 1\n\n\tif is_prime_number_test(target):\n\t\t# In this case, target is prime, and is a factor of the original target.\n\t\t# Because of the logic, this case does not get added to the factors array, but is likely to be the \n\t\t# largest prime factor of the original target.\n\t\treturn target\n\telse:\n\t\t# Return the last item in the list of factors.\n\t\treturn factors[-1]\n\n\n\nif __name__ == \"__main__\":\n\tresult = find_largest_prime_factor(TARGET)\n\tprint('result: {}'.format(result))\n\t# Verify that the result of our method is, indeed, prime.\n\tif is_prime_number_test(result):\n\t\tprint('is result prime factor: {}'.format(True))\n\telse:\n\t\tpass\n","sub_path":"project_euler/euler_three.py","file_name":"euler_three.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"263869140","text":"\r\nimport numpy as np\r\nimport mlp\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n#load data\r\ndata = np.load('dataSet.npy')\r\n\r\n\r\n#Set up Neural Network\r\ndata_in = data[:,:-1]\r\ntarget_in = data[:,2].reshape(data.shape[0],1)\r\n\r\nhidden_layers = 6\r\nNN = mlp.mlp(data_in,target_in,hidden_layers)\r\n\r\n#Analyze Neural Network Performance\r\nM=15\r\nX_train, X_valid, target_train, target_valid = train_test_split(data_in, target_in, test_size = 0.33, random_state = M)\r\neta=0.7\r\niteration=100\r\nprint('iteration is ',iteration)\r\nNN.earlystopping(X_train,target_train,X_valid,target_valid,eta,iteration)\r\nNN.confmat(data_in,target_in)\r\n\r\n","sub_path":"homework-MLP/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"508240722","text":"# Partition an array into two groups\n\nfrom typing import MutableSequence\n\ndef partition(a: MutableSequence) -> None:\n    # Partition the array and print the groups\n    n = len(a)\n    pl = 0          # left cursor\n    pr = n - 1      # right cursor\n    x = a[n // 2]   # pivot\n\n    while pl <= pr:\n        while a[pl] < x: \n            pl += 1\n        while a[pr] > 
x:\n            pr -= 1\n        if pl <= pr:\n            a[pl], a[pr] = a[pr], a[pl]\n            pl += 1\n            pr -= 1\n\n    print(f'Pivot: {x}')\n\n    print('Group <= pivot')\n    print(*a[0: pl])\n\n    if pl > pr + 1:\n        print('Group == pivot')\n        print(*a[pr + 1 : pl])\n\n    print('Group >= pivot')\n    print(*a[pr+1:n])\n\nif __name__ == '__main__':\n    print('Partitioning the array')\n    num = int(input('Number of elements: '))\n    x = [None] * num\n\n    for i in range(num):\n        x[i] = int(input(f'x[{i}]: '))\n\n    partition(x)\n","sub_path":"chapter06/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"27343942","text":"## EXERCISE 8\n## NAME SURNAME: Gianpier Yupanqui\n## LU 819/18\n\nimport numpy.linalg as npl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# A\nA = np.array([[1,2,3],[4,5,6]])\nU,s,V = npl.svd(A)\n# A.shape = (2,3) // 2: #rows and 3: #columns\n# s = [g1,g2,..] // the diagonal elements, not the diagonal matrix\nS = np.zeros((A.shape[0], A.shape[1]))\nS[:len(s), :len(s)] = np.diag(s)\nA_ = np.dot(U,np.dot(S,V))\nprint(np.mean(A_-A)) # This is due to floating-point error, but it is small\n\n\n# B\nimport imageio as img\nimage = img.imread('arbol.jpg',format='jpg')\n\ndef a_grises(image,r,g):\n    R = image[:,:,0] # Red channel matrix\n    G = image[:,:,1] # Green channel matrix\n    B = image[:,:,2] # Blue channel matrix\n    gris = r*R + g*G + (1-r-g)*B\n    return(gris)\n    \nr=g=1/3 # This one is darker\nimagen_gris = a_grises(image,r,g)\nplt.imshow(imagen_gris,cmap='gray',vmin=0,vmax=255)\nplt.savefig('ejemplo_en_escala_de_grises_1.jpg')\n\nr=0.3\ng=0.59 # This one is lighter\nimagen_gris = a_grises(image,r,g)\nplt.imshow(imagen_gris,cmap='gray',vmin=0,vmax=255)\nplt.savefig('ejemplo_en_escala_de_grises_2.jpg')\n \n# C\nA = imagen_gris\nU,s,V = npl.svd(A)\nS = np.zeros((A.shape[0], A.shape[1]))\nS[:len(s), :len(s)] = np.diag(s)\nA_ = np.dot(U,np.dot(S,V)) \nplt.imshow(A_,cmap='gray',vmin=0,vmax=255)\nplt.savefig('escala_de_grises_SVD.jpg')\n# If it is equal, they are practically identical\n\n# D\ndef reduce_svd(A,p):\n    U,s,V = npl.svd(A)\n    n_elementos = int(p*len(s))\n    s[len(s)-n_elementos:] = 0 # This works because s is an np.array, not a plain list\n    S = np.zeros((A.shape[0], A.shape[1]))\n    S[:len(s), :len(s)] = np.diag(s)\n    A_ = np.dot(U,np.dot(S,V))\n    return(A_)\n\n# The 90% case is in the iterations below\n# E\nimage = imagen_gris\nerror = []\nfor p in [0.9,0.8,0.5,0.1]:\n    print('Computing with p=' + str(p))\n    image_ = reduce_svd(image,p)\n    plt.imshow(image_,cmap='gray',vmin=0,vmax=255)\n    plt.savefig('SVD_reducido_' +str(p*100)+'p.jpg')\n    error.append(np.mean(np.abs(image_-image))/np.mean(image))\n\n\"\"\" How many eigenvalues do you consider necessary to approximate the image?\n    At least 20%; with fewer, the plot looks very blurry.\n    What is more, the white background turns gray in some spots.\n    Even so, for me an error < 0.01 would be the optimum.\n    \n\"\"\"\n\n# F\nfrom tqdm import tqdm # Shows the progress bar, iteration number and execution time. AMAZING\n#from time import sleep # To add a delay inside the for loop // sleep(0.01)\narray_p = np.arange(0.1,1,0.05)\n\nprint(\" Fractal \")\nimage = img.imread('fractal.jpg',format='jpg')\nimage = a_grises(image,r,g)\nerror_fractal = [] \nfor p in tqdm(array_p):\n    image_ = reduce_svd(image,p)\n    error_fractal.append(np.mean(np.abs(image_-image))/np.mean(image))\n\n\nprint(\" Mona Lisa \")\nimage = img.imread('mona_lisa.jpg',format='jpg')\nimage = a_grises(image,r,g)\nerror_mona = [] \nfor p in tqdm(array_p):\n    
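# Zero out the smallest p-fraction of singular values; the error is the mean absolute difference relative to the mean intensity.\n    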
image_ = reduce_svd(image,p)\n    error_mona.append(np.mean(np.abs(image_-image))/np.mean(image))\n\n\nprint(\" Square \")\nimage = img.imread('cuadrado.jpg',format='jpg')\nimage = a_grises(image,r,g)\nerror_cuadrado = [] \nfor p in tqdm(array_p):\n    image_ = reduce_svd(image,p)\n    error_cuadrado.append(np.mean(np.abs(image_-image))/np.mean(image))\n\n\nprint(\" Polygon \")\nimage = img.imread('poligono.jpeg',format='jpeg')\nimage = a_grises(image,r,g)\nerror_poligono = [] \nfor p in tqdm(array_p):\n    image_ = reduce_svd(image,p)\n    error_poligono.append(np.mean(np.abs(image_-image))/np.mean(image))\n\nplt.figure()\nplt.plot(array_p,error_fractal,label=\"error_fractal\")\nplt.plot(array_p,error_mona,label=\"error_mona\")\nplt.plot(array_p,error_cuadrado,label=\"error_cuadrado\")\nplt.plot(array_p,error_poligono,label=\"error_poligono\")\nplt.xlabel('-% eigenvalues')\nplt.ylabel('Error')\nplt.legend()","sub_path":"Calculo Numerico/Entregas Python/ejercicio8_GIANPIER_YUPANQUI.py","file_name":"ejercicio8_GIANPIER_YUPANQUI.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"70202209","text":"# Josh's commands\n# ya like jazzzzz???\n\nimport discord\nfrom discord.ext import commands\n\nclass bee():\n    def __init__(self, client):\n        self.client = client\n\n    @commands.command() # Print the Bee Movie script from \"beemovie.txt\" on the Download Archive\n    async def beemovie(self):\n        \"\"\"teh best movieh evar\"\"\"\n        await self.client.say(\"https://icrazyblaze.github.io/Download-archive/dl/beemovie.txt\")\n\n\ndef setup(client):\n    client.add_cog(bee(client))\n","sub_path":"cogs/bee.py","file_name":"bee.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"554831651","text":"import unittest,requests\nfrom api.channels import ApiChannel\nfrom tools.read_json import ReadJson\nfrom parameterized import parameterized\n\ndef get_channel_data():\n    # Read the channel test data and build the parameter list\n    data = (ReadJson('channel.json').read_json())\n    arrs = []\n    arrs.append(\n        (data.get(\"url\"), data.get(\"headers\"), data.get(\"expect_code\"), data.get(\"message\")))\n    print(arrs)\n    return arrs\n\nclass TestChannel(unittest.TestCase):\n    @parameterized.expand(get_channel_data())\n    def test_channel(self,url,headers,expect_code,message):\n        # url=\"http://ttapi.research.itcast.cn/app/v1_0/users/channel\"\n        # headers={\n        #     \"Content-type\": \"application/json\",\n        #     \"Authorization\":\"Bearer token info\"\n        # }\n\n        resp=ApiChannel().api_get_channel(url,headers=headers)\n        print(resp.json())\n        # self.assertEqual(200, resp.status_code)\n        # self.assertEqual(\"OK\", resp.json()['message'])\n\n        # Assertions\n        self.assertEqual(expect_code,resp.status_code)\n        self.assertEqual(message,resp.json()['message'])\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"case/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"318587483","text":"#!/usr/bin/python3\n\"\"\" Fabric script (based on the file 1-pack_web_static.py)\nthat distributes an archive to your web servers, using the\nfunction do_deploy \"\"\"\n\nfrom fabric.api import *\nimport os\n\n\nenv.hosts = ['34.73.121.160', '35.185.87.206']\nenv.user = 'ubuntu'\n\n\ndef do_deploy(archive_path):\n    \"\"\" Do deploy \"\"\"\n    if not os.path.exists(archive_path):\n        return False\n    try:\n        file_name = 
os.path.basename(archive_path)\n        filename = file_name.split(\".\")\n        web_name = filename[0]\n        dire_file = \"/data/web_static/releases/{}/\".format(web_name)\n        put(archive_path, '/tmp/')\n        run(\"mkdir -p {}/\".format(dire_file))\n        run(\"tar -xzf /tmp/{} -C {}/\".format(file_name, dire_file))\n        run(\"rm /tmp/{}\".format(file_name))\n        run(\"mv {}/web_static/* {}/\".format(dire_file, dire_file))\n        run(\"rm -rf {}web_static\".format(dire_file))\n        run(\"rm -rf /data/web_static/current\")\n        run(\"ln -s {}/ /data/web_static/current\".format(dire_file))\n        print(\"New version deployed!\")\n        return True\n\n    except:\n        return False\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"316361888","text":"#encoding:utf-8\nfrom choferes.models import vehiculos,choferes\nfrom django.shortcuts import render_to_response,get_object_or_404, render\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.context import RequestContext\nfrom forms import addChoferForm,addVehiculoForm\nfrom choferes.models import choferes,vehiculos\n\ndef inicio(request):\n\tchofer = choferes.objects.all()\n\ttemplate = 'index.html'\n\treturn render(request,template,{\n\t\t'choferes':chofer,\n\t\t'request':request\n\t})\n\n@login_required\ndef addChofer(request):\n\t'''Form to add a new driver (chofer) to the system'''\n\tif request.POST:\n\t\t# To upload images, receive the files via POST in request.FILES\n\t\tform = addChoferForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\t# Create the new driver but do not save it yet\n\t\t\tchoferes = form.save(commit = False)\n\t\t\t# Record which user performed the registration\n\t\t\tchoferes.usuario = request.user\n\t\t\t# Save the new driver\n\t\t\tchoferes.save()\n\t\t\treturn HttpResponseRedirect('/')\n\telse:\n\t\tform = addChoferForm()\n\ttemplate = 'addChofer.html'\n\treturn render_to_response(template,context_instance = RequestContext(request,locals()))\n\n@login_required\ndef addVehiculo(request):\n\t'''Form to add a new vehicle (vehiculo)'''\n\tif request.POST:\n\t\tform = addVehiculoForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tvehiculos = form.save(commit = False)\n\t\t\tvehiculos.usuario = request.user\n\t\t\tvehiculos.save()\n\t\t\treturn HttpResponseRedirect('/')\n\telse:\n\t\tform = addVehiculoForm()\n\ttemplate = 'addVehiculo.html'\n\treturn render_to_response(template,context_instance = RequestContext(request,locals()))\n\n","sub_path":"taxigo/choferes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"467598846","text":"\"\"\"\nThis signature contains a test to see if the site is running on OpenText Web Site Management.\n\"\"\"\n__author__ = \"Steven Brent\"\n__copyright__ = \"CM Fieldguide\"\n__credits__ = [\"Steven Brent\",]\n__license__ = \"Unlicense\"\n__version__ = \"0.1\"\n__maintainer__ = \"Steven Brent\"\n__email__ = \"scbrent@gmail.com\"\n__status__ = \"Experimental\"\n\n\nfrom cmfieldguide.cmsdetector.signatures import BaseSignature\n\n\nclass Signature(BaseSignature):\n\n    NAME = 'OpenText Web Site Management'\n    WEBSITE = 'http://websolutions.opentext.com'\n    KNOWN_POSITIVE = 'http://websolutions.opentext.com'\n    TECHNOLOGY = '.NET'\n\n    def 
test_has_identifying_publish_tag(self, site):\n        \"\"\"\n        *Some* sites running *some* versions of OpenText Web Site Management will contain this\n        \"\"\"\n\n        if site.home_page.contains_pattern(\"BILSTM-->CRF\r\n3. Embedding step: each medical-record file is one sample; every word is mapped to an id, so each file\r\nbecomes an id sequence; tf.nn.embedding_lookup() then produces each word embedding, which is fed into the LSTM-->CRF\r\n'''\r\nimport tensorflow as tf\r\nimport os\r\nimport glob\r\nimport codecs\r\nimport numpy as np\r\nprint(tf.__version__)\r\n# Label the data with the SMBE tagging scheme\r\ndef labelChar(all_txtoriginal_texts):\r\n    fw=open('labelData.txt', 'w')\r\n    # The Chinese keys below are the annotation labels used in the data files\r\n    # (anatomy, surgery, drug, independent symptom, symptom description) and must stay as-is\r\n    label_dict={'解剖部位':'body','手术':'surgery','药物':'drug',\r\n                '独立症状':'ind_symptoms','症状描述':'SymptomDes'}\r\n    allSamples=[]\r\n    allSample_labels=[]\r\n    for file in all_txtoriginal_texts:\r\n        original_filename=file\r\n        label_filename=file.replace('txtoriginal.','')\r\n        with codecs.open(original_filename,encoding='utf-8') as f:\r\n            original_content=f.read().strip()\r\n            allSamples.append(original_content)\r\n        # The tag sequence produced by labelling\r\n        sy = ['O' for i in range(len(original_content))]\r\n        with codecs.open(label_filename,encoding='utf-8') as f:\r\n            lines=f.readlines()\r\n            for line in lines:\r\n                lineList=line.split('\\t')\r\n                start,end,label=int(lineList[1]),int(lineList[2]),lineList[3].replace('\\r\\n','')\r\n                entity=original_content[start:end]\r\n                # Check the entity's length, then tag according to the length and the entity type\r\n                if len(entity)==1:\r\n                    sy[start]='S-'+label_dict.get(label)\r\n                elif len(entity)==2:\r\n                    sy[start]='B-'+label_dict.get(label)\r\n                    sy[start+1]='E-'+label_dict.get(label)\r\n                else:\r\n                    sy[start]='B-'+label_dict.get(label)\r\n                    sy[end-1]='E-'+label_dict.get(label)\r\n                    for i in range(start+1,end-1):\r\n                        sy[i]='M-'+label_dict.get(label)\r\n        allSample_labels.append(sy)\r\n        posFile=file.replace('.txtoriginal','-label')\r\n        with open(posFile,'w') as f:\r\n            for x,y in zip(original_content,sy):\r\n                f.write(x+'\\t'+y)\r\n                f.write('\\n')\r\n\r\n        for x, y in zip(original_content, sy):\r\n            fw.write(x + '\\t' + y)\r\n            fw.write('\\n')\r\n    return allSamples,allSample_labels\r\n\r\nclass BILSTM_CRF(object):\r\n    def __init__(self,rnn_size,embedding_size):\r\n        self.rnn_size=rnn_size\r\n        self.embedding_size=embedding_size\r\n\r\n    def biLstm_model(self):\r\n        self.x=tf.placeholder(tf.int32,[None,max_sequence_length])\r\n        self.y=tf.placeholder(tf.int32,[None,max_sequence_length])\r\n        self.dropout_keep_prob=tf.placeholder(tf.float32)\r\n\r\n        embedding_mat=tf.Variable(tf.random_uniform((max_id+1,self.embedding_size),-1.0,1.0))\r\n        embedding_output=tf.nn.embedding_lookup(embedding_mat,self.x)\r\n        embedding_output=tf.reshape(embedding_output,[-1,self.embedding_size])\r\n        embedding_output=tf.split(embedding_output,max_sequence_length)\r\n        lstm_qx=tf.contrib.rnn.BasicLSTMCell(num_units=self.rnn_size)\r\n        lstm_hx=tf.contrib.rnn.BasicLSTMCell(num_units=self.rnn_size)\r\n        print('embedding_output:',embedding_output)\r\n        # Two identical LSTM cells are fed into static_bidirectional_rnn, which manages the bidirectional pass\r\n        output,state,_= tf.contrib.rnn.static_bidirectional_rnn(lstm_qx,lstm_hx,embedding_output,dtype=tf.float32)\r\n        print('output:',output)\r\n        self.output=tf.nn.dropout(output,self.dropout_keep_prob)\r\n\r\n    def cfr_model(self):\r\n        # All the sequences in this example have the same length, but they can be variable in a real model\r\n        sequence_length = np.full(batch_size, max_sequence_length, dtype=np.int32)\r\n        weights = tf.get_variable('weights', [2*self.rnn_size, num_tags],dtype=tf.float32)\r\n        bias = tf.get_variable('bias',[num_tags],dtype=tf.float32)\r\n        output_ = tf.reshape(self.output, [-1, 2*self.rnn_size])\r\n        print('output:', output_)\r\n        
print('weights:', weights)\r\n        # sequence_length=[19 19 19 19 19 19 19 19 19 19]\r\n\r\n        # Train and evaluate the model\r\n        #with tf.reset_default_graph():\r\n        with tf.Session() as session:\r\n            # Add the data to the tensorflow graph\r\n            # y_t = tf.convert_to_tensor(y_data)\r\n            # print('y_t:',y_t)\r\n            sequence_lengths_t = tf.constant(sequence_length)\r\n            # compute unary scores from a linear layer\r\n            matricized_unary_scores = tf.matmul(output_, weights)+bias\r\n\r\n            unary_scores = tf.reshape(matricized_unary_scores, [-1, max_sequence_length, num_tags])\r\n            print('unary_scores:', unary_scores)\r\n            # Compute the log-likelihood of the gold sequences and keep the transition\r\n            # params for inference at test time\r\n            log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\r\n                unary_scores, self.y, sequence_lengths_t)\r\n            # Compute the Viterbi sequences and scores\r\n            viterbi_sequence, viterbi_score = tf.contrib.crf.crf_decode(\r\n                unary_scores, transition_params, sequence_lengths_t\r\n            )\r\n            # add a train op to tune the parameters\r\n            loss = tf.reduce_mean(-log_likelihood)\r\n            train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)\r\n            session.run(tf.global_variables_initializer())\r\n            # mask = (np.expand_dims(np.arange(max_sequence_length), axis=0)\r\n            #         < np.expand_dims(sequence_length, axis=1))\r\n            total_labels = np.sum(sequence_length)\r\n            print('total_labels:',total_labels)\r\n            print('len(x_data):',len(x_data))\r\n\r\n            # Train for a fixed number of iterations\r\n            for i in range(200):\r\n                x_batch,y_batch=get_batch(batch_size)\r\n                tf_viterbi_sequence, _ = session.run([viterbi_sequence, train_op],feed_dict={self.x:x_batch,self.y:y_batch,self.dropout_keep_prob:0.5})\r\n                if i % 1 == 0 or i == 100:\r\n                    # correct_labels = np.sum((y_data== tf_viterbi_sequence) * mask)\r\n                    #print('y_data==tf_viterbi_sequence:',y_batch==tf_viterbi_sequence)\r\n                    correct_labels=np.sum((y_batch==tf_viterbi_sequence))\r\n                    print('correct_labels:',correct_labels)\r\n                    accuracy = 100.0 * correct_labels / float(total_labels)\r\n                    print('Accuracy:%.2f%%' % accuracy)\r\n\r\n\r\ndef sample2ids(corpus,allSamples):\r\n    # id:char\r\n    id2char={id:char for id,char in enumerate(corpus)}\r\n    # char:id\r\n    char2id={char:id for id,char in id2char.items()}\r\n    print(len(char2id),len(id2char))\r\n\r\n    # Convert each sample into an id sequence\r\n    idsamples=[]\r\n    for sample in allSamples:\r\n        idsamples.append([char2id.get(char) for char in sample])\r\n    return idsamples\r\n\r\ndef regular_data(x_data,y_data):\r\n    #print(x_data)\r\n    # First turn y_data into space-joined label strings to satisfy the VocabularyProcessor\r\n    labels=[]\r\n    for row in y_data:\r\n        s=' '.join(row)\r\n        labels.append(s.strip())\r\n\r\n    vocab_processor=tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,min_frequency=1)\r\n    text_processed=np.array(list(vocab_processor.fit_transform(x_data)))\r\n\r\n    # Compute the largest id in text_processed, i.e. the total number of words\r\n    max_id = max([item for row in text_processed for item in row])\r\n    print('max_id:', max_id)\r\n\r\n    vocab_processor=tf.contrib.learn.preprocessing.VocabularyProcessor(max_sequence_length,min_frequency=1)\r\n    label_processed=np.array(list(vocab_processor.fit_transform(labels)))\r\n    #print('label_processed:',label_processed)\r\n    return text_processed,label_processed,max_id\r\n\r\ndef get_batch(batch_size):\r\n    ids=np.random.permutation(len(x_data))\r\n    x_shuffled=x_data[ids]\r\n    y_shuffled=y_data[ids]\r\n    return x_shuffled[:batch_size],y_shuffled[:batch_size]\r\nif __name__ == '__main__':\r\n    # Define global parameters\r\n    max_sequence_length=20\r\n    
num_tags=6\r\n    batch_size=32\r\n\r\n    # Step 1: label the data and write it all into one file\r\n    basedir=os.path.join(os.getcwd(),'train_data600')\r\n    pattern='*.txtoriginal.txt'\r\n    all_txtoriginal_texts=glob.glob(os.path.join(basedir,pattern))\r\n    allSamples,allSample_labels=labelChar(all_txtoriginal_texts)\r\n    #x_data=sample2ids(corpus,allSamples)\r\n    # Regularize x_data, y_data into samples of uniform length\r\n    x_data,y_data,max_id=regular_data(allSamples,allSample_labels)\r\n\r\n    # Step 2: build the BiLSTM-CRF model\r\n    lstm_crf=BILSTM_CRF(10,50)\r\n    lstm_crf.biLstm_model()\r\n    lstm_crf.cfr_model()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"CRF/medicalBILSTM-CRF.py","file_name":"medicalBILSTM-CRF.py","file_ext":"py","file_size_in_byte":8927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"260695165","text":"#!/usr/bin/python\n\n\"\"\"\n\nThe Fibonacci numbers, commonly denoted F(n) form a sequence, called the Fibonacci sequence, such that each number is the sum of the two preceding ones, starting from 0 and 1. That is,\n\nF(0) = 0, F(1) = 1\nF(N) = F(N - 1) + F(N - 2), for N > 1.\n\nGiven N, calculate F(N).\n\n\"\"\"\n\ndef fib(n):\n    # Base cases per the definition above: F(0) = 0, F(1) = 1\n    if n <= 1:\n        return n\n\n    # Iteratively carry the last two Fibonacci numbers forward\n    a, b = 0, 1\n    for _ in range(2, n + 1):\n        a, b = b, a + b\n    return b\n\ndef main():\n    for i in range(0, 101):\n        print(i, fib(i))\n\nmain()\n","sub_path":"leetcode/fibonacci-number.py","file_name":"fibonacci-number.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"349478023","text":"import datetime\nimport pprint\nimport random\nimport threading\nimport socket\nimport pickle\nimport time\nfrom multiprocessing import Process, Manager\n\n\nclass Server:\n    def __init__(self, machine_id):\n        self.id = machine_id\n        self.HOST = ''\n        self.PORT = 27015\n        self.ADDRESS = (self.HOST, self.PORT)\n        self.BUFFERSIZE_TEMPORARY = 3072\n\n        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.server.bind(self.ADDRESS)\n        self.server.listen(10)\n\n        self.bots_list = []\n        self.bots_processing_list = []\n        self.bots_data_collection = []\n        self.exit_is_set = False\n\n    def server_message(self, text):\n        print(datetime.datetime.now().time(), 'Server:', text)\n\n    def data_accepting(self):\n        message_time = time.time()\n        message_time_cooldown = 120\n        while not self.exit_is_set:\n            # try:\n            if self.bots_list:\n                for bot in self.bots_list.copy():\n                    if bot not in self.bots_processing_list:\n                        threading.Thread(target=self.data_rcv, args=(bot,)).start()\n                        #self.data_rcv(bot)\n            else:\n                if time.time() - message_time >= message_time_cooldown:\n                    self.server_message('The server is empty.')\n                    message_time = time.time()\n            # except:\n            #     self.server_message('DATA SEND ERROR.')\n            #     self.bots_list.clear()\n            #     self.bots_processing_list.clear()\n\n    def data_rcv(self, b):\n        self.bots_processing_list.append(b)\n        #try:\n        data_ = b.recv(self.BUFFERSIZE_TEMPORARY)\n\n        if data_:\n            decoded_data = pickle.loads(data_)\n            print(datetime.datetime.now().time(), decoded_data)\n            self.send_data(data_)\n        self.bots_processing_list.remove(b)\n        # except:\n        #     try:\n        #         self.bots_processing_list.remove(b)\n        #         b.close()\n        #         self.bots_list.remove(b)\n        #         self.server_message(f'Client removed.')\n        #     except ValueError:\n        #         print('Client not in the list!!! 
That is all...', ValueError)\n\n    def send_data(self, data):\n        for bot_ in self.bots_list:\n            try:\n                bot_.send(data)\n            except:\n                self.server_message('Data transfer error')\n                bot_.close()\n                self.bots_list.remove(bot_)\n                self.server_message(f'Client removed.')\n\n    def bots_accepting(self):\n        self.server_message('Server started.')\n        while not self.exit_is_set:\n            clientsocket, address = self.server.accept()\n            self.bots_list.append(clientsocket)\n            self.server_message('A bot connected.')\n\n    def server_start(self):\n        t1 = threading.Thread(target=self.bots_accepting)\n        t1.start()\n        self.data_accepting()\n\n\nclass Client:\n    def __init__(self, machine_id, dtt, dtr, is_connected, server_item, ip=None):\n        manager = Manager()\n        self.id = machine_id\n        self.HOST = '84.237.53.150'\n        # self.HOST = '213.127.70.95'\n\n        self.dtt = dtt\n        self.dtr = dtr\n\n        self.PORT = 27015\n        self.ADDRESS = (self.HOST, self.PORT)\n        self.BUFFERSIZE_TEMPORARY = 3072\n        self.server = server_item\n        self.server[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        #self.connected_m = manager.list()\n        #self.connected_m.append(False)\n        self.connected_m = is_connected\n        self.connected = False\n        self.reconnected = False\n        self.has_received = False\n        self.tries = 0\n        self.exit_is_set = False\n        self.count = 0\n\n        self.bots_list = []\n        # manager = Manager()\n        # self.bots_data_collection = manager.list()\n        # self.bots_data_collection.append(0)\n        self.bots_data_collection = {}\n\n        self.conn()\n\n    def connect(self):\n        #while not self.connected and not self.exit_is_set:\n        while not self.connected_m[0] and not self.exit_is_set:\n            if self.tries >= 2:\n                self.connected_m[0] = False\n                #self.connected = False\n                self.client_message('Server not responding. Working in offline mode')\n                self.tries = 0\n                break\n            try:\n                self.server[0].connect(self.ADDRESS)\n                #self.connected = True\n                self.connected_m[0] = True\n                self.client_message('Connection established')\n                self.reconnected = False\n                self.data_accepting_thread()\n            except:\n                self.client_message(\n                    'Server not responding. For technical support, contact Baganets. Most likely you are just '\n                    'an amateur. DID YOU CHANGE THE IP? IS THE SERVER ITSELF RUNNING??? Well, that is all.')\n                self.tries += 1\n                self.client_message(f'Connection attempt {self.tries}')\n                time.sleep(15)\n\n    def autoconnect(self):\n        while True:\n            #print('autoconnect self.connected_m[0]', self.connected_m[0])\n            #print('outer self.connected', self.connected)\n            if not self.connected_m[0]:\n                #if not self.connected:\n                self.client_message('________________CONNECTION PROBLEMS DETECTED. 
AUTO-RECONNECT STARTED________________')\n                #while not self.reconnected:\n                #print('reconnection...')\n                #print('inner self.connected', self.connected)\n                self.server[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                self.connect()\n            else:\n                time.sleep(15)\n\n    def is_connected(self):\n        #return self.connected\n        return self.connected_m[0]\n\n    def client_message(self, text):\n        print(datetime.datetime.now().time(), f'Bot {self.id}', text)\n\n    def client_send(self, data_tt, key=None):\n        if key is None:\n            data_to_encode = {self.id: data_tt}\n        else:\n            data_to_encode = {key: data_tt}\n        encoded_data_to_send = pickle.dumps(data_to_encode)\n        print(\"LEN OF DATA\", len(encoded_data_to_send))\n        if self.connected_m[0]:\n            #if self.connected:\n            try:\n                #print('client_send', self.count)\n                self.server[0].send(encoded_data_to_send)\n            except:\n                self.client_message('DATA SEND ERROR')\n                self.server[0].close()\n                #self.connected = False\n                self.connected_m[0] = False\n                # self.autoconnect()\n\n    def client_receive_message(self):\n        # self.dtr[0] = self.bots_data_collection\n        # print('BOTNET dtr', self.dtr[0])\n        # print('BOTNETdata_collection ', self.bots_data_collection)\n        self.has_received = True\n        # return self.dtr[0]\n\n    def data_accepting(self):\n        while not self.exit_is_set:\n            if self.connected_m[0]:\n                try:\n                    if self.has_received:\n                        self.bots_data_collection.clear()\n                        # self.bots_data_collection[0] = 0\n                        self.has_received = False\n\n                    data_ = self.server[0].recv(self.BUFFERSIZE_TEMPORARY)\n\n                    if data_:\n                        decoded_data = pickle.loads(data_)\n                        self.dtr[0] = decoded_data\n                        self.bots_data_collection = decoded_data\n                        # time.sleep(0.01)\n                        # print('CLIENT', self.bots_data_collection)\n\n                except:\n                    self.client_message('DATA RECEIVE ERROR')\n                    self.server[0].close()\n                    #self.connected = False\n                    self.connected_m[0] = False\n                    # self.autoconnect()\n\n    def data_accepting_thread(self):\n        t1 = threading.Thread(target=self.data_accepting)\n        t1.start()\n\n    def conn(self):\n        t2 = threading.Thread(target=self.autoconnect)\n        t2.start()\n\n\nif __name__ == '__main__':\n    s = Server(1)\n    s.server_start()\n\n    # time.sleep(10)\n\n    #c = Client(1, [0], [0], [0], [0])\n    # c.connect_to_server()\n","sub_path":"modules/botnet.py","file_name":"botnet.py","file_ext":"py","file_size_in_byte":8739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"344956575","text":"from azure.servicebus import ServiceBusService, Message\nimport logging\n\nlog = logging.getLogger(__name__)\n\nclass SweetieMQ(object):\n\n    bus_service = None\n    topic = None\n\n    def __init__(self, config):\n        account_key = getattr(config, 'sb_account_key', None)\n        if account_key is None:\n            log.warn('sb_account_key is not set, skipping mq setup')\n            return\n\n        issuer = getattr(config, 'sb_issuer', 'owner')\n        service_namespace = getattr(config, 'sb_namespace', 'jabber-messages')\n        topic = getattr(config, 'sb_topic', 'test-topic')\n\n        self.bus_service = ServiceBusService(service_namespace=service_namespace,\\\n            account_key=account_key, issuer=issuer)\n        self.topic = topic\n\n    def send(self, message):\n        if self.bus_service is None:\n            return\n        log.debug('Sending message '+str(message))\n        msg = Message(message)\n        try:\n            self.bus_service.send_topic_message(self.topic, msg)\n        except Exception as e:\n            log.error(\"MESSAGE DELIVERY FAILED: 
\"+str(e))\n","sub_path":"modules/SweetieMQ.py","file_name":"SweetieMQ.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"553200843","text":"import logging\n\n# low latency & code Size balanced collapser\n\ndef choose_functions_to_collapse (matrix):\n\tlogging.debug(\"\t\t>>>Getting functions to collapse\")\n\t#compute the maximum value of matrix to determine which functions to collapse\n\t#----------------------------------------------------------------------------\n\n\tnum_cols=len(matrix[0])\n\tnum_rows=len(matrix)\n\t#print \"\\n num_cols = \",num_cols, \" num_rows = \",num_rows, \"\\n\"\n\tmax_invocations=0\n\tf1_row=0\n\tf1_col=0\n\tf2_row=0\n\tf2_col=0\n\tlen_collapsable=999999999 #initially nothing is collapsed. At this point 2 funcs are collapsable\n\n\tfor i in range(1,num_rows):\n\t\tfor j in range(1,num_cols):\n\t\t\t# self-invocation is not relevant\n\t\t\tif (i!=j) and (matrix[i][j] >= max_invocations):\n\t\t\t\tif (matrix[i][j] == max_invocations):\n\t\t\t\t\tif (len(matrix[0][j])>len_collapsable):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t# best candidate selection\n\t\t\t\tmax_invocations=matrix[i][j]\n\t\t\t\tf2_col=i # f2 invocation column is the row\n\t\t\t\tf1_col=j # f1 invocation column is the column\n\t\t\t\tf1_row=f1_col\n\t\t\t\tf2_row=f2_col\n\t\t\t\tlen_collapsable =len(matrix[0][j])\n\tlogging.debug(\"\t\tFunctions to collapse: %s,%s\",f2_col,f1_col)\n\treturn (f2_col,f1_col)\n\n","sub_path":"splitter/collapser_llcsb.py","file_name":"collapser_llcsb.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507193905","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author \"jhe\"\n@date 2018/6/12 18:58\n\"\"\"\n\nimport logging\n\n\n# logging.basicConfig(level=logging.DEBUG,\n# format='%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s',\n# datefmt='%Y-%m-%d %H:%M:%S',\n# filename='test.log',\n# filemode='a')\n#\n# logging.debug('debug message')\n# logging.info('info message')\n# logging.warning('warning message')\n# logging.error('error message')\n# logging.critical('critical message')\n\n\n\n\nlogging.L\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n# 用于写入日志文件\nfh = logging.FileHandler('text1.log')\n\n#用于输出到控制台\nch = logging.StreamHandler()\n\nformatter = logging.Formatter('%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s')\n\nfh.setFormatter(formatter)\nch.setFormatter(formatter)\n\nlogger.addHandler(fh)\nlogger.addHandler(ch)\n\nlogger.debug('debug message')\nlogger.info('info message')\nlogger.warning('warning message')\nlogger.error('error message')\nlogger.critical('critical message')\n\n","sub_path":"day1-18/模块之logging.py","file_name":"模块之logging.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18084581","text":"from os import name\nimport requests\nfrom bs4 import BeautifulSoup\n# Fill in your details here to be posted to the login form.\npayload = {\n 'log': 'admin',\n 'pwd': '123456aA'\n}\n\n# Use 'with' to ensure the session context is closed after use.\nwith requests.Session() as s:\n p = s.post('http://45.79.43.178/source_carts/wordpress/wp-login.php', data=payload)\n # print the html returned or something more intelligent to see if it's a successful login page.\n # print(p.text)\n # soup = 
BeautifulSoup(p.content, \"html.parser\")\n # uname = soup.findAll('span', class_='display-name')\n # print(uname)\n # # An authorised request.\n r = s.get('http://45.79.43.178/source_carts/wordpress/wp-admin/profile.php')\n print (r.text)\n soup = BeautifulSoup(r.content, \"html.parser\")\n uname = soup.select_one('#user_login')\n uname = soup.find('input', {'id': 'user_login'}).get('value')\n print(uname)","sub_path":"crawl1.py","file_name":"crawl1.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"240612350","text":"import sqlite3\nimport pandas as pd\nimport joblib\nimport sys\nimport os\nimport hashlib\nimport json\nfrom math import sin, cos, sqrt, atan2, radians, isnan, floor\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler\n\n\ndb_path = 'data\\\\PE2014A\\\\PE2014A.sqlite'\n\ndef create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except sqlite3.Error as e:\n print(e)\n\n return conn\n\n\ndef get_airport_coords(conn):\n query = \"SELECT ICAO, Latitude as latitude, Longitude AS longitude, LongestRunway AS longest_runway, HasILSApproach AS has_ils, HasRNAVApproach AS has_rnav, HasLocalizerApproach AS has_loc FROM (SELECT * FROM Airport INNER JOIN Point P on Airport.Point = P.Id) WHERE ICAO != ''\"\n\n cur = conn.cursor()\n cur.execute(query)\n \n columns_names = list(map(lambda x: x[0], cur.description))\n rows = cur.fetchall()\n \n return rows, columns_names\n\ndef get_more_airport_data(conn):\n query = \"SELECT alternates_airports_displayed_to_user as alternates, alternates_airport_is_towered_displayed_to_user AS has_tower, alternates_airport_approaches_displayed_to_user AS approaches FROM '1_mio_alternate_data'\"\n\n cur = conn.cursor()\n cur.execute(query)\n \n columns_names = list(map(lambda x: x[0], cur.description))\n rows = cur.fetchall()\n \n return rows, columns_names\n\ndef get_aircraft_data(conn):\n query = \"SELECT aircraft_type_icao, MAX(aircraft_max_landing_weight) AS aircraft_max_landing_weight, MIN(aircraft_min_runway_length) AS aircraft_min_runway_length FROM 'airplane_data (corrected with missing aircrafts) (from CSV)' GROUP BY (aircraft_type_icao)\"\n\n cur = conn.cursor()\n cur.execute(query)\n \n columns_names = list(map(lambda x: x[0], cur.description))\n rows = cur.fetchall()\n \n return rows, columns_names\n\n\ndef calc_coord_distance(lat1, lon1, lat2, lon2):\n # Calculates distance between two points in km\n \n R = 6373.0 # Approximate radius of earth in km\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n \n return distance\n\n\ndef get_grid_square(lat, lon):\n lat += 90\n lon += 180\n \n lat_rounded = floor(lat)\n lon_rounded = floor(lon)\n \n # square_id = 360 * lat_rounded + lon_rounded\n square_id = 180 * lon_rounded + lat_rounded\n return square_id\n\n\nclass Airport:\n def __init__(self, icao):\n self.icao = icao\n # self.lat = df_airport_data.loc[icao][\"latitude\"] if (icao in df_airport_coords.index) else None\n # self.lon = df_airport_data.loc[icao][\"longitude\"] if (icao in df_airport_coords.index) else None\n self.lat = df_airport_data.loc[icao][\"latitude\"]\n # self.lat = str(self.lat) if isnan(self.lat) else self.lat\n self.lon = df_airport_data.loc[icao][\"longitude\"]\n 
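# All fields are looked up in df_airport_data, built by init_airport_data() from the SQLite airport tables.\n        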
# self.lon = str(self.lon) if isnan(self.lon) else self.lon\n        self.has_tower = df_airport_data.loc[icao][\"has_tower\"]\n        # self.has_tower = str(self.has_tower) if isnan(self.has_tower) else self.has_tower\n        self.approaches = df_airport_data.loc[icao][\"approaches\"]\n        # self.approaches = str(self.approaches) if isnan(self.approaches) else self.approaches\n        self.has_ils = isinstance(self.approaches, str) and \"ILS\" in self.approaches\n        self.longest_runway = df_airport_data.loc[icao][\"longest_runway\"]\n\n    def __str__(self):\n        return \" - \" + str(self.icao) + \"{:10.3f}\".format(self.lat) + \"{:10.3f}\".format(self.lon) + \"\\tTower:\" + str(self.has_tower) + \" Approaches:\" + str(self.approaches)\n    \n    def toJSON(self):\n        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n\n\nclass Alternate(Airport):\n    def __init__(self, icao, dest_airport):\n        super().__init__(icao)\n        self.distance = round(calc_coord_distance(dest_airport.lat, dest_airport.lon, self.lat, self.lon), 3)\n    \n    def __str__(self):\n        return \" - \" + str(self.icao) + \"{:10.3f}\".format(self.distance) + \"km\" + \"\\tTower:\" + str(self.has_tower) + \" Approaches:\" + str(self.approaches)\n\n    def toJSON(self):\n        return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)\n\ndef my_hash(string):\n    return int(str(int(hashlib.sha256(string.encode('utf-8')).hexdigest(), base=16))[:10])\n\n\ndef init_airport_data():\n    # Fetching airport coordinates\n    dirname = os.path.dirname(__file__)\n    conn = create_connection(os.path.join(dirname, db_path))\n    rows, column_names = get_airport_coords(conn)\n    df_airport_coords = pd.DataFrame(rows, columns=column_names)\n    df_airport_coords = df_airport_coords.set_index(\"ICAO\")\n    \n    # Fetching more airport features (tower, approaches) \n    rows, column_names = get_more_airport_data(conn)\n    df_airport_data = pd.DataFrame(rows, columns=column_names)\n\n    for column in df_airport_data.columns:\n        df_airport_data[column] = df_airport_data[column].astype(str)\n\n    airport_dict = {}\n\n    for i, row in df_airport_data.iterrows():\n        if i % 10000 == 0:\n            progress = round(i / len(df_airport_data.index) * 100, 2)\n            print(str(progress) + \"%   \", end=\"\\r\")\n\n\n        alternates = row[\"alternates\"].split(\",\")\n        towers = row[\"has_tower\"].split(\",\")\n        approaches = row[\"approaches\"].replace(\", \", \";\").split(\",\")\n\n        for i in range(len(alternates)):\n            if alternates[i] not in airport_dict.keys():\n                value = []\n                value.append(1 if towers[i] == \"Towered\" else 0)\n                value.append(approaches[i])\n\n                airport_dict[alternates[i]] = value\n\n    print(\"100%   \", end=\"\\r\")\n    df_airport_data = pd.DataFrame.from_dict(airport_dict, orient='index', columns=[\"has_tower\", \"approaches\"])\n    \n    # Combining dataframes to make the one and only airport_data\n    df_airport_data = pd.concat([df_airport_coords, df_airport_data], axis=1).sort_index()\n    \n    return df_airport_data\n\n\n\ndef init_aircraft_data():\n    dirname = os.path.dirname(__file__)\n    conn = create_connection(os.path.join(dirname, db_path))\n    rows, column_names = get_aircraft_data(conn)\n    df_aircraft_data = pd.DataFrame(rows, columns=column_names)\n    df_aircraft_data = df_aircraft_data.set_index(\"aircraft_type_icao\")\n\n    return df_aircraft_data\n\n\ndef get_airport_data(icao):\n    return df_airport_data.to_dict(orient=\"index\")[icao]\n\n\n\ndef convert_to_alternate_objects(dest, alternates):\n    converted_alternates = []\n    for alternate_icao in alternates:\n        if alternate_icao == dest.icao:\n            continue\n        if alternate_icao 
not in df_airport_data.index:\n continue\n converted_alternates.append(Alternate(alternate_icao, dest))\n \n return converted_alternates\n\n\ndef get_aircraft_weight_class(weight):\n if weight < 15500:\n return 0\n elif weight < 300000:\n return 1\n elif weight < 1234000:\n return 2\n else:\n return 3\n\n\ndef make_prediction(dest_icao, aircraft_icao):\n dest_icao = dest_icao.upper()\n aircraft_icao = aircraft_icao.upper()\n\n dest = Airport(dest_icao)\n\n attributes = [dest.lat, dest.lon, get_aircraft_weight_class(df_aircraft_data[\"aircraft_max_landing_weight\"][aircraft_icao])] # dest_lat, dest_lon, wake\n print(attributes)\n\n # ---------- Normalizing ----------\n if model_path == \"models\\\\minmax.joblib\":\n attributes = minmax_lat_lon_wake.transform([attributes])[0]\n elif model_path == \"models\\\\std.joblib\":\n attributes = std_lat_lon_wake.transform([attributes])[0]\n elif model_path == \"models\\\\std_std_minmax.joblib\":\n attributes[0] = std_lat_lon_wake.transform([attributes])[0][0]\n attributes[1] = std_lat_lon_wake.transform([attributes])[0][1]\n attributes[2] = minmax_lat_lon_wake.transform([attributes])[0][2]\n\n print(attributes)\n\n # ---------- Make prediction ----------\n number_of_non_zero_probs = sum(i > 0 for i in dt.predict_proba([attributes])[0])\n predicted_alternates = dt.classes_[dt.predict_proba([attributes])[0].argsort()[-11:][::-1]]\n predicted_alternates = convert_to_alternate_objects(dest, predicted_alternates)\n\n # # Filtering\n # cutted_predictions = []\n # for pred in predicted_alternates:\n # if pred.distance < 1000:\n # cutted_predictions.append(pred)\n\n # # Sorting\n # predicted_alternates = sorted(predicted_alternates, key=lambda x: x.distance)\n # predicted_alternates = sorted(predicted_alternates, key=lambda x: x.has_tower, reverse=True)\n\n return dest, predicted_alternates\n\ndirname = os.path.dirname(__file__)\nstd_lat_lon_wake = joblib.load(os.path.join(dirname, 'normalizing\\\\std-lat_long_wake.bin'))\nminmax_lat_lon_wake = joblib.load(os.path.join(dirname, 'normalizing\\\\minmax-lat_lon_wake.bin'))\n\n# model_path = \"models\\\\minmax.joblib\"\n# model_path = \"models\\\\pure.joblib\"\n# model_path = \"models\\\\std_std_minmax.joblib\"\nmodel_path = \"models\\\\std.joblib\"\ndt = joblib.load(os.path.join(dirname, model_path))\n\n\ndf_airport_data = init_airport_data()\ndf_aircraft_data = init_aircraft_data()\n\n","sub_path":"Backend & Frontend/Python server/Random Forest/RandomForest_model.py","file_name":"RandomForest_model.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"582603965","text":"import json\nimport urllib\nfrom google.appengine.api import urlfetch\nfrom beaker.cache import cache_regions, cache_region\n\n__all__ = ['request_method', 'LastFMError']\n\napi_key = '556bd6c6b42c58254c8e9a4b5a39971a'\napi_secret = 'b44ba376af1e28405150aa43bed3e2a9'\n\ncache_regions.update({\n 'lastfm-default-cache': {\n 'expire': 6 * 60 * 60, # Hope nothing interesting will happen in next 6 hours\n 'type': 'memory',\n }\n})\n\nclass LastFMError(Exception):\n def __init__(self, code, message):\n self.code = code\n self.message = message\n def __str__(self):\n return 'LastFMError(error_code={error_code}, message={message})'.format(error_code=self.code, message=repr(self.message))\nclass FutureJSON(object):\n def __init__(self, rpc):\n self._performed = False\n self._rpc = rpc\n def get_result(self):\n if not self._performed:\n self._performed = 
True\n res = self._rpc.get_result()\n self._result = json.loads(res.content)\n self._url = res.final_url\n del self._rpc\n if u'error' in self._result:\n raise LastFMError(int(self._result[u'error']), self._result[u'message'])\n return self._result\n \n\ndef cached_urlfetch(url):\n rpc = urlfetch.create_rpc()\n urlfetch.make_fetch_call(rpc, url)\n return FutureJSON(rpc)\n\n\n@cache_region('lastfm-default-cache', 'cached_urlfetch')\ndef _request_method(method_name, kwargs):\n \"a special wrapper for caching\"\n url_base = 'http://ws.audioscrobbler.com/2.0/?method={method_name}&format=json&'.format(method_name=method_name)\n return cached_urlfetch(url_base + urllib.urlencode({u: v for u, v in kwargs}, True))\n\ndef request_method(method_name, kwargs):\n nkwargs = [('api_key', api_key)]\n for i in kwargs:\n nkwargs.append((i.encode('utf-8'), kwargs[i].encode('utf-8')))\n nkwargs.sort()\n \n return _request_method(method_name, nkwargs)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447862391","text":"from jinja2 import nodes\nfrom jinja2.ext import Extension\nfrom application.models import Entry\nfrom application import db\n\n\nclass EntryRenderExtension(Extension):\n tags = set(['get_entry'])\n\n def __init__(self, env):\n super(EntryRenderExtension, self).__init__(env)\n\n def _render_tag(self, entry_name, caller):\n entry = Entry.query.filter_by(name=entry_name).first()\n if not entry:\n entry = Entry()\n entry.name = entry_name\n db.session.add(entry)\n db.session.commit()\n return entry.value if entry.value else '[%s]' % entry.name\n\n def parse(self, parser):\n lineno = next(parser.stream).lineno\n args = [parser.parse_expression()]\n parser.stream.skip_if('name')\n node = self.call_method('_render_tag', args)\n return nodes.CallBlock(node, [], [], []).set_lineno(lineno)\n","sub_path":"application/templatetags.py","file_name":"templatetags.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593907571","text":"# -*- coding: utf-8 -*-\n# _mod1.py\n# Module providing the mod1 function\n# Copyright 2013 Giuseppe Venturini\n# This file is part of python-deltasigma.\n#\n# python-deltasigma is a 1:1 Python replacement of Richard Schreier's\n# MATLAB delta sigma toolbox (aka \"delsigma\"), upon which it is heavily based.\n# The delta sigma toolbox is (c) 2009, Richard Schreier.\n#\n# python-deltasigma is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# LICENSE file for the licensing terms.\n\n\"\"\"Module providing the mod1() utility function\n\"\"\"\n\nimport numpy as np\n\nfrom ._calculateTF import calculateTF\n\n\ndef mod1():\n \"\"\"A description of the first-order modulator.\n\n **Returns:**\n\n ABCD, NTF, STF : a tuple of (ndarray, lti, lti)\n The elements are the ABCD matrix (ndarray), the NTF (lti object), the STF (lti object).\n\n \"\"\"\n A = np.array([[1.]])\n B = np.array([[1., -1.]])\n C = np.array([[1.]])\n D = np.array([[0., 0.]])\n ABCD = np.vstack((np.hstack((A, B)), np.hstack((C, D))))\n H, G = calculateTF(ABCD)\n return ABCD, H, G\n","sub_path":"deltasigma/_mod1.py","file_name":"_mod1.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"35241919","text":"import time, subprocess, sys\nfrom selenium import webdriver\n\n# GET GAMES\ndriver = webdriver.Chrome(\"chromedriver.exe\")\ndriver.get(\"https://steamcommunity.com/id/Ironstone1/games/?tab=all\")\nwith open(\"games.txt\", \"w\") as games:\n games.write(driver.execute_script(\"\"\"return (function() {\n var elms = document.querySelectorAll('div[id^=\"game_\"]');\n var games = \"\";\n for (var i=0;i= len(games): break;\n subprocess.Popen(\"\" + exe + \" \" + str(games[ii]) + \" \" + str(error), startupinfo=info)\n\n f1 = open(\"file.log\", \"r\").read()\n with open(\"file.log\", \"w\") as f:\n f.write(f1 + \"\\nRound \"+str(i+1)+\"/\"+str(count)+\"... \" + str((timeout*(count-i))/60) + \" mins left...\")\n\n n = [0 if open(\"n.txt\", \"r\").read() == \"\" else int(open(\"n.txt\", \"r\").read())][0]\n with open(\"n.txt\", \"w\") as f:\n if n >= count:\n f.write(\"0\")\n else:\n f.write(str(n+1))\n\n time.sleep(timeout)\n subprocess.Popen(\"taskkill /f /im SingleBoostr.Game.exe\", startupinfo=info)\n time.sleep(1)\n \n print(\"Finished!\")\n with open(\"n.txt\", \"w\") as f: f.write(\"0\")\n time.sleep(timeout)\n return None\n \n except:\n print(\"[-] Error occured!\")\n\nwhile True:\n main()\n","sub_path":"python/get_games/get_games.py","file_name":"get_games.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526454054","text":"from api_rules.permissions import PermissionRow\nfrom .serializers import UserRegisterSerializer, UserLoginSerializer, UserSerializer\nfrom django.contrib.auth import get_user_model, login, logout, authenticate\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework import generics\nfrom redis_manager.permission_cache_manager import PermissionCacheManager\nfrom django.http import HttpResponse\nfrom rest_framework import permissions\nfrom rest_framework.views import APIView\nfrom django.conf import settings\nfrom django.middleware.csrf import get_token\nfrom django.core.mail import send_mail\n\nUser = get_user_model()\n\n\nclass UserAPIView(generics.RetrieveAPIView):\n lookup_field = 'id'\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserRegisterAPIView(generics.CreateAPIView):\n \"\"\"\n create:\n Create a new user instance\n \"\"\"\n queryset = get_user_model().objects.all()\n serializer_class = UserRegisterSerializer\n\n authentication_classes = []\n\n def create(self, request, *args, **kwargs):\n if request.user.is_active:\n from django.contrib.auth.models import AnonymousUser\n request.user = AnonymousUser()\n request.session.flush()\n logout(request)\n 
print(\"register works...\")\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n\n headers = self.get_success_headers(serializer.data)\n data = request.data\n email = data.get('email')\n password = data.get('password')\n # login user\n user = authenticate(request, email=email, password=password)\n\n login(request, user)\n print(request.session.session_key)\n # write user's data in cookie\n send(request, email)\n response = Response(set_users_cookie(user, request), status=status.HTTP_201_CREATED, headers=headers)\n response.set_cookie(\"username\", user.username, domain=\"protected-mountain-24825.herokuapp.com\")\n return response\n\n\nclass AuthAPIView(generics.CreateAPIView):\n \"\"\"\n View to login\n \"\"\"\n queryset = get_user_model().objects.all()\n serializer_class = UserLoginSerializer\n permission_classes = [permissions.AllowAny]\n\n authentication_classes = []\n\n def post(self, request, *args, **kwargs):\n print(\"login works....\")\n if request.user.is_active:\n return Response({'detail': 'user is already authenticated'}, status=400)\n\n data = request.data\n\n email = data.get('email')\n password = data.get('password')\n\n user = authenticate(request, email=email, password=password)\n if user is not None:\n\n PermissionCacheManager.set_user_perms_in_cache(user.id)\n \n login(request, user)\n \n response = Response(set_users_cookie(user, request), status=200)\n print(\"view...\")\n #response = set_users_cookie(user, response, request)\n return response\n\n return Response({\"error\": \"invalid credentials\"}, status=400)\n\n\ndef set_users_cookie(user, request):\n\n user_data = {\n \"user_id\": user.id,\n \"email\": user.email,\n \"username\": user.username,\n \"first_name\": user.first_name,\n \"last_name\": user.last_name,\n \"sessionid\": request.session.session_key,\n \"csrftoken\": get_token(request)\n }\n\n return user_data\n\n\nclass LogoutAPIView(APIView):\n permission_classes = [permissions.AllowAny]\n\n authentication_classes = []\n\n def post(self, request):\n\n logout(request)\n response = Response({\"detail\": \"Successfully logged out. 
See cookie\"}, status=200)\n response.delete_cookie(\"email\")\n response.delete_cookie(\"user_id\")\n response.delete_cookie(\"username\")\n response.delete_cookie(\"first_name\")\n response.delete_cookie(\"last_name\")\n\n return response\n\n\ndef send(request, email):\n\n subject = \"Hi for registering\"\n message = \"This is message.\"\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [email]\n\n send_mail(\n subject=subject,\n message=message,\n from_email=email_from,\n recipient_list=recipient_list\n )\n","sub_path":"user_auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"404511837","text":"import pickle\n\nclass Server_battleships:\n def __init__(self):\n self.game_data = [[None, None],[None, None]]#(X,Y,Did enemy shot hit)\n\n def receive(self,data,conn,player_nmbr):\n if data == \"get\":\n conn.sendall(pickle.dumps((2, self.game_data[(player_nmbr + 1) % 2][1])))#Send back data from another player\n elif data[0] == 2:#Check if player sends current game data\n if data[1] == \"matrix\":\n self.game_data[player_nmbr][0]=data[2]\n if player_nmbr == 1:\n self.game_data[1][1]=(-1,-1)\n elif data[1] == \"shot\":\n self.game_data[(player_nmbr + 1) % 2][1]=None #Overwrite old shot data\n self.game_data[player_nmbr][1]=data[2] #Save data for second player\n conn.sendall(pickle.dumps((2, self.game_data[(player_nmbr + 1) % 2][0][data[2][0]][data[2][1]])))#Send result of shot\n else:\n conn.sendall(pickle.dumps(None))#We must return something, or client could be stuck on send_recv function\n","sub_path":"Server_Modules/server_battleships.py","file_name":"server_battleships.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391468561","text":"import wx\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nfrom wx import glcanvas\r\n\r\nimport ClothObject\r\n\r\n\r\nclass MyFrame(wx.Frame):\r\n def __init__(self):\r\n self.size = (1280, 720)\r\n wx.Frame.__init__(self, None, title='wx frame', size=self.size,\r\n style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\r\n self.panel = MyPanel(self)\r\n\r\n\r\nclass MyPanel(wx.Panel):\r\n def __init__(self, parent):\r\n wx.Panel.__init__(self, parent)\r\n # OpenGL canvas within panel\r\n self.canvas = OpenGLCanvas(self)\r\n \r\n self.bAnimation = False\r\n self.ResetButton = wx.Button(self, wx.ID_ANY, 'Mass-Spring Reset', pos=(1030, 20),\r\n size=(200, 40), style=0)\r\n self.AnimationButton = wx.Button(self, wx.ID_ANY, 'Animate/Stop', pos=(1030, 60),\r\n size=(200, 40), style=0)\r\n\r\n # Slider for stiffness\r\n self.stiffnessLabel = wx.StaticText(self, -1, pos=(1030, 150), style=wx.ALIGN_CENTER)\r\n self.stiffnessLabel.SetLabel('Stiffness: ' + str(self.canvas.clothObject.stiffness))\r\n self.stiffnessSlider = wx.Slider(self, -1, pos=(1030, 180), size=(200, 50),\r\n style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS,\r\n value=1, minValue=1, maxValue=30)\r\n # Slider for step\r\n self.stepLabel = wx.StaticText(self, -1, pos=(1030, 250), style=wx.ALIGN_CENTER)\r\n self.stepLabel.SetLabel('Time Interval: ' + str(self.canvas.stepSize))\r\n self.stepSlider = wx.Slider(self, -1, pos=(1030, 280), size=(200, 50),\r\n style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS,\r\n value=10, minValue=1, maxValue=300)\r\n # Slider for damping\r\n self.dampLabel = wx.StaticText(self, -1, pos=(1030, 350), 
style=wx.ALIGN_CENTER)\r\n self.dampLabel.SetLabel('Damping Coeff.: ' + str(self.canvas.clothObject.damp))\r\n self.dampSlider = wx.Slider(self, -1, pos=(1030, 380), size=(200, 50),\r\n style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS,\r\n value=0, minValue=0, maxValue=30)\r\n\r\n # Callback functions\r\n self.Bind(wx.EVT_BUTTON, self.OnAnimationButton, self.AnimationButton)\r\n self.Bind(wx.EVT_BUTTON, self.OnResetButton, self.ResetButton)\r\n self.Bind(wx.EVT_SLIDER, self.OnStiffnessSlider, self.stiffnessSlider)\r\n self.Bind(wx.EVT_SLIDER, self.OnStepSlider, self.stepSlider)\r\n self.Bind(wx.EVT_SLIDER, self.OnDampSlider, self.dampSlider)\r\n\r\n def OnAnimationButton(self, event):\r\n \"\"\"Toggles animation flag\"\"\"\r\n if self.bAnimation is False:\r\n self.bAnimation = True\r\n else:\r\n self.bAnimation = False\r\n self.canvas.bAnimation = self.bAnimation\r\n\r\n def OnResetButton(self, event):\r\n self.canvas.clothObject.resetMassSpring()\r\n\r\n def OnStiffnessSlider(self, event):\r\n val = event.GetEventObject().GetValue()\r\n stiffness = 2 ** val\r\n self.stiffnessLabel.SetLabel('Stiffness: ' + str(stiffness))\r\n self.canvas.clothObject.stiffness = stiffness\r\n\r\n def OnStepSlider(self, event):\r\n val = event.GetEventObject().GetValue()\r\n stepSize = 0.0001 * val\r\n self.stepLabel.SetLabel('Time Interval: ' + str(stepSize))\r\n self.canvas.stepSize = stepSize\r\n\r\n def OnDampSlider(self, event):\r\n val = event.GetEventObject().GetValue()\r\n damp = val / 10.0\r\n self.dampLabel.SetLabel('Damping Coeff.: ' + str(damp))\r\n self.canvas.clothObject.damp = damp\r\n\r\n\r\nclass OpenGLCanvas(glcanvas.GLCanvas):\r\n def __init__(self, parent):\r\n self.initialized = False\r\n self.size = (1024, 720) # Canvas size\r\n self.aspect_ratio = 1\r\n self.stepSize = 0.0001\r\n glcanvas.GLCanvas.__init__(self, parent, -1, size=self.size)\r\n self.context = glcanvas.GLContext(self)\r\n self.SetCurrent(self.context)\r\n self.Bind(wx.EVT_PAINT, self.OnDraw) # Draw callback function\r\n self.Bind(wx.EVT_IDLE, self.OnIdle) # Idle callback function\r\n self.InitGL() # Initialization for OpenGL\r\n self.clothObject = ClothObject.ClothObject(1, 1, 10, 10) # Create cloth object instance\r\n self.bAnimation = False # Animation starts as False\r\n\r\n def InitGL(self):\r\n \"\"\"Sets up the projection matrix and viewport\"\"\"\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n self.aspect_ratio = float(self.size[0]) / self.size[1]\r\n gluPerspective(60, self.aspect_ratio, 0.1, 100.0)\r\n glViewport(0, 0, self.size[0], self.size[1])\r\n\r\n def OnDraw(self, event):\r\n # Clear color and depth buffers\r\n if not self.initialized:\r\n self.InitGL()\r\n self.initialized = True\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Clears canvas before drawing again\r\n\r\n # Position viewer\r\n glMatrixMode(GL_MODELVIEW) # Chooses matrix to manipulate\r\n glLoadIdentity()\r\n gluLookAt(2,2,2, 0,0,0, 0,1,0)\r\n \r\n self.clothObject.drawSpring()\r\n\r\n self.SwapBuffers()\r\n\r\n def OnIdle(self, event):\r\n \"\"\"Update state of mass-spring with dt\"\"\"\r\n if self.bAnimation:\r\n self.clothObject.update(self.stepSize)\r\n self.Refresh()\r\n\r\n\r\ndef main():\r\n app = wx.App()\r\n frame = MyFrame()\r\n frame.Show()\r\n app.MainLoop()\r\n\r\n\r\nif __name__=='__main__':\r\n main()\r\n","sub_path":"approximate_implicit/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"390804241","text":"\nfrom pyramid.renderers import render\n\n\ndef create_jenkins_job_xml(displayname,\n python_version,\n email_notification_recipients,\n buildout='jenkins.cfg',\n node='Slave',\n git_branch=None,\n git_url='git://github.com/plone/buildout.coredev.git',\n url_to_callback=None,\n pull=None):\n\n command = \"%s bootstrap.py\\n\" % python_version\n command += \"bin/buildout -c %s\" % buildout\n\n\n command += \"bin/jenkins-alltests -1\"\n\n\n result = render('plonejenkins.middleware:templates/plone.xml',\n {'url_to_callback': url_to_callback,\n 'displayName': displayname,\n 'email_notification_recipients': email_notification_recipients,\n 'git_url': git_url,\n 'git_branch': git_branch})\n\n return result","sub_path":"src/plonejenkins.middleware/src/plonejenkins/middleware/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631227509","text":"import os, psutil\nfrom hive.utils.stats import PrometheusClient, BroadcastObject\n\ndef log_memory_usage(memtypes=[\"rss\", \"vms\", \"shared\"], broadcast = True) -> str:\n \"\"\"\n Logs current memory types, additionally broadcast if broadcast set to True (default)\n \n Available memtypes: rss, vms, shared, text, lib, data, dirty\n \"\"\"\n\n def format_bytes(val : int):\n assert isinstance(val, int) or isinstance(val, float), 'invalid data type, required int or float'\n return f'{ val / 1024.0 / 1024.0 :.2f} MB'\n\n human_readable = { \"rss\": \"physical_memory\", \"vms\": \"virtual_memory\", \"shared\": \"shared_memory\", \"text\": \"used_by_executable\", \"lib\": \"used_by_shared_libraries\" }\n stats = psutil.Process(os.getpid()).memory_info() # docs: https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info\n if broadcast:\n PrometheusClient.broadcast([ BroadcastObject(f'hivemind_memory_{key}', getattr(stats, key), 'b') for key in stats._fields ]) # broadcast to prometheus\n return f\"memory usage report: { ', '.join( [ f'{ human_readable.get(k, k) } = { format_bytes(getattr(stats, k)) }' for k in memtypes ] ) }\"\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\ndef show_app_version(log, database_head_block, patch_level_data):\n from hive.version import VERSION, GIT_REVISION, GIT_DATE\n log.info(\"hivemind_version : %s\", VERSION)\n log.info(\"hivemind_git_rev : %s\", GIT_REVISION)\n log.info(\"hivemind_git_date : %s\", GIT_DATE)\n\n log.info(\"database_schema_version : %s\", patch_level_data['level'])\n log.info(\"database_patch_date : %s\", patch_level_data['patch_date'])\n log.info(\"database_patched_to_revision : %s\", patch_level_data['patched_to_revision'])\n \n log.info(\"database_head_block : %s\", database_head_block)\n","sub_path":"hive/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"74132977","text":"from distutils.dir_util import copy_tree\nfrom bids_neuropoly import bids\nimport pandas as pd\nimport json\nimport glob\nimport os\nimport argparse\nimport sys\n\n# This scripts merges 2 BIDS datasets.\n# the new participants.tsv and participants.json are merged versions of the initial files.\n# 2 Inputs should be added:\n# 1. --ifolders: list of the 2 Folders to be merged\n# 2. 
--ofolder: output folder\n\n# Example call:\n# python3 merge_BIDS_datasets.py --ifolders ~/first_Dataset/ ~/second_Dataset/ --ofolder ~/mergedDataset/\n\n# Konstantinos Nasiotis 2020\n\n# -----------------------------------------------------------------------------------------------------------------------#\n\n\ndef main_run(argv):\n\n    CLI = argparse.ArgumentParser()\n    CLI.add_argument(\n        \"--ifolders\",\n        nargs=2,  # 2 folders expected to be merged\n        type=str,\n        default=[],  # default if nothing is provided - This should give an error later on\n    )\n    CLI.add_argument(\n        \"--ofolder\",  # name on the CLI - drop the `--` for positional/required parameters\n        nargs=1,  # 1 folder expected\n        type=str,\n        default=[],  # default if nothing is provided\n    )\n\n    # parse the command line\n    args = CLI.parse_args()\n    # access CLI options\n    print(\"Input folders: %r\" % args.ifolders)\n    print(\"Output folder: %r\" % args.ofolder)\n\n\n\n    datasetFolder1 = args.ifolders[0]\n    datasetFolder2 = args.ifolders[1]\n    output_folder = args.ofolder[0]\n\n\n    print('Make sure there were no inconsistencies in column labels between the two initial participants.tsv files - e.g. subject_id - subject_ids etc.')\n\n    # Create output folder if it doesn't exist\n    if not os.path.exists(output_folder):\n        os.mkdir(output_folder)\n\n    #------------------------------------------------------------------------------------------------------------------------#\n    # GENERALIZE TO MORE THAN TWO DATASETS\n\n    # Load the .tsv files\n    df1 = bids.BIDS(datasetFolder1).participants.content\n    df2 = bids.BIDS(datasetFolder2).participants.content\n\n    # This gets rid of potential problematic merging of different Types within the same column\n    df1 = df1.astype(str)\n    df2 = df2.astype(str)\n\n\n    # Merge the .tsv files and save them in a new file (This keeps also non-overlapping fields)\n    df_merged = pd.merge(left=df1, right=df2, how='outer')\n\n\n    # TEST THE FOLLOWING\n\n    # if isinstance(path_folders, str):\n    #     raise TypeError(\"'bids_path' in the config file should be a list\")\n    # elif len(path_folders) == 1:\n    #     # read participants.tsv as pandas dataframe\n    #     df = bids.BIDS(path_folders).participants.content\n    # elif path_folders == []:\n    #     raise Exception(\"No dataset folder selected\")\n    # else:\n    #     # Merge multiple .tsv files into the same dataframe\n    #     df_merged = bids.BIDS(path_folders[0]).participants.content\n    #     # Convert to string to get rid of potential TypeError during merging within the same column\n    #     df_merged = df_merged.astype(str)\n    #\n    #     for iFolder in range(1, len(path_folders)):\n    #         df_next = bids.BIDS(path_folders[iFolder]).participants.content\n    #         df_next = df_next.astype(str)\n    #         # Merge the .tsv files (This keeps also non-overlapping fields)\n    #         df_merged = pd.merge(left=df_merged, right=df_next, how='outer')\n\n\n    # ------------------------------------------------------------------------------------------------------------------------#\n\n    df_merged.to_csv(os.path.join(output_folder, 'participants.tsv'), sep='\\t', index=False)\n\n\n    # Do the same for the .json files\n    jsonFile1 = os.path.join(datasetFolder1, 'participants.json')\n    jsonFile2 = os.path.join(datasetFolder2, 'participants.json')\n\n    with open(jsonFile1) as json_file:\n        json1 = json.load(json_file)\n    with open(jsonFile2) as json_file:\n        json2 = json.load(json_file)\n\n    # Merge .json files\n    json_merged = {**json1, **json2}\n\n    with open(os.path.join(output_folder, 'participants.json'), 'w') as outfile:\n        json.dump(json_merged, outfile, indent=4)\n\n\n    # Create a
 dataset_description.json - This is needed on the BIDS loader\n    with open(os.path.join(output_folder, 'dataset_description.json'), 'w') as outfile:\n        json.dump({\"BIDSVersion\": \"1.0.1\", \"Name\": \"SCT_testing\"}, outfile, indent=4)  # Confirm the version is correct\n\n\n    copy_files_as_well = 0\n\n    if copy_files_as_well:\n        all_datasets = [datasetFolder1, datasetFolder2]\n        for datasetFolder in all_datasets:\n            subjectsFolders = glob.glob(os.path.join(datasetFolder, 'sub-*'))\n            derivativesFolder = glob.glob(os.path.join(datasetFolder, 'derivatives'))\n\n            if derivativesFolder != []:\n                subjectsFolders.append(derivativesFolder[0])\n                foldersToCopy = subjectsFolders\n            else:\n                foldersToCopy = subjectsFolders\n                print(\"No derivatives are present in this folder\")\n\n            for subFolder in foldersToCopy:\n                copy_tree(subFolder, os.path.join(output_folder, os.path.basename(os.path.normpath(subFolder))))\n\n\nif __name__ == \"__main__\":\n    main_run(sys.argv[1:])\n","sub_path":"merge_BIDS_datasets.py","file_name":"merge_BIDS_datasets.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"613992195","text":"import torch\nfrom torch import nn\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence, PackedSequence\n\nfrom src.models.abstract_preprocessor import AbstractPreproc\n\nfrom src.utils import vocab\nfrom src.nlp import abstract_embeddings\nfrom src.utils import registry\n\n\n@registry.register(\"word_attention\", \"WordAttention\")\nclass WordAttention(torch.nn.Module):\n    def __init__(\n        self,\n        device: str,\n        preprocessor: AbstractPreproc,\n        word_emb_size: int,\n        dropout: float,\n        recurrent_size: int,\n        attention_dim: int,\n    ):\n        super().__init__()\n        self._device = device\n        self.preprocessor = preprocessor\n        self.embedder: abstract_embeddings.Embedder = self.preprocessor.get_embedder()\n        self.vocab: vocab.Vocab = self.preprocessor.get_vocab()\n        self.word_emb_size = word_emb_size\n        self.recurrent_size = recurrent_size\n        self.dropout = dropout\n        self.attention_dim = attention_dim\n\n        assert self.recurrent_size % 2 == 0\n\n        assert self.word_emb_size == self.embedder.dim\n\n        # embedding layer\n        self.embedding = torch.nn.Embedding(num_embeddings=len(self.vocab), embedding_dim=self.word_emb_size)\n\n        # init embedding\n        init_embed_list = []\n        for index, word in enumerate(self.vocab):\n            if self.embedder.contains(word):\n                init_embed_list.append(self.embedder.lookup(word))\n            else:\n                init_embed_list.append(self.embedding.weight[index])\n        init_embed_weight = torch.stack(init_embed_list, 0)\n        self.embedding.weight = nn.Parameter(init_embed_weight)\n\n        self.encoder = nn.LSTM(\n            input_size=self.word_emb_size,\n            hidden_size=self.recurrent_size // 2,\n            dropout=self.dropout,\n            bidirectional=True,\n            batch_first=True,\n        )\n        self.dropout = nn.Dropout(dropout)\n\n        # Maps LSTM output to `attention_dim` sized tensor\n        self.word_weight = nn.Linear(self.recurrent_size, self.attention_dim)\n\n        # Word context vector (u_w) to take dot-product with\n        self.context_weight = nn.Linear(self.attention_dim, 1)\n\n    def recurrent_size(self):\n        return self.recurrent_size\n\n    def forward(self, docs, doc_lengths, sent_lengths):\n        \"\"\"\n        :param docs: encoded document-level data; LongTensor (num_docs, padded_doc_length, padded_sent_length)\n        :param doc_lengths: unpadded document lengths; LongTensor (num_docs)\n        :param sent_lengths: unpadded sentence lengths; LongTensor (num_docs, max_sent_len)\n        :return: sentences embeddings, docs
 permutation indices, docs batch sizes, word attention weights\n        \"\"\"\n\n        # Sort documents by decreasing order in length\n        doc_lengths, doc_perm_idx = doc_lengths.sort(dim=0, descending=True)\n        docs = docs[doc_perm_idx]\n        sent_lengths = sent_lengths[doc_perm_idx]\n\n        # Make a long batch of sentences by removing pad-sentences\n        # i.e. `docs` was of size (num_docs, padded_doc_length, padded_sent_length)\n        # -> `packed_sents.data` is now of size (num_sents, padded_sent_length)\n        packed_sents = pack_padded_sequence(docs, lengths=doc_lengths.tolist(), batch_first=True)\n\n        # effective batch size at each timestep\n        docs_valid_bsz = packed_sents.batch_sizes\n\n        # Make a long batch of sentence lengths by removing pad-sentences\n        # i.e. `sent_lengths` was of size (num_docs, padded_doc_length)\n        # -> `packed_sent_lengths.data` is now of size (num_sents)\n        packed_sent_lengths = pack_padded_sequence(sent_lengths, lengths=doc_lengths.tolist(), batch_first=True)\n\n        sents, sent_lengths = packed_sents.data, packed_sent_lengths.data\n\n        # Sort sents by decreasing order in sentence lengths\n        sent_lengths, sent_perm_idx = sent_lengths.sort(dim=0, descending=True)\n        sents = sents[sent_perm_idx]\n\n        inp = self.dropout(self.embedding(sents))\n\n        packed_words = pack_padded_sequence(inp, lengths=sent_lengths.tolist(), batch_first=True)\n\n        # effective batch size at each timestep\n        sentences_valid_bsz = packed_words.batch_sizes\n\n        # Apply word-level LSTM over word embeddings\n        packed_words, _ = self.encoder(packed_words)\n\n        u_i = torch.tanh(self.word_weight(packed_words.data))\n        u_w = self.context_weight(u_i).squeeze(1)\n        val = u_w.max()\n        att = torch.exp(u_w - val)\n\n        # Restore as sentences by repadding\n        att, _ = pad_packed_sequence(PackedSequence(att, sentences_valid_bsz), batch_first=True)\n\n        att_weights = att / torch.sum(att, dim=1, keepdim=True)\n\n        # Restore as sentences by repadding\n        sents, _ = pad_packed_sequence(packed_words, batch_first=True)\n\n        sents = sents * att_weights.unsqueeze(2)\n        sents = sents.sum(dim=1)\n\n        # Restore the original order of sentences (undo the first sorting)\n        _, sent_unperm_idx = sent_perm_idx.sort(dim=0, descending=False)\n        sents = sents[sent_unperm_idx]\n\n        att_weights = att_weights[sent_unperm_idx]\n\n        return sents, doc_perm_idx, docs_valid_bsz, att_weights\n","sub_path":"src/models/wordattention.py","file_name":"wordattention.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260137892","text":"from collections import Counter\nfrom difflib import SequenceMatcher\nfrom itertools import product\nimport re\n\nIDENTICAL = 1.0\nTOP_NUMBER = 10\nRSS_FEED = 'rss.xml'\nSIMILAR = 0.87\nTAG_HTML = re.compile(r'<category>([^<]+)</category>')\n\n\n\ndef get_tags():\n    \"\"\"Find all tags (TAG_HTML) in RSS_FEED.\n    Replace dash with whitespace.\n    Hint: use TAG_HTML.findall\"\"\"\n    try:\n        # Open the file and read the entire content\n        with open(RSS_FEED,'r') as f:\n            s=f.read()\n        # Find all occurrences of the regular expression TAG_HTML\n        tags=TAG_HTML.findall(s)\n        # Find all words in the list with special chars and replace them with space\n        # Change all words in the list to lowercase\n        for index,tag in enumerate(tags):\n            tags[index]=re.sub(r'[^a-zA-Z0-9.]', ' ',tag).lower() \n        return tags\n    except:\n        print(\"Cannot open file {}.\".format(RSS_FEED))\n\n\ndef get_top_tags(tags):\n    \"\"\"Get the TOP_NUMBER of most common tags\n    Hint: use most_common method of Counter (already imported)\"\"\"\n    # Put the tags
 list in Counter object\n    c=Counter(tags)\n    # Get the 10 most common tags\n    com_tags=c.most_common(TOP_NUMBER)\n    return com_tags\n\n\ndef get_similarities(tags):\n    \"\"\"Find set of tag pairs with similarity ratio of > SIMILAR\n    Hint 1: compare each tag, use for in for, or product from itertools (already imported)\n    Hint 2: use SequenceMatcher (imported) to calculate the similarity ratio\n    Bonus: for performance gain compare the first char of each tag in pair and continue if not the same\"\"\"\n    # Create generator of tuples of two elements from all combinations\n    # between the words in the tags list\n    prod=product(tags,repeat=2)\n    similar=[]\n    for t in prod:\n        # Sort the tuple, and pass only the tuples\n        # which have words with the same first char\n        # and which have different words, to be checked for similarity \n        t=sorted(t)\n        if t[0][0] == t[1][0] and t[0] != t[1]:\n            s=SequenceMatcher(None,t[0],t[1])\n            if s.quick_ratio() > SIMILAR:\n                similar.append(t)\n    return similar\n\n\n\nif __name__ == \"__main__\":\n    tags = get_tags()\n    top_tags = get_top_tags(tags)\n    print('* Top {} tags:'.format(TOP_NUMBER))\n    for tag, count in top_tags:\n        print('{:<20} {}'.format(tag, count))\n    similar_tags = dict(get_similarities(tags))\n    print()\n    print('* Similar tags:')\n    for singular, plural in similar_tags.items():\n        print('{:<20} {}'.format(singular, plural))\n","sub_path":"03/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160919162","text":"import pandas\nimport json\nfrom RestManager import RestManager\nfrom dbManager import dbManager\n\n\nclass CorrelationManager:\n\n    db = dbManager()\n    api = RestManager()\n    assets = []\n    data = {}\n    dataframe = None\n    db_assets = db.getAssets()\n    for ass in db_assets:\n        assets.append(ass.rest_id)\n\n    '''\n    def __init__(self):\n        db_assets = self.db.getAssets()\n        for ass in db_assets:\n            self.assets.append(ass.rest_id)\n    '''\n\n    def body(self, id):\n        data = {\"ratio\" : [19], \"asset\" : self.assets, \"benchmark\" : id, \"start_date\": \"2012-01-02\", \"end_date\": \"2018-08-31\"}\n        return data\n    \n    def get_corr(self, id):\n        response = self.api.post(\"ratio/invoke\", self.body(id))\n        res = json.loads(response)\n        result = []\n        for ass in self.assets:\n            result.append(res[str(ass)][str(19)]['value'])\n        return result\n\n    def build_data(self):\n        for ass in self.assets:\n            self.data[ass] = self.get_corr(ass)\n    \n    def build_csv(self):\n        self.build_data()\n        doc = {'id': self.assets}\n        for ass in self.assets:\n            doc[ass] = self.data[ass]\n        df = pandas.DataFrame(doc)\n        print(df)\n        df.to_csv('corr.csv', sep=',', index=False)\n\n    def build_df(self):\n        df = pandas.read_csv('corr.csv', sep=',')\n        df.set_index('id', inplace=True)\n        self.dataframe = df\n\n    def value(self, i, j):\n        return self.dataframe[str(i)][int(j)]\n\nif __name__ == \"__main__\":\n\n    cm = CorrelationManager()\n    #cm.build_csv()\n    #print(cm.dataframe)\n    #cm.build_df()\n    #print(cm.dataframe)\n    #print(cm.value(1001, 717))","sub_path":"src/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"184132607","text":"def binomialCoeff(n, k): \n    res = 1; \n \n    # Since C(n, k) = C(n, n-k) \n    if (k > n - k): \n        k = n - k; \n \n    # Calculate value of [n*(n-1)*--- \n    # *(n-k+1)] / [k*(k-1)*---*1] \n    for i in range(k): \n        res *= (n - i); \n        res /= (i + 1); \n \n    return
 int(res); \n \n# A Binomial coefficient based \n# function to find nth Catalan \n# number in O(n) time \ndef catalan(n): \n \n    # Calculate value of 2nCn \n    c = binomialCoeff(2 * n, n); \n \n    # return 2nCn/(n+1) \n    return int(c / (n + 1)); \n \n# Function to find possible \n# ways to put balanced parenthesis \n# in an expression of length n \ndef findWays(n): \n \n    # If n is odd, not possible to \n    # create any valid parentheses \n    if(n & 1): \n        return 0; \n \n    # Otherwise return n/2'th \n    # Catalan Number \n    return catalan(int(n / 2));\nn=int(input())\nprint(findWays(n*2))\n","sub_path":"no of combinations of valid balanced paranthresis.py","file_name":"no of combinations of valid balanced paranthresis.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"185896721","text":"from random import randint\r\nx = randint(1,8192)\r\n\r\nprint(x)\r\nn = x\r\ns = \"\"\r\n\r\nfor i in range(1,13):\r\n    a = n%2\r\n    n = n//2\r\n    if a:\r\n        if s == \"\":\r\n            s = \"2**\"+str(i-1)\r\n        else: \r\n            s = \"2**\"+str(i-1)+\"+\"+s\r\n    if n == 0:\r\n        break\r\n    \r\n\r\nprint(s)\r\n","sub_path":"Practical 5/powersof2.py","file_name":"powersof2.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88884151","text":"from motorengine.query.search import TextSearch\nfrom tests import AsyncTestCase\n\n\nclass TestTextSearch(AsyncTestCase):\n\n    def test_text_search(self):\n\n        query = TextSearch()\n        self.assertDictEqual(query.to_query(\"value\", \"hello\"), {\n            \"$text\": {\n                \"$search\": \"hello\"\n            }\n        })\n","sub_path":"tests/query/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529259872","text":"from __future__ import print_function\n\nimport logging\nimport pickle\n\nfrom oauth2client.client import HttpAccessTokenRefreshError\n\nimport flag_local\nfrom apiclient import discovery\n\n# Local environment: True; GAE: False\nLOCAL = flag_local.LOCAL\nEVER_CACHED = False\n\n# Flask app OAuth2\nOAUTH2 = None\n# OAuth2 Service\nOAUTH2_SERVICE = None\n# OAuth information\nCLIENT_SECRET_FILE = 'client_secret.json'\n# API scope\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'\n# Application name\nAPPLICATION_NAME = 'Google Sheets API Python Access'\n# API URL\nAPI_URL = 'https://sheets.googleapis.com/$discovery/rest?version=v4'\n# Menu object key\nMENU_KEY = 'Menu'\n# Cache file name\nMENU_CACHE = 'menu.cache'\n\n# Python environment on GAE using Flask\n# (See requirements.txt for third-party module requirements)\nif not LOCAL:\n    try:\n        from oauth2client.contrib.flask_util import UserOAuth2\n        from google.appengine.api import memcache\n    except ImportError:\n        logging.exception('Not in GAE environment.')\n        print('Not in GAE environment.')\n        UserOAuth2 = None\n        memcache = None\n\n# Local python environment\nif LOCAL:\n    import httplib2\n    import os\n    from oauth2client import client\n    from oauth2client import tools\n    from oauth2client.file import Storage\n\n    try:\n        import argparse\n\n        flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n    except ImportError:\n        argparse = None\n        flags = None\n\n\ndef oauth_setup(app):\n    if not LOCAL:\n        global OAUTH2\n        OAUTH2 = UserOAuth2(app)\n        return OAUTH2\n    else:\n        print('No need for Flask app OAuth setup in local environment.')\n        return 
None\n\n\ndef get_info_from_cache():\n logging.info('Read from cache...')\n if LOCAL:\n print('Read from cache...')\n # Read cached info\n try:\n with open(MENU_CACHE, 'r') as cache:\n values = pickle.loads(cache.read())\n except:\n values = None\n logging.warning('File reading error. Unable to get info.')\n else:\n # Read cached info from memcache\n try:\n values = pickle.loads(memcache.get(MENU_KEY))\n except TypeError:\n values = None\n logging.warning('Type error. Unable to get info.')\n except:\n values = None\n logging.exception('New error. Unable to get info.')\n\n return values\n\n\ndef set_info_in_cache(values):\n logging.info('Set values in cache...')\n if LOCAL:\n print('Set values in cache...')\n # Cache page\n with open(MENU_CACHE, 'w') as cache:\n cache.write(pickle.dumps(values))\n else:\n # Cache page into memcache\n memcache.set(MENU_KEY, pickle.dumps(values))\n\n # Set EVER_CACHED\n global EVER_CACHED\n EVER_CACHED = True\n logging.info('Cache set.')\n logging.info('EVER_CACHED = ' + str(EVER_CACHED))\n if LOCAL:\n print('Cache set.')\n print('EVER_CACHED = ' + str(EVER_CACHED))\n\n\ndef get_auth_http():\n if LOCAL:\n project_root = os.path.dirname(os.path.abspath(__file__))\n credential_path = os.path.join(project_root, 'local_secret.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n logging.info('Requesting credentials...')\n print('Requesting credentials...')\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run_flow(flow, store)\n logging.info('Storing credentials to ' + credential_path)\n print('Storing credentials to ' + credential_path)\n else:\n logging.info('Using locally stored credentials.')\n print('Using locally stored credentials.')\n\n http = credentials.authorize(httplib2.Http())\n\n return http\n\n elif OAUTH2:\n try:\n http = OAUTH2.http()\n except ValueError:\n logging.exception('No credentials available.')\n http = None\n\n else:\n logging.exception('Flask app OAuth not setup.')\n http = None\n\n return http\n\n\ndef get_sheets_info(sheet_id, sheet_range, cached=True):\n global EVER_CACHED\n if EVER_CACHED and cached:\n # Getting from cache\n logging.warning('Acquiring cached info...')\n if LOCAL:\n print('Acquiring cached info...')\n\n # Read cache\n values = get_info_from_cache()\n\n if not values:\n logging.exception('Cache read fails.')\n if LOCAL:\n print('Cache read fails.')\n return None\n else:\n logging.info('Cached info acquired.')\n if LOCAL:\n print('Cached info acquired.')\n return values\n\n elif cached:\n # Invalid input: read cache before first caching\n logging.warning('Invalid input: read cache before first caching.')\n if LOCAL:\n print('Invalid input: read cache before first caching.')\n return None\n\n # Requesting API (cached = False)\n logging.warning('Acquiring info from Google Sheets API...')\n if LOCAL:\n print('Acquiring info from Google Sheets API...')\n\n # Setting up service if not set up\n global OAUTH2_SERVICE\n if not OAUTH2_SERVICE:\n if LOCAL:\n # OAuth2 in local Python python environment\n http = get_auth_http()\n if http:\n OAUTH2_SERVICE = discovery.build(\n 'sheets', 'v4', http=http, discoveryServiceUrl=API_URL)\n else:\n logging.exception('API request fails.')\n print('API request fails.')\n return None\n\n elif OAUTH2:\n # OAuth2 through GAE\n http = 
get_auth_http()\n if http:\n OAUTH2_SERVICE = discovery.build('sheets', 'v4', http=http)\n else:\n logging.exception('API request fails.')\n return None\n\n else:\n # OAuth2 not setup on GAE\n logging.exception('Flask app OAuth not setup. API request fails.')\n return None\n\n # Executing request and getting results\n try:\n # Call the service using the authorized Http object.\n result = OAUTH2_SERVICE.spreadsheets().values().get(\n spreadsheetId=sheet_id, range=sheet_range).execute()\n values = result.get('values', [])\n except HttpAccessTokenRefreshError:\n logging.exception('Invalid grant. Bad request. API request fails.')\n if LOCAL:\n print('Invalid grant. Bad request. API request fails.')\n return None\n\n if not values:\n # Info got from request invalid, fall back to cache\n logging.warning('Google spreadsheet read results invalid.')\n if LOCAL:\n print('Google spreadsheet read results invalid.')\n\n values = get_info_from_cache()\n\n if not values:\n logging.exception('Cache read fails.')\n if LOCAL:\n print('Cache read fails.')\n return None\n\n else:\n # Values acquired\n logging.info('Values acquired from Google Sheets API.')\n if LOCAL:\n print('Values acquired from Google Sheets API.')\n\n # Cache page\n set_info_in_cache(values)\n\n return values\n","sub_path":"gae_api_utils.py","file_name":"gae_api_utils.py","file_ext":"py","file_size_in_byte":7590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545643860","text":"# Download the Python helper library from twilio.com/docs/python/install\nimport os\nfrom twilio.rest import Client\n\n# Your Account Sid and Auth Token from twilio.com/user/account\n# To set up environmental variables, see http://twil.io/secure\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\nauth_token = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\nfromNumber = \"+18180021216\"\ntoNumber = \"+15624421212\"\nparticipant = client \\\n .conferences(\"AgentConf12\") \\\n .participants \\\n .create(fromNumber, toNumber)\n\nprint(participant.call_sid)\n","sub_path":"rest/participant/list-post-example-1/list-post-example-1.8.x.py","file_name":"list-post-example-1.8.x.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140754463","text":"# vim: expandtab:ts=4:sw=4\nimport pickle\nimport os\nimport sys\nimport errno\nimport argparse\nimport numpy as np\nimport cv2\nfrom save_layer import *\nfrom PIL import Image\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom easydict import EasyDict as edict\nsys.path.append('./src/proto')\nsys.path.append('./src')\nsys.path.append('./script')\nfrom networks import get_network\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allocator_type = 'BFC'\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.95\nconfig.gpu_options.allow_growth = True\n\n\ndef save_layer_maxpool(node, tf_nodes):\n pass\n\n\ndef save_layer_add(node, tf_nodes):\n pass\n\ndef save_layer_concat(node, tf_nodes):\n pass\n\ndef save_layer_l2_norm(node, tf_nodes):\n pass\n\ndef save_layer_reshape(node, tf_nodes):\n pass\n\n\ndef save_layer_depthwise(node, tf_nodes):\n # save weights\n basedir = os.path.dirname(os.path.realpath(__file__))\n filedir = os.path.join(basedir, 'resources/networks/layers')\n filepath = os.path.join(filedir, node.name.split('/')[-1] + '_depthwise')\n mkdir_p(filedir)\n\n weights = tf_nodes.weights.transpose((3, 2, 0, 1))\n with 
open(filepath, 'wb') as f:\n save_layer(node, f, node.name, weights, None)\n\n\ndef save_layer_fbn(node, tf_nodes):\n basedir = os.path.dirname(os.path.realpath(__file__))\n filedir = os.path.join(basedir, 'resources/networks/layers')\n filepath = os.path.join(filedir, node.name.split('/')[-1] + '_pointwise')\n mkdir_p(filedir)\n\n weights = tf_nodes.weights.transpose(3, 2, 0, 1)\n bias = np.hstack((tf_nodes.beta, tf_nodes.mean, tf_nodes.variance))\n with open(filepath, 'wb') as f:\n save_layer(node, f, node.name, weights, bias)\n\ndef save_layer_fbn_norelu(node, tf_nodes):\n basedir = os.path.dirname(os.path.realpath(__file__))\n filedir = os.path.join(basedir, 'resources/networks/layers')\n filepath = os.path.join(filedir, node.name.split('/')[-1] + '_pointwise')\n mkdir_p(filedir)\n\n weights = tf_nodes.weights.transpose(3, 2, 0, 1)\n bias = np.hstack((tf_nodes.beta, tf_nodes.mean, tf_nodes.variance))\n with open(filepath, 'wb') as f:\n save_layer(node, f, node.name, weights, bias)\n\n\n\ndef depthwise(name, input, prefix):\n return edict({\n 'name': name,\n 'input': input,\n 'output': '%s/depthwise' % prefix,\n 'depthwise': '%s/depthwise' % prefix,\n 'weights': '%s/depthwise_weights' % prefix,\n 'type': 'depthwise',\n })\n\ndef conv_fbn(name, input, prefix):\n return edict({\n 'name': name,\n 'input': input,\n 'output': '%s/Relu' % prefix,\n 'conv': '%s/Conv2D' % prefix,\n 'weights': '%s/weights' % prefix,\n 'beta': '%s/BatchNorm/beta' % (prefix),\n 'mean': '%s/BatchNorm/moving_mean' % (prefix),\n 'variance': '%s/BatchNorm/moving_variance' % (prefix),\n 'fbn': '%s/BatchNorm/FusedBatchNorm' % (prefix),\n 'type': 'fbn'\n })\n\ndef separable_conv(name, input, prefix, use_relu):\n return [\n depthwise(name, input, prefix + '_depthwise'),\n conv_fbn(name, prefix + '_depthwise/depthwise', prefix + '_pointwise') if use_relu else conv_fbn_norelu(name, input, prefix + '_pointwise')\n ]\n\n\ndef conv_fbn_norelu(name, input, prefix):\n return edict({\n 'name': name,\n 'input': input,\n 'conv': '%s/Conv2D' % prefix,\n 'weights': '%s/weights' % prefix,\n 'beta': '%s/BatchNorm/beta' % (prefix),\n 'mean': '%s/BatchNorm/moving_mean' % (prefix),\n 'variance': '%s/BatchNorm/moving_variance' % (prefix),\n 'fbn': '%s/BatchNorm/FusedBatchNorm' % (prefix),\n 'output' : '%s/BatchNorm/FusedBatchNorm' % prefix,\n 'type': 'fbn_norelu'\n })\n\ndef conv1():\n return conv_fbn(name='Conv2d_0',\n input='TfPoseEstimator/image',\n prefix='TfPoseEstimator/MobilenetV1/Conv2d_0')\n\n\ndef conv2d_3_pool():\n return [\n edict({\n 'name': 'conv2d_3_pool',\n 'input': 'TfPoseEstimator/MobilenetV1/Conv2d_3_pointwise/Relu',\n 'output': 'TfPoseEstimator/Conv2d_3_pool',\n 'type': 'maxpool'\n })]\n\ndef concat():\n return [\n edict({\n 'name': 'feat_concat',\n 'input_0': 'TfPoseEstimator/Conv2d_3_pool',\n 'input_1': 'TfPoseEstimator/MobilenetV1/Conv2d_7_pointwise/Relu',\n 'input_2': 'TfPoseEstimator/MobilenetV1/Conv2d_11_pointwise/Relu',\n 'output': 'TfPoseEstimator/feat_concat',\n 'type': 'concat'\n })\n ]\n\ndef backbone():\n arch = [ conv1()]\n for i in range(1,12):\n name = 'TfPoseEstimator/MobilenetV1/Conv2d_%s' % i\n prefix = name\n if i == 1:\n input = 'TfPoseEstimator/MobilenetV1/Conv2d_0/Relu'\n else:\n input = 'TfPoseEstimator/MobilenetV1/Conv2d_%s_pointwise/Relu' % (i-1)\n arch.extend(separable_conv(name, input, prefix, True))\n\n return arch\n\n\ndef stages(stage_n):\n arch = []\n for i in range(5):\n for Lstage in range(1,3):\n name = 'TfPoseEstimator/Openpose/MConv_Stage%s_L%s_%s' % (stage_n,Lstage, i+1)\n if i == 
0:\n prefix = name\n input = 'TfPoseEstimator/feat_concat'\n else:\n prefix = name\n input = 'TfPoseEstimator/Openpose/MConv_Stage%s_L%s_%s_pointwise/Relu' %(stage_n, Lstage, i)\n if i != 4:\n arch.extend(separable_conv(name, input, prefix, True))\n else:\n arch.extend(separable_conv(name, input, prefix, False))\n return arch\n\ndef define_networks():\n nets = []\n nets = nets + backbone()\n nets += conv2d_3_pool()\n nets += concat()\n for s in range(1,7):\n nets = nets + stages(s)\n\n return nets\n\ndef evaluate_node(node, session, feed_dict):\n blacklist = ['name', 'type']\n tf_nodes = edict(dict((k, session.graph.get_tensor_by_name('%s:0' % v))\n for k, v in node.items() if k not in blacklist))\n\n return session.run(tf_nodes, feed_dict=feed_dict)\n\ndef run_all(graph_path):\n # load graph\n with tf.gfile.GFile(graph_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n graph = tf.get_default_graph()\n tf.import_graph_def(graph_def, name='TfPoseEstimator')\n sess = tf.Session(graph=graph)\n\n for op in graph.get_operations():\n print(op.name)\n #import pdb; pdb.set_trace()\n sess.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_0/weights:0')\n architecture = define_networks()\n image_shape = 368, 432, 3\n patch = np.random.uniform(\n 0., 255., image_shape).astype(np.uint8)\n\n feed_dict = [ patch ]\n for i, node in enumerate(architecture):\n #import pdb; pdb.set_trace()\n type = node.type\n tf_nodes = evaluate_node(node, sess, {'TfPoseEstimator/image:0': feed_dict} )\n\n globals()['save_layer_%s' % type](node, tf_nodes)\n #save_layer_output(node.name, tf_nodes.output)\n\nclass convert_mobilepose(object):\n def __init__(self, imgpath, graph_path, height, width):\n self.session = tf.Session(config=config)\n self.session.run(tf.global_variables_initializer())\n self.feed = np.array(Image.open(imgpath).resize( (height, width), Image.ANTIALIAS))[np.newaxis,:]\n self.graph_path = graph_path\n\n def get_graph(self, graph_path):\n with tf.gfile.GFile(graph_path, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n graph = tf.get_default_graph()\n tf.import_graph_def(graph_def, name='TfPoseEstimator')\n persistent_sess = tf.Session(graph=graph)\n\n for op in graph.get_operations():\n print(op.name)\n #import pdb; pdb.set_trace()\n return graph\n\n def save_bn_dw(self, prefix_output, prefix, postfix, has_relu=True):\n depthwise = \"{}conv2d_{}_depthwise_weights\".format(prefix_output, postfix)\n t = self.session.graph.get_tensor_by_name(\"{}_{}/depthwise_weights:0\".format(prefix, postfix))\n save_layer_output(depthwise, self.session.run(t, feed_dict={'TfPoseEstimator/image:0': self.feed}))\n\n\n def save_bn_pw(self, prefix_output, prefix, postfix, has_relu=True):\n const = '{}conv2d_{}_const'.format(prefix_output, postfix)\n const_tensor = self.session.graph.get_tensor_by_name(\"{}_{}/BatchNorm/Const:0\".format(prefix, postfix))\n save_layer_output(const, self.session.run(const_tensor, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n beta = '{}conv2d_{}_beta'.format(prefix_output, postfix)\n beta_tensor = self.session.graph.get_tensor_by_name(\"{}_{}/BatchNorm/beta:0\".format(prefix, postfix))\n save_layer_output(beta, self.session.run(beta_tensor, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n mean = '{}conv2d_{}_mean'.format(prefix_output, postfix)\n mean_tensor = self.session.graph.get_tensor_by_name(\"{}_{}/BatchNorm/moving_mean/read:0\".format(prefix, postfix))\n save_layer_output(mean, 
self.session.run(mean_tensor, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n        var = '{}conv2d_{}_var'.format(prefix_output, postfix)\n        var_tensor = self.session.graph.get_tensor_by_name(\"{}_{}/BatchNorm/moving_variance/read:0\".format(prefix, postfix))\n        save_layer_output(var, self.session.run(var_tensor, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n        # conv0_fbn = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_0/BatchNorm/FusedBatchNorm:0\")\n        # save_layer_output('conv2d_0_fbn', self.session.run(conv0_fbn, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n        if (has_relu):\n            relu = '{}conv2d_{}_relu'.format(prefix_output, postfix)\n            relu_tensor = self.session.graph.get_tensor_by_name(\"{}_{}/Relu:0\".format(prefix, postfix))\n            save_layer_output(relu, self.session.run(relu_tensor, feed_dict = {'TfPoseEstimator/image:0' : self.feed}))\n\n    def convert_conv0(self):\n        #slim.get_or_create_global_step()\n        # init_assign_op, init_feed_dict = slim.assign_from_checkpoint(\n        #     model_path, slim.get_variables_to_restore())\n        # session.run(init_assign_op, feed_dict=init_feed_dict)\n        # imported_meta = tf.train.import_meta_graph(model_path + \".meta\")\n        # imported_meta.restore(session, model_path)\n        graph = self.get_graph(self.graph_path)\n        input = self.session.graph.get_tensor_by_name('TfPoseEstimator/image:0')\n        save_layer_output(\"input\", self.session.run(input, feed_dict = {'TfPoseEstimator/image:0' : self.feed}))\n        #tensor_output = session.graph.get_tensor_by_name('TfPoseEstimator/Openpose/concat_stage7:0')\n        #import pdb; pdb.set_trace()\n\n        print('loading tensor by name')\n        #prefix = 'conv0_'\n        conv0_weights = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_0/weights:0')\n        save_layer_output('conv2d_0_weights', self.session.run(conv0_weights, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n        #conv0_conv2d = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_0/Conv2D:0')\n        #save_layer_output('conv2d_0_conv2d', self.session.run(conv0_conv2d, feed_dict= {'TfPoseEstimator/image:0' : self.feed}))\n\n        self.save_bn_pw('', 'TfPoseEstimator/MobilenetV1/Conv2d' , 0 , True)\n\n    def mobilepose_backbone(self):\n        # conv2d_1 to conv2d_11\n        for i in range(1, 12):\n            dw = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_{}_depthwise/depthwise_weights:0\".format(i))\n            #dw_out = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_{}_depthwise/depthwise:0\".format(i))\n            pw = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/weights:0\".format(i))\n            #pw_out = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/Conv2D:0\".format(i))\n\n            # const = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/BatchNorm/Const:0'.format(i))\n            # beta = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/BatchNorm/beta/read:0'.format(i))\n            # mean = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/BatchNorm/moving_mean/read:0'.format(i))\n            # var = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/BatchNorm/moving_variance/read:0'.format(i))\n            # fbn = self.session.graph.get_tensor_by_name('TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/BatchNorm/FusedBatchNorm:0'.format(i))\n            #\n            # relu = 
self.session.graph.get_tensor_by_name(\"TfPoseEstimator/MobilenetV1/Conv2d_{}_pointwise/Relu:0\".format(i))\n # print(session.run(dw_out, feed_dict = {'TfPoseEstimator/image:0' : feed}).shape)\n # print(session.run(pw_out, feed_dict = {'TfPoseEstimator/image:0' : feed}).shape)\n # print(session.run(bn_out, feed_dict = {'TfPoseEstimator/image:0' : feed}).shape)\n self.save_bn_dw('', 'TfPoseEstimator/MobilenetV1/Conv2d', \"{}_depthwise\".format(i), True)\n self.save_bn_pw('', 'TfPoseEstimator/MobilenetV1/Conv2d', \"{}_pointwise\".format(i), True)\n\n\n def mobilepose_branch(self, branch):\n # conv2d_3_pool\n con2d_3_pool = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Conv2d_3_pool:0\")\n\n '''\n name: \"TfPoseEstimator/feat_concat\"\n op: \"ConcatV2\"\n input: \"TfPoseEstimator/Conv2d_3_pool\"\n input: \"TfPoseEstimator/MobilenetV1/Conv2d_7_pointwise/Relu\"\n input: \"TfPoseEstimator/MobilenetV1/Conv2d_11_pointwise/Relu\"\n input: \"TfPoseEstimator/feat_concat/axis\"\n '''\n # feat_concat\n feat_concat = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/feat_concat:0\")\n\n for stage in range(1, 7):\n for layer_number in range(1, 6):\n curr_prefix = 'TfPoseEstimator/Openpose/MConv_Stage{}_{}'.format(stage, branch)\n dw = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_depthwise/depthwise_weights:0\".format(stage, branch, layer_number))\n #dw_out = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_depthwise/depthwise:0\".format(stage, branch, layer_number))\n pw = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/weights:0\".format(stage, branch, layer_number))\n #pw_out = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/Conv2D:0\".format(stage, branch, layer_number))\n\n # const = self.session.graph.get_tensor_by_name(curr_prefix + \"_{}_pointwise/BatchNorm/Const:0\".format(stage, branch, layer_number))\n # beta = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/BatchNorm/beta/read:0\".format(stage, branch, layer_number))\n # mean = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/BatchNorm/moving_mean/read:0\".format(stage, branch, layer_number))\n # var = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/BatchNorm/moving_variance/read:0\".format(stage, branch, layer_number))\n # fbn = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/BatchNorm/FusedBatchNorm:0\".format(stage, branch, layer_number))\n #\n # if layer_number < 5 :\n # relu = self.session.graph.get_tensor_by_name(\"TfPoseEstimator/Openpose/MConv_Stage{}_{}_{}_pointwise/Relu:0\".format(stage, branch, layer_number))\n self.save_bn_dw('stage{}_branch{}_'.format(stage, branch), curr_prefix, '{}_depthwise'.format(layer_number), False)\n self.save_bn_pw('stage{}_branch{}_'.format(stage, branch), curr_prefix, '{}_pointwise'.format(layer_number), layer_number < 5)\n\n def nodes(self):\n return [n for n in tf.get_default_graph().as_graph_def().node]\n #epsilon = 0.0010000000474974513\n\nif __name__ == '__main__':\n # print(\"using freezed graph model for conversion\")\n #model = convert_mobilepose('4kids.jpg', './models/graph/mobilenet_thinzaikun_432x368/graph_freeze.pb', 432, 368)\n #model.convert_conv0()\n # model.mobilepose_backbone()\n # model.mobilepose_branch('L1')\n # 
model.mobilepose_branch('L2')\n    #run_all('../../../../tf-pose-estimation/models/graph/mobilenet_thin/graph_opt.pb')\n    run_all('/home/zaikun/hdd/tec2/technology/convert_model/tf_cudnn/model/graph_freeze.pb')\n","sub_path":"scripts/extract_mobilepose.py","file_name":"extract_mobilepose.py","file_ext":"py","file_size_in_byte":16930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573354790","text":"from quantdata.analyze import AbstractAnalyze\nfrom quantdata.stock import quotes\nfrom quantdata import logger\nimport pandas as pd\nimport os\nimport json\n\n\nclass GuaidianJsonAnalyze(AbstractAnalyze):\n\n    def __init__(self):\n        self.__logger = logger.getLogger(\"GuaidianJson\")\n        self.__data_path = './stockdata/mainreport'\n\n    def get_code_list(self):\n        try:\n            stock_list = quotes.get_stock_hq_list()\n            return stock_list[\"code\"].tolist()\n        except Exception as e:\n            self.__logger.error(e)\n            return []\n\n    def __get_main_report_df(self,code):\n\n        with open(os.path.join(self.__data_path,code+\".json\"),\"r\") as f:\n            report_obj = json.load(f)\n        colums = [x[0] if isinstance(x, list) else x for x in report_obj['title']]\n        df = pd.DataFrame(columns=colums)\n        for i in range(0,len(colums)):\n            df[colums[i]] = report_obj[\"report\"][i]\n        df = df.set_index(colums[0])\n        df = df.sort_index(ascending=False)\n        return df\n\n    def analyze_data(self,code):\n        try:\n            '''Earnings growth above 20% and below 30% for three consecutive years, and above 30% in each of the last three quarters'''\n            df = self.__get_main_report_df(code)\n            # get the reports for the most recent 3 quarters\n            df_3_quarter = df.head(3)\n            # get the annual reports for the most recent 3 years\n            df_3_year = df[df.index.str.contains(\"-12-31\")].head(3)\n\n            for margin_incr in df_3_year['净利润同比增长率']:\n                if self._to_float(margin_incr) > 40.0 or self._to_float(margin_incr) < 20.0:\n                    return False,code\n            for margin_incr in df_3_quarter['净利润同比增长率']:\n                if self._to_float(margin_incr) < 30.0:\n                    return False,code\n            print(code)\n            return True,code\n        except Exception as e:\n            self.__logger.error(e)\n            return False,code\n\n    def _to_float(self, num):\n        try:\n            return float(num)\n        except:\n            return 0.0\n\n    def store_success_result(self,data):\n        print(\"success code:%s\"%(data))\n\n    def store_fail_result(self,data):\n        if data is not None:\n            print(\"fail code:%s\" % (data))\n\nif __name__ == \"__main__\":\n    ana = GuaidianJsonAnalyze()\n    ana.run(100)\n","sub_path":"StockAdvanced/analyze/guandian_json.py","file_name":"guandian_json.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256105106","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 4 16:58:29 2019\n\n@author: spikezz\n\"\"\"\nimport threading_action_detecter\nimport rospy\n#import time\nfrom std_msgs.msg import Float32MultiArray\n\nrospy.init_node('action_publisher', anonymous=True)\npub_action = rospy.Publisher('action', Float32MultiArray, queue_size=1000)\nthread_controller=threading_action_detecter.action_detecter(1, \"xbox action\")\n\nthread_controller.start()\nwhile not rospy.is_shutdown():\n#    print('printing')\n    act_msg=Float32MultiArray()\n    now=rospy.get_rostime()\n    act_msg.data.append(now.secs)\n    act_msg.data.append(now.nsecs)\n    act_msg.data.append(thread_controller.throttle_signal)\n    act_msg.data.append(thread_controller.brake_signal)\n    act_msg.data.append(thread_controller.steering_signal) \n    pub_action.publish(act_msg)\n#    print(act_msg)\n# 
time.sleep(1)","sub_path":"src_c/action_publisher.py","file_name":"action_publisher.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610554207","text":"#!/usr/bin/env python\n\"\"\"\n +-----------------------------------------------------------------------------+\n | Extended Memory Semantics (EMS) Version 1.4.1 |\n | Synthetic Semantics http://www.synsem.com/ mogill@synsem.com |\n +-----------------------------------------------------------------------------+\n | Copyright (c) 2016, Jace A Mogill. All rights reserved. |\n | |\n | Redistribution and use in source and binary forms, with or without |\n | modification, are permitted provided that the following conditions are met: |\n | * Redistributions of source code must retain the above copyright |\n | notice, this list of conditions and the following disclaimer. |\n | * Redistributions in binary form must reproduce the above copyright |\n | notice, this list of conditions and the following disclaimer in the |\n | documentation and/or other materials provided with the distribution. |\n | * Neither the name of the Synthetic Semantics nor the names of its |\n | contributors may be used to endorse or promote products derived |\n | from this software without specific prior written permission. |\n | |\n | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |\n | \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |\n | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |\n | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SYNTHETIC |\n | SEMANTICS LLC BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |\n | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |\n | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |\n | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |\n | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |\n | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |\n | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
|\n | |\n +-----------------------------------------------------------------------------+\n\"\"\"\nfrom setuptools import setup, Extension\nimport sys\nimport os\nimport platform\nfrom glob import glob\n\nPACKAGE_NAME = \"libems\"\nPACKAGE_VERSION = \"1.4.1\" + \".1\"\nREPO_ROOT_DIR = os.path.realpath(os.path.dirname(__file__))\nTHIS_DIR = REPO_ROOT_DIR\nSRC_DIR = os.path.join(THIS_DIR, 'src')\nINCLUDE_DIR = os.path.join(THIS_DIR, 'include')\nMODULE_DIR = os.path.join(THIS_DIR, 'Python')\n\n# OS Specific link flags\nlink_args = []\nif sys.platform in (\"linux\", \"linux2\"):\n link_args.append(\"-lrt\")\nelif sys.platform in (\"darwin\",):\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])\n link_args.append(\"-stdlib=libc++\")\nelse:\n pass\n\nsetup(\n name=PACKAGE_NAME,\n version=PACKAGE_VERSION,\n packages=[PACKAGE_NAME],\n package_dir={PACKAGE_NAME: os.path.relpath(MODULE_DIR, THIS_DIR)},\n setup_requires=[\"cffi>=1.0.0\", \"setuptools\"],\n install_requires=[\"cffi>=1.0.0\"],\n\n # Author details\n author='Jace A Mogill',\n author_email='mogill@synsem.com',\n\n description='Extended Memory Semantics (EMS) for Python',\n license='BSD',\n\n # The project's main homepage.\n url='https://github.com/SyntheticSemantics/ems',\n\n data_files=[\n ('include/{}'.format(PACKAGE_NAME), glob(os.path.join(THIS_DIR, 'include/ems/ems*.h'))),\n ],\n\n ext_modules=[\n Extension(\n \"{pkg}/{pkg}\".format(pkg=PACKAGE_NAME),\n sources=[os.path.relpath(os.path.join(SRC_DIR, src), THIS_DIR) for src in os.listdir(SRC_DIR)],\n extra_link_args=link_args,\n include_dirs=[os.path.relpath(INCLUDE_DIR, THIS_DIR)],\n define_macros=[\n # ('BUILD_PYTHON', None),\n ],\n ),\n ],\n long_description='Persistent Shared Memory and Parallel Programming Model',\n keywords=\" \".join([\n \"nonvolatile memory\",\n \"NVM\",\n \"NVMe\",\n \"multithreading\",\n \"multithreaded\",\n \"parallel\",\n \"parallelism\",\n \"concurrency\",\n \"shared-memory\",\n \"multicore\",\n \"manycore\",\n \"transactional-memory\",\n \"TM\",\n \"persistent-memory\",\n \"pmem\",\n \"Extended-Memory-Semantics\",\n \"EMS\",\n ]),\n classifiers=[ # https://pypi.org/classifiers/\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: C\",\n \"Programming Language :: C++\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: JavaScript\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213538916","text":"from sqlalchemy import Column, Index, Integer, Text\n\nfrom .meta import Base\n\n\nclass MyModel(Base):\n __tablename__ = \"models\"\n id = Column(Integer, primary_key=True)\n name = Column(Text)\n value = Column(Integer)\n\n\nIndex(\"my_index\", MyModel.name, unique=True, mysql_length=255)\n","sub_path":"src/models/mymodel.py","file_name":"mymodel.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296377062","text":"#!/usr/bin/env python\n\"\"\"\nFile: getDoc.py\nAuthor: Ramandeep Farmaha 20516974\nDate Last Modified: January 22nd, 2018\nPython Version: 3.4\n\nRetrieves documents from LA Times dataset given index and either DOCNO or internal ID. 
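DOCNOs follow the LA%MM%DD%YY-NNNN pattern that retrieve_by_docno below maps onto pickle paths. 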
For University of Waterloo course\nMSCI 541.\n\"\"\"\n\nimport pickle\n\nDOCNO = 'docno'\nIDX_PATH = '/doc_id_no.p'\n\n\ndef retrieve_by_docno(path, param):\n \"\"\"Retrieve document and associated metadata from DOCNO\"\"\"\n params = param.split(\"-\")\n # Document with DOCNO LA%MM%DD%YY-NNNN is stored in path/YY/MM/DD/NNNN.p\n file_path = \"/{}/{}/{}/{}.p\".format(params[0][-2:], params[0][-6:-4], params[0][-4:-2], params[1])\n file_path = path + file_path\n with open(file_path, 'rb') as f:\n document = pickle.load(f)\n return document\n\n\ndef retrieve_by_id(path, param):\n \"\"\"Retrieve document and associated metadata from internal ID\"\"\"\n with open(path + IDX_PATH, 'rb') as file:\n doc_id_no = pickle.load(file)\n docno = doc_id_no[int(param)]\n return retrieve_by_docno(path, docno)","sub_path":"A5/getDocument.py","file_name":"getDocument.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468464609","text":"import config\nfrom mysqldb import Mysqldb\nimport common_fun\n \nclass Okex_data(object):\n def __init__(self): \n self.db = Mysqldb(config.MySqlHost, config.MySqlUser, config.MySqlPasswd, config.MySqlDb, config.MySqlPort)\n self.base_url = 'https://www.okex.com/api/v1/' \n self.headers = {\n \"Content-type\" : \"application/x-www-form-urlencoded\",\n }\n \n def tickers(self):\n request_url = self.base_url + 'tickers.do'\n res_json = common_fun.get_url_json(request_url, self.headers)\n \n insert_list = []\n data_time = res_json['date']\n for ticker in res_json['tickers']:\n insert_str = \"INSERT INTO okex_tickers (date_time, currency_pair, high, low, last, sell, buy, vol)\"\n insert_str += \"VALUES (\" + str(data_time) + \",'\" + str(ticker['symbol']) + \"',\" + str(\n ticker['high']) + \",\" + str(ticker['low']) + \",\" + str(ticker['last']) + \",\" + str(\n ticker['sell']) + \",\" + str(ticker['buy']) + \",\" + str(ticker['vol']) + \");\"\n insert_list.append(insert_str)\n \n try:\n self.db.execute_list(insert_list)\n except:\n print(insert_str)\n print('insert_list tickers err data_time = ', data_time)\n \nif __name__ == \"__main__\":\n okex = Okex_data()\n okex.tickers()\n \n ","sub_path":"quotes/okex_data.py","file_name":"okex_data.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411882908","text":"def separa(tamanho=42, c='padrao'):\n print(f\"{cor(texto=c)}\", end='')\n print('-' * tamanho)\n print(f\"{cor()}\", end='')\n\n\ndef titulo(texto, tam=42, c='padrao'):\n separa(tam, c)\n print(f\"{cor(texto=c)}\", end=\"\")\n print(str(texto).center(tam))\n separa(tam, c)\n\n\ndef menu(itens):\n titulo('MENU PRINCIPAL')\n c = 1\n for item in itens:\n print(f\"{cor('verde')}{c}{cor()} - {cor('azul')}{item}{cor()}\")\n c += 1\n separa()\n opc = lemenu(f\"{cor('amarelo')}Sua opção: {cor()}\")\n return opc\n\n\ndef cor(texto='padrao', fundo='padrao', estilo='padrao'):\n cortxt = {'padrao' : '',\n 'branco' : '30',\n 'vermelho': '31',\n 'verde' : '32',\n 'amarelo' : '33',\n 'azul' : '34',\n 'roxo' : '35',\n 'ciano' : '36',\n 'cinza' : '37'}\n\n corfundo = {'padrao' : '',\n 'branco' : ';40',\n 'vermelho': ';41',\n 'verde' : ';42',\n 'amarelo' : ';43',\n 'azul' : ';44',\n 'roxo' : ';45',\n 'ciano' : ';46',\n 'cinza' : ';47'}\n\n est = {'padrao' : '0;',\n 'negrito' : '1;',\n 'sublinhado' : '4;',\n 'negativo' : '7;'}\n\n colorido = 
f\"\\033[{est[estilo]}{cortxt[texto]}{corfundo[fundo]}m\"\n\n return colorido\n\n\ndef leiaint(msg=''):\n while True:\n try:\n inteiro = int(input(msg).strip())\n except (ValueError, TypeError):\n print(\"\\033[31mVocê digitou um tipo inválido.\\033[m Digite um número inteiro.\")\n continue\n except KeyboardInterrupt:\n print(\"\\033[31mO usuário não digitou um valor.\\033[m\")\n return 0\n except Exception as erro:\n print(f\"\\033[31mExceção não tratada.\")\n print(erro.__class__)\n continue\n else:\n break\n return inteiro\n\n\ndef lemenu(msg=''):\n from time import sleep\n\n while True:\n try:\n inteiro = int(input(msg).strip())\n except (ValueError, TypeError):\n print(\"\\033[31mOpção inválida\")\n sleep(1)\n continue\n except KeyboardInterrupt:\n print(\"\\033[31mO usuário não digitou um valor\\033[m\")\n sleep(1)\n return 0\n except Exception as erro:\n print(f\"\\033[31mExceção não tratada\")\n print(erro.__class__)\n sleep(1)\n continue\n else:\n break\n return inteiro","sub_path":"Prontos e Corrigidos/ex115/lib/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"538382272","text":"from numpy import *\n\ndef sigmoid(x):\n return 1 / (1+exp(-x))\n\ndef dsigmoid(x):\n return x * (1 - x)\n\nclass NeuralNet:\n def __init__(self):\n self.hiddenLayerW = None\n self.outputLayerW = None\n self.output = None\n self.MSE = None\n self.trained = False\n \n def predict( self, X ):\n ### ... YOU FILL IN THIS CODE ....\n X = array([X]) # X was an array for some reason causing errors with X.shape, had to turn it into a matrix? But only one element? Dont know how this was supposed to work\n\n a0 = hstack((array([[1]*X.shape[0]]).T,X)) # Activation of input layer\n\n return sigmoid(dot(sigmoid(dot(a0, self.hiddenLayerW)), self.outputLayerW))[0] # Computer activation of output layer\n \n def train(self,X,Y,hiddenLayerSize,epochs): \n ## size of input layer (number of inputs plus bias)\n ni = X.shape[1] + 1\n\n ## size of hidden layer (number of hidden nodes plus bias)\n nh = hiddenLayerSize + 1\n\n # size of output layer\n no = 10\n\n ## initialize weight matrix for hidden layer\n self.hiddenLayerW = 2*random.random((ni,nh)) - 1\n ## initialize weight matrix for output layer\n self.outputLayerW = 2*random.random((nh,no)) - 1\n\n ## learning rate\n alpha = 0.001\n\n ## Mark as not trained\n self.trained = False\n ## Set up MSE array\n self.MSE = [0]*epochs\n\n for epoch in range(epochs):\n\n ### ... YOU FILL IN THIS CODE ....\n\n a0 = hstack((array([[1]*X.shape[0]]).T,X)) # Activation of input layer\n\n in0 = dot(a0, self.hiddenLayerW) # Input to hidden layer\n\n a1 = sigmoid(in0) # Activation of hidden layer\n\n a1[:,0] = 1 # Set bias unit\n\n in1 = dot(a1, self.outputLayerW) # Input to output layer\n\n a2 = sigmoid(in1) # Activation of output layer\n\n error_out = Y - a2 # Observered error on output\n\n delta_out = error_out * dsigmoid(a2) # Direction of targer\n\n ## Record MSE\n self.MSE[epoch] = mean(list(map(lambda x:x**2,error_out)))\n\n ### ... 
YOU FILL IN THIS CODE ...\n\n            error_hidden = dot(delta_out, (self.outputLayerW).T) # Contribution of hidden to error\n\n            delta_hidden = error_hidden * dsigmoid(a1) # Direction of target for hidden layer \n\n            self.hiddenLayerW = self.hiddenLayerW + dot(dot(alpha, a0.T), delta_hidden) # Hidden layer weight update\n\n            self.outputLayerW = self.outputLayerW + dot(dot(alpha, a1.T), delta_out) # Output layer weight update\n\n        ## Update trained flag\n        self.trained = True\n\n","sub_path":"NeuralNet.py","file_name":"NeuralNet.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512108071","text":"from __future__ import division\nimport wx\nfrom wx.lib.floatcanvas.FloatCanvas import FloatCanvas\nimport random\nimport collections\nimport time\nimport sys\nimport math\n\n# Named tuples are great if we want an object that is purely a data structure we interact with: one\n# that doesn't need its own methods and won't need changing. Therefore they are only good for passing\n# information around, and not for passing around values that need changing.\nCoordinates = collections.namedtuple('Coordinates', 'x y')\n\n\"\"\"\nOur world.\n\"\"\"\nclass City(object):\n    \n    def __init__(self, size, neighbourhood_size=3):\n        self.neighbourhood_size = neighbourhood_size\n        self.__size = size\n        self.__number_of_houses = size * size\n        self.__houses = [None for _ in range(self.__number_of_houses)]\n    \n    def get_house(self, coords):\n        if self.within_city(coords):\n            return self.__houses[self.coords_to_index(coords)]\n        return None\n    \n    def set_house(self, coords, occupant):\n        if self.within_city(coords):\n            self.__houses[self.coords_to_index(coords)] = occupant\n    \n    def get_neighbourhood(self, coords):\n        neighbours = []\n        for x in range(coords.x - self.neighbourhood_size, coords.x + self.neighbourhood_size + 1):\n            for y in range(coords.y - self.neighbourhood_size, coords.y + self.neighbourhood_size + 1):\n                if coords.x != x and coords.y != y:\n                    neighbour_coords = Coordinates(x, y)\n                    neighbour = self.get_house(neighbour_coords)\n                    if neighbour != None:\n                        neighbours.append(neighbour)\n        return neighbours\n    \n    def coords_to_index(self, coords):\n        return coords.y * self.__size + coords.x\n    \n    def within_city(self, coords):\n        return (0 <= coords.x < self.__size and 0 <= coords.y < self.__size)\n    \n    @property\n    def empty_houses(self):\n        empty = []\n        for x in range(self.__size):\n            for y in range(self.__size):\n                coords = Coordinates(x, y)\n                if self.get_house(coords) == None:\n                    empty.append(coords)\n        return empty\n    \n    @property\n    def houses(self):\n        houses = []\n        for x in range(self.__size):\n            for y in range(self.__size):\n                coords = Coordinates(x, y)\n                houses.append(coords)\n        return houses\n    \n    @property\n    def number_of_houses(self):\n        return self.__number_of_houses\n    \n    @property\n    def size(self):\n        return self.__size\n\n\"\"\"\nManages everyone living in our city\n\"\"\"\nclass Population(object):\n    def __init__(self, city, size):\n        self.__size = size\n        self.__current_step = 0\n        self.city = city\n        self.people = [Person(city) for _ in range(self.__size)]\n        \n        #Occupy the houses randomly\n        homes = []\n        for x in range(self.city.size):\n            for y in range(self.city.size):\n                coords = Coordinates(x, y)\n                homes.insert(random.randint(0, len(homes)), coords)\n        for person in self.people:\n            home = homes.pop(0)\n            person.move(home)\n    \n    def step(self):\n        self.__current_step += 1\n        \n        empty_houses = self.city.empty_houses\n        people_to_move = self.people_to_move\n        \n        
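# Relocate everyone who wants to move into randomly chosen vacant houses\n        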
self.move_people(people_to_move, empty_houses)\n    \n    @property\n    def people_to_move(self):\n        move = []\n        for person in self.people:\n            if person.wants_to_move():\n                move.append(person)\n        return move\n    \n    def move_people(self, people, empty_houses):\n        if len(people) > 0:\n            random.shuffle(people)\n            moves = []\n            for person in people:\n                if len(empty_houses) > 0:\n                    new = random.randint(0, len(empty_houses) - 1)\n                    moves.append([person, empty_houses[new]])\n                    empty_houses.pop(new)\n            \n            for m in moves:\n                #print(m[0])\n                m[0].move(m[1])\n        \n    \n    @property\n    def size(self):\n        return self.__size\n    \n    @property\n    def min_income(self):\n        return min(self.people, key=lambda p: p.income).income\n    \n    @property\n    def max_income(self):\n        return max(self.people, key=lambda p: p.income).income\n\n\"\"\"\nA person living in our city\n\"\"\"\nclass Person(object):\n    def __init__(self, city):\n        self.current_location = Coordinates(-1, -1)\n        self.city = city\n        self.__income = random.randint(250, 750)\n    \n    def wants_to_move(self):\n        # Move when income is not approximately equal to the surrounding incomes\n        neighbours = self.city.get_neighbourhood(self.current_location)\n        cumulative_income = sum([p.income for p in neighbours])\n        if len(neighbours) > 0:\n            average = cumulative_income / len(neighbours)\n        else:\n            average = 0\n        return abs(average - self.income) > self.income * 0.25\n    \n    def move(self, coords):\n        self.city.set_house(coords, self)\n        if self.current_location != Coordinates(-1, -1):\n            self.city.set_house(self.current_location, None)\n        self.current_location = coords\n    \n    @property\n    def income(self):\n        return self.__income\n    \n\"\"\"\nOutputs our city\n\"\"\"\nclass CityPrinter(object):\n    \n    def __init__(self, city, population):\n        self.city = city\n        self.population = population\n        \n        self.app = wx.App(False)\n        self.w = self.city.size + 100\n        self.h = self.city.size + 120\n        self.frame = wx.Frame(None, -1, 'Income Map', size=(self.w, self.h))\n        self.canvas = FloatCanvas(self.frame, -1)\n    \n    def create_heatmap(self):\n        min_income = self.population.min_income\n        max_income = self.population.max_income\n        print(\"Lowest Income: \" + str(min_income))\n        print(\"Highest Income: \" + str(max_income))\n        \n        for house in self.city.houses:\n            x = house.x - self.city.size / 2\n            y = house.y - self.city.size / 2\n            person = self.city.get_house(house)\n            if person != None:\n                col = self.color(person.income, min_income, max_income)\n                self.canvas.AddPoint((x, y), Color = col)\n        \n        self.frame.Show()\n        self.app.MainLoop()\n    \n    def color(self, value, min_val, max_val):\n        # Approximating http://geog.uoregon.edu/datagraphics/color/Bu_10.txt on the fly\n        red_range = (0, 0.9)\n        green_range = (0.25, 1.0)\n        blue_range = (1.0, 1.0)\n        \n        percentage_of_range = 1 - (value - min_val)/(max_val - min_val)\n        \n        red = (((red_range[1] - red_range[0]) * percentage_of_range) + red_range[0]) * 255\n        green = (((green_range[1] - green_range[0]) * percentage_of_range) + green_range[0]) * 255\n        blue = (((blue_range[1] - blue_range[0]) * percentage_of_range) + blue_range[0]) * 255\n        \n        return wx.Colour(red, green, blue, 1)\n    \n    \"\"\"\n    Write the city to standard out for quick testing and debugging.\n    \"\"\"\n    def __str__(self):\n        output = \"\"\n        for y in range(self.city.size):\n            row = \"\"\n            for x in range(self.city.size):\n                coords = Coordinates(x, y)\n                occupied = self.city.get_house(coords) != None\n                if occupied:\n                    row += str(\"%3d \" % (self.city.get_house(coords).income))\n                else:\n                    row += str(\"    \")\n            output += row + \"\\n\"\n        return output\n\ndef 
print_argument_options():\n print(\"Command Line Arguments:\")\n print(\"\\trentals.py [city size] [population] [neighbourhood]\")\n print(\"\")\n print(\"\\tcity size: How many rows/columns the city should have.\")\n print(\"\\t The city is always square. Defaults to 100\")\n print(\"\")\n print(\"\\tpopulation: How many people live in the city. Defaults to 90% city size.\")\n print(\"\\t Leave as 0 if you want it to default.\")\n print(\"\")\n print(\"\\tneighbourhood: How far a person looks for neighbours. Default is 3.\")\n print(\"\")\n print(\"\")\n\ndef print_info():\n print(\"\")\n print(\"Income Simulation\")\n print(\"Program By Mitchell Pomery\")\n print(\"\")\n print(\"\")\n\nif __name__=='__main__':\n print_info()\n print_argument_options()\n \n # Get experiment variables from command line arguments\n if len(sys.argv) > 1:\n citysize = int(sys.argv[1])\n else:\n citysize = 100\n if len(sys.argv) > 2:\n citypopulation = int(sys.argv[2])\n if citypopulation >= (citysize*citysize - 1) or citypopulation <= 0:\n print(\"Population Invalid. Defaulting to 90% of city size.\")\n citypopulation = int(citysize * citysize * 0.9)\n else:\n citypopulation = int(citysize * citysize * 0.9)\n if len(sys.argv) > 3:\n neighbourhood_size = int(sys.argv[3])\n else:\n neighbourhood_size = 3\n \n time1 = time.time()\n \n city = City(citysize, neighbourhood_size)\n population = Population(city, citypopulation)\n cityprinter = CityPrinter(city, population)\n \n print(\"Starting Simulation\")\n print(\"Number of Houses: \" + str(city.number_of_houses))\n print(\"Population: \" + str(population.size))\n print(\"\")\n #print(cityprinter)\n for _ in range(100):\n population.step()\n \n time2 = time.time()\n print('Simulation took %0.3f ms' % ((time2-time1) * 1000.0))\n \n print(\"Generating Image\")\n cityprinter.create_heatmap()\n\n","sub_path":"source/income.py","file_name":"income.py","file_ext":"py","file_size_in_byte":9618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"174868833","text":"\"\"\"This file is Avature_ColasRail spider created on top of the Avature\nscrapy crawl avature_colasrail -a mining_job_id=9999 -a iteration=1 -a url=\"http://colasrail.avature.net/Careers/allVacancies\" -a robots_obey=0 -a extract=1\n\nsample url:\n http://colasrail.avature.net/Careers/allVacancies\n\"\"\"\nfrom urlparse import urlsplit\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.spiders.avature import Avature\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.lib.utils import add_get_params\n\n\nclass Avature_ColasRail(Avature):\n\n name = 'avature_colasrail' # unique identifier for this spider\n\n def parse(self, response):\n \"\"\"Parse job links in that page.\n Get pagination and loop through pages\n Usually 20 jobs per page, some have 5\n \"\"\"\n self.set_meta_language(response)\n\n sel = Selector(response)\n rows = sel.xpath('//div[@class=\"jobListItem clearer\"]')\n for row in rows:\n l_row = BrightcorpItemLoader(selector=row)\n\n url = l_row.get_xpath('p/a[@class=\"jobTitle\"]/@href')[0]\n position = l_row.get_xpath('p/a[@class=\"jobTitle\"]/text()')\n location = l_row.get_xpath('p/strong[@class=\"locationText\"]/text()')\n posted_date = l_row.get_xpath('p/strong[@class=\"daysAgo\"]/text()')\n param = {\n 'location': location,\n 'position': position,\n 'posted_date': posted_date}\n\n yield Request(\n url,\n callback=self.parse_job_callback(),\n meta=param)\n\n next_page = 
sel.xpath('//a[.=\"Next >>\"]/@href').extract()\n if next_page:\n next_page_url = next_page[0]\n yield Request(next_page_url, callback=self.parse)\n\n def parse_job(self, response):\n \"\"\"Extract job data\n \"\"\"\n sel = Selector(response)\n\n loader = BrightcorpItemLoader(selector=sel)\n\n referencenumber = self.get_referencenumber(response)\n\n loader.add_value(\"title\", response.meta['position'])\n loader.add_xpath(\n \"description\",\n '//div[@class=\"jobDescription clearer\"]')\n loader.add_value(\"location\", response.meta['location'])\n loader.add_value(\"referencenumber\", \"%s-%s\" % (\n urlsplit(response.url).netloc.split('.')[0],\n response.url.split('jobDetail?id=')[-1]\n ))\n\n loader.add_value(\"url\", response.url)\n loader.add_value(\"date\", response.meta['posted_date'])\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/avature_colasrail.py","file_name":"avature_colasrail.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"646481745","text":"# Copyright 2020 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom __future__ import print_function\n\nimport json\nimport os\nimport sys\nimport tempfile\nimport unittest\n\nif sys.version_info[0] == 2:\n import mock\nelse:\n import unittest.mock as mock\n\nfrom pyfakefs import fake_filesystem_unittest\n\nfrom unexpected_passes import builders\nfrom unexpected_passes import multiprocessing_utils\nfrom unexpected_passes import unittest_utils\n\n\nclass GetCiBuildersUnittest(fake_filesystem_unittest.TestCase):\n def CreateFile(self, *args, **kwargs):\n # TODO(crbug.com/1156806): Remove this and just use fs.create_file() when\n # Catapult is updated to a newer version of pyfakefs that is compatible with\n # Chromium's version.\n if hasattr(self.fs, 'create_file'):\n self.fs.create_file(*args, **kwargs)\n else:\n self.fs.CreateFile(*args, **kwargs)\n\n def testJsonContentLoaded(self):\n \"\"\"Tests that the correct JSON data is loaded in.\"\"\"\n self.setUpPyfakefs()\n gpu_json = {\n 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},\n 'Android Release (Nexus 5X)': {\n 'isolated_scripts': [{\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name':\n 'telemetry_gpu_integration_test',\n }],\n },\n 'GPU Linux Builder': {},\n }\n gpu_fyi_json = {\n 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},\n 'ANGLE GPU Android Release (Nexus 5X)': {\n 'isolated_scripts': [{\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name':\n 'telemetry_gpu_integration_test',\n }],\n },\n 'GPU FYI Linux Builder': {},\n }\n\n self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,\n 'chromium.gpu.json'),\n contents=json.dumps(gpu_json))\n self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,\n 'chromium.gpu.fyi.json'),\n contents=json.dumps(gpu_fyi_json))\n\n gpu_builders = builders.GetCiBuilders('webgl_conformance')\n self.assertEqual(\n gpu_builders,\n set([\n 'Android Release (Nexus 5X)', 'ANGLE GPU Android Release (Nexus 5X)'\n ]))\n\n def testFilterBySuite(self):\n \"\"\"Tests that only builders that run the given suite are returned.\"\"\"\n self.setUpPyfakefs()\n gpu_json = {\n 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},\n 'Android Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name': 'not_telemetry',\n },\n ],\n },\n 'Linux Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 
'not_a_suite',\n ],\n 'isolate_name': 'telemetry_gpu_integration_test',\n },\n ],\n },\n 'Windows Tester': {\n 'isolated_scripts': [\n {\n 'args': [\n 'webgl_conformance',\n ],\n 'isolate_name': 'telemetry_gpu_integration_test',\n },\n ],\n },\n }\n\n self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,\n 'chromium.json'),\n contents=json.dumps(gpu_json))\n\n gpu_builders = builders.GetCiBuilders('webgl_conformance')\n self.assertEqual(gpu_builders, set(['Windows Tester']))\n\n def testRealContentCanBeLoaded(self):\n \"\"\"Tests that *something* from the real JSON files can be loaded.\"\"\"\n # This directory is not available on swarming, so if it doesn't exist, just\n # skip the test.\n if not os.path.exists(builders.TESTING_BUILDBOT_DIR):\n return\n self.assertNotEqual(len(builders.GetCiBuilders('webgl_conformance')), 0)\n\n\nclass GetMirroredBuildersForCiBuilderUnittest(unittest.TestCase):\n def setUp(self):\n self._patcher = mock.patch.object(builders,\n '_GetBuildbucketOutputForCiBuilder')\n self._bb_mock = self._patcher.start()\n self.addCleanup(self._patcher.stop)\n\n def testFakeCiBuilder(self):\n \"\"\"Tests that a fake CI builder gets properly mapped.\"\"\"\n with mock.patch.object(builders, 'FAKE_CI_BUILDERS', {'foo_ci': 'foo_try'}):\n try_builder, found_mirror = builders._GetMirroredBuildersForCiBuilder(\n 'foo_ci')\n self.assertTrue(found_mirror)\n self.assertEqual(try_builder, set(['foo_try']))\n self._bb_mock.assert_not_called()\n\n def testNoBuildbucketOutput(self):\n \"\"\"Tests that a failure to get Buildbucket output is surfaced.\"\"\"\n self._bb_mock.return_value = ''\n try_builder, found_mirror = builders._GetMirroredBuildersForCiBuilder(\n 'foo_ci')\n self.assertFalse(found_mirror)\n self.assertEqual(try_builder, set(['foo_ci']))\n\n def testBuildbucketOutput(self):\n \"\"\"Tests that Buildbucket output is parsed correctly.\"\"\"\n self._bb_mock.return_value = json.dumps({\n 'output': {\n 'properties': {\n 'mirrored_builders': [\n 'try:foo_try',\n 'try:bar_try',\n ]\n }\n }\n })\n try_builders, found_mirror = builders._GetMirroredBuildersForCiBuilder(\n 'foo_ci')\n self.assertTrue(found_mirror)\n self.assertEqual(try_builders, set(['foo_try', 'bar_try']))\n\n\nclass GetTryBuildersUnittest(unittest.TestCase):\n def setUp(self):\n self._get_patcher = mock.patch.object(builders,\n '_GetMirroredBuildersForCiBuilder')\n self._get_mock = self._get_patcher.start()\n self.addCleanup(self._get_patcher.stop)\n self._pool_patcher = mock.patch.object(multiprocessing_utils,\n 'GetProcessPool')\n self._pool_mock = self._pool_patcher.start()\n self._pool_mock.return_value = unittest_utils.FakePool()\n self.addCleanup(self._pool_patcher.stop)\n\n def testNoOutputCausesFailure(self):\n \"\"\"Tests that a failure to get Buildbot output raises an exception.\"\"\"\n self._get_mock.return_value = (set(['foo_ci']), False)\n with self.assertRaises(RuntimeError):\n builders.GetTryBuilders(['foo_ci'])\n\n def testOutputReturned(self):\n \"\"\"Tests that parsed builders get returned on success.\"\"\"\n\n def SideEffect(ci_builder):\n b = [\n ci_builder.replace('ci', 'try'),\n ci_builder.replace('ci', 'try2'),\n ]\n return set(b), True\n\n self._get_mock.side_effect = SideEffect\n mirrored_builders = builders.GetTryBuilders(['foo_ci', 'bar_ci'])\n self.assertEqual(mirrored_builders,\n set(['foo_try', 'foo_try2', 'bar_try', 'bar_try2']))\n\n\nif __name__ == '__main__':\n 
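# verbosity=2 prints each test method and its result as it runs\n  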
unittest.main(verbosity=2)\n","sub_path":"content/test/gpu/unexpected_passes/builders_unittest.py","file_name":"builders_unittest.py","file_ext":"py","file_size_in_byte":7062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133858940","text":"# coding=utf-8\n'''\nCreated on 2018-03-13\n@author: Maco\nProject: Writing web test cases\n'''\nfrom selenium import webdriver\nimport unittest, time\n\n\nclass BaiduTest(unittest.TestCase):\n    def setUp(self):\n        self.driver = webdriver.Firefox()\n        self.driver.implicitly_wait(10)  # implicit wait of 10 seconds\n        self.url = \"https://www.baidu.com\"\n\n    def test_baidu(self):\n        driver = self.driver\n        driver.get(self.url + \"/\")\n        driver.find_element_by_id(\"kw\").clear()\n        driver.find_element_by_id(\"kw\").send_keys(u\"沃时贷\")\n        driver.find_element_by_id(\"su\").click()\n        time.sleep(3)\n        title = driver.title\n        self.assertEqual(title, u\"沃时贷_百度搜索\")\n\n    def tearDown(self):\n        self.driver.quit()\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"Test/test_complete/test_case/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534064821","text":"import socket \n\nclass Servidor:\n    _flag= False\n    mensagem = 'msg'\n\n    def __init__(self, porta, file):\n\n        self._hostServer = '127.0.0.1'\n        print('IP: ',self._hostServer)\n        self._port = int(porta) # Parameter\n        self._tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)# Create a TCP socket\n        self._address = (self._hostServer, self._port)# The IP and port pair\n        self._tcp.bind(self._address)# Bind the address for the TCP connection\n        self._tcp.listen(999999)# Listen for connections on the server\n        self._nameFile = file # From parameter\n    \n    def readFile(self):# Reads the file\n        \n        self._fileOpen = open(self._nameFile, \"rb\")# Open the file\n        self._envio = self._fileOpen.read()# Read the file\n        self._fileOpen.close()# Close the file stream\n    \n    def sendFile(self, contador):# Server-side send\n        self.mensagem = f'cliente {contador} Recebi o Arquivo'\n        print ('Esperando cliente...')\n        self._connection, self._client = self._tcp.accept()\n        self._flag = True\n        print ('Cliente {} conectado'.format(self._client[0])) \n        self._connection.sendall(self._envio)# Send the envio buffer\n        self._connection.close()\n\nclass Cliente:\n    def __init__(self, IP , porta , file ):\n        self._hostServer = IP\n        self._port = int(porta) # Parameter\n        self._tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)# Create the connection to the server\n        self._address = (self._hostServer, self._port)# The IP and port pair\n        print ('Esperando cliente...')\n        self._tcp.connect(self._address) # Connect to the server\n        print ('Conectado!')\n        self._nameFile = file # Parameter\n    # Close the TCP connection\n    def closeConnection(self):\n        self._tcp.close()\n    \n    # Receive the file\n    def receiveFile(self):\n        with open(self._nameFile,'wb') as wf:\n            while True:\n                self._recebido = self._tcp.recv(4096)\n                if not self._recebido: break # Stop once nothing more is received\n                wf.write(self._recebido)","sub_path":"Semana05/Prog02.py","file_name":"Prog02.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"250702891","text":"# -*- coding: utf-8 -*-\nfrom django.conf import settings\nfrom django.utils import simplejson as json\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom 
django.contrib import messages\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, get_object_or_404, render\nfrom tagging.models import Tag\n\nfrom inventta.forms import IdeaForm, CommentForm\nfrom inventta.models import Idea, Comment\n\n\ndef index(request):\n queryset = Idea.objects.all().order_by('pk')\n if not request.user.is_staff:\n queryset = queryset.exclude(is_draft=True)\n form = IdeaForm()\n\n if request.method == 'POST':\n next = request.POST['next']\n form = IdeaForm(request.POST)\n\n if form.is_valid():\n idea = form.save()\n messages.success(\n request, \n u'Mensaje publicado con exito.'\n )\n return redirect(next)\n else :\n return idea_new(request, form_instance=form) \n return render(\n request,\n 'master.html',\n {\n 'idea_form': form,\n 'object_list': queryset,\n }\n )\n\ndef idea_detail(request, tag_name, object_id):\n queryset = Idea.objects.all()\n form = IdeaForm(initial={'tags':tag_name})\n idea = get_object_or_404(Idea, pk=object_id)\n\n if request.method == 'POST':\n next = request.POST['next']\n form = IdeaForm(request.POST, instance=idea)\n if form.is_valid():\n idea = form.save()\n messages.success(request, 'Actualizaste el #%s' %(object_id))\n return redirect(next)\n else :\n return idea_new(\n request,\n tag_name=tag_name,\n object_id=object_id,\n form_instance=form\n )\n return render(\n request,\n 'detail.html',\n {\n 'idea_form': form,\n 'tag': tag_name,\n 'object': idea,\n 'object_list': queryset.filter(tags__icontains=tag_name),\n }\n )\n\n@login_required\ndef idea_new(request, tag_name=None, object_id=None, form_instance=None):\n form = IdeaForm()\n \n if object_id:\n idea = get_object_or_404(Idea, pk=object_id)\n form = IdeaForm(instance=idea)\n else :\n idea = None\n\n if form_instance:\n form = form_instance\n \n return render(\n request, \n 'idea_form.html',\n {\n 'form': form,\n 'instance': idea,\n 'tag_name': tag_name,\n }\n )\n\ndef comment_list(request):\n queryset = Comment.objects.all().order_by('changed')\n if not request.user.is_staff:\n queryset = queryset.exclude(is_draft=True)\n form = CommentForm()\n\n if request.method == 'POST' and request.POST['honeypot'] == '':\n next = request.POST['next']\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.tags = u'comentario,'\n comment.save()\n messages.success(\n request, \n u'Mensaje publicado con exito.'\n )\n return redirect(next)\n else :\n return comment_new(request, form_instance=form)\n return render(\n request,\n 'master.html',\n {\n 'comment_form': form,\n 'object_list': queryset,\n }\n )\n\ndef comment_detail(request, object_id):\n queryset = Comment.objects.all()\n form = CommentForm()\n comment = get_object_or_404(Comment, pk=object_id)\n\n if request.method == 'POST':\n next = request.POST['next']\n form = CommentForm(request.POST, instance=comment)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.tags = u'comentario,'\n comment.save()\n messages.success(request, 'Actualizaste el #%s' %(object_id))\n return redirect(next)\n else :\n return comment_new(\n request, \n object_id=object_id,\n form_instance=form\n )\n return render(\n request,\n 'detail.html',\n {\n 'comment_form': form,\n 'object': comment,\n 'object_list': queryset,\n }\n )\n\ndef comment_new(request, object_id=None, form_instance=None):\n form = CommentForm()\n if object_id:\n comment = get_object_or_404(Comment, pk=object_id)\n form = CommentForm(instance=comment)\n else:\n comment = None\n return render(\n request, \n 
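# comment_form.html renders the blank or pre-filled CommentForm built above\n        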
'comment_form.html',\n {\n 'form': form,\n 'instance': comment\n }\n )\n \ndef idea_json(request, tag_name, object_id):\n queryset = Idea.objects.all()\n form = IdeaForm(initial={'tags':tag_name})\n idea = get_object_or_404(Idea, pk=object_id)\n return HttpResponse(\n json.dumps({\n 'title':idea.title,\n 'author':idea.author,\n 'description':idea.description,\n 'is_draft':idea.is_draft,\n 'tags': idea.tags\n }),\n \n mimetype=\"application/json\"\n )\n \ndef by_tag(request, tag_name=None):\n queryset = Idea.objects.all().order_by('tags')\n form = IdeaForm()\n\n if tag_name:\n queryset = queryset.filter(tags__icontains=tag_name)\n\n return render(\n request,\n 'list.html',\n {\n 'form': form,\n 'tag_name': tag_name,\n 'object_list': queryset.filter(tags__icontains=tag_name),\n }\n )\n\ndef tags(request):\n queryset = Tag.objects.all()\n tag_name = request.GET.get('tag_name')\n if tag_name:\n queryset = queryset.filter(name__istartswith=tag_name)\n return HttpResponse(\n json.dumps([x.name for x in queryset]), \n mimetype=\"application/json\"\n )\n \ndef profile_detail(request, username):\n queryset = Idea.objects.filter(author__icontains=username).order_by('-created')\n\n try:\n author = User.objects.get(username__icontains=username)\n except :\n author = None\n\n return render(\n request,\n 'profile.html',\n {\n 'author':author,\n 'is_me':request.user.username == 'username',\n 'object_list': queryset,\n }\n )\n\ndef login(request):\n if request.method == 'POST':\n from django.contrib.auth import authenticate, login\n username = request.POST.get('username', 'None')\n password = request.POST.get('password', 'None')\n user = authenticate(username=username, password=password)\n next = request.POST.get('next')\n message = ''\n if user is not None:\n if user.is_active:\n login(request, user)\n messages.success(request, 'bienvenido %s' %(user, ))\n \n \n return HttpResponse(\n json.dumps({\n 'next': next,\n }),\n \n mimetype=\"application/json\"\n )\n\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343321061","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^(?P\\d+)$', views.dashboard, name=\"dashboard\"),\n url(r'^wishlist/add$', views.add, name='add'),\n url(r'^wishlist/create$', views.create, name='create'),\n url(r'^logout$', views.logout, name=\"logout\"),\n url(r'^wishlist/join$', views.join, name = 'join'),\n url(r'^unjoin/(?P\\d+)$', views.unjoin, name = 'unjoin'),\n url(r'^wishlist/show/(?P\\d+)$', views.show, name=\"show\"),\n url(r'^wishlist/destroy/(?P\\d+)$', views.remove, name=\"remove\")\n]\n","sub_path":"apps/wishlist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561670228","text":"\ndef first_n_terms(n):\n ret_arr = [5]\n while len(ret_arr) < n:\n ret_arr.append(ret_arr[-1] * 3 - 4)\n return ret_arr\n\n\ndef first_n_terms_test(n, reference_arr):\n function_arr = first_n_terms(n)\n for i in range(len(function_arr)):\n if function_arr[i] != reference_arr[i]:\n return False\n return True\n\n\ndef nth_term(n):\n if n == 1:\n return 5\n return 3 * nth_term(n - 1) - 4 \n\n\ndef nth_term_test(n, value):\n return nth_term(n) == value\n\nassert nth_term_test(10, 59051)\n\nprint(nth_term(10))\n\nreference_array = [5, 11, 29, 83, 245, 731, 2189, 6563, 19685, 59051]\nassert first_n_terms_test(10, reference_array)\n\nprint(first_n_terms(10))\n","sub_path":"recursive_sequence.py","file_name":"recursive_sequence.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420220859","text":"from corehq.apps.cleanup.management.commands.populate_sql_model_from_couch_model import PopulateSQLCommand\n\n\nclass Command(PopulateSQLCommand):\n @classmethod\n def couch_doc_type(cls):\n return 'Dhis2Connection'\n\n @classmethod\n def sql_class(cls):\n from corehq.motech.dhis2.models import Dhis2Connection\n return Dhis2Connection\n\n @classmethod\n def commit_adding_migration(cls):\n return \"d670f19bfda1ab4e842d7d47162c5691b9bef55d\"\n\n def update_or_create_sql_object(self, doc):\n model, created = self.sql_class().objects.update_or_create(\n domain=doc['domain'],\n defaults={\n 'server_url': doc.get('server_url'),\n 'username': doc.get('username'),\n 'password': doc.get('password'),\n 'skip_cert_verify': doc.get('skip_cert_verify') or False,\n }\n )\n return (model, created)\n","sub_path":"corehq/motech/dhis2/management/commands/populate_sql_dhis2_connection.py","file_name":"populate_sql_dhis2_connection.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"359454030","text":"import os\nimport sys\nimport json\nimport requests\n\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append('../')\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nACCOUNT_TEST = {\n\t\"user_id\": 2,\n\t\"password\": \"12345678910\"\n}\n\n\nENTRY_TASK_URL = \"http://127.0.0.1:8000/api\"\n\nADMIN_CREATE_EVENT = ENTRY_TASK_URL + \"/admin/create_event\"\n\nEVENT_COMMENT = ENTRY_TASK_URL + \"/event/comment\"\nEVENT_LIKE = ENTRY_TASK_URL + \"/event/like\"\nEVENT_PARTICIPATE = ENTRY_TASK_URL + \"/event/participate\"\nEVENT_GET_DETAIL = ENTRY_TASK_URL + \"/event/get_detail\"\nEVENT_GET_IDS = ENTRY_TASK_URL + \"/event/get_ids\"\nEVENT_GET_IDS_V2 = ENTRY_TASK_URL + \"/event/get_ids_v2\"\nEVENT_GET_INFOS_BY_IDS = ENTRY_TASK_URL + 
\"/event/get_infos_by_ids\"\n\nMEDIA_UPLOAD_IMAGE = ENTRY_TASK_URL + \"/media/upload_image\"\n\nUSER_CREATE_USER = ENTRY_TASK_URL + \"/user/create_user\"\nUSER_PRE_LOGIN = ENTRY_TASK_URL + \"/user/pre_login\"\nUSER_LOGIN = ENTRY_TASK_URL + \"/user/login\"\n\n\ndef request_service_api(url, data, access_token=\"\", method=\"POST\"):\n\theaders = {\n\t\t'X-Entry-Task-Access-Token': access_token,\n\t}\n\tif method == \"POST\":\n\t\tresponse = requests.post(url, data=json.dumps(data), verify=False, headers=headers)\n\telse:\n\t\tresponse = requests.get(url, params=data, verify=False, headers=headers)\n\ttry:\n\t\tif response.status_code != 200:\n\t\t\tprint(\"REQUEST HTTP ERROR CODE: %s\" % response.status_code)\n\t\t\treturn None, None\n\t\tresult = response.json()\n\t\tresult_code = result[\"result\"]\n\t\tresult_body = result.get(\"reply\")\n\t\treturn result_code, result_body\n\texcept Exception as error:\n\t\tprint(\" REQUEST EXCEPTION: %s\" % error)\n\treturn None, None\n","sub_path":"tests/test_constant.py","file_name":"test_constant.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"504895159","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\ndef run(command):\n    \"\"\"\n    Runs the command passed as a parameter.\n    \"\"\"\n    import subprocess\n\n    out = subprocess.Popen([command], shell=True, stdout=subprocess.PIPE)\n    out_cmd = out.communicate()[0]\n    #err_cmd = out.communicate()[1]\n    return out.returncode, out_cmd.decode('UTF-8')\n\n# Note: ssh and get_infos were declared async but are called synchronously below,\n# which would hand un-awaited coroutine objects to the template; plain functions fix that.\ndef ssh(user, host, command):\n    \"\"\"\n    Runs the ssh command passed as a parameter.\n    \"\"\"\n    return run(\"ssh \" + user + \"@\" + host + \" -o 'StrictHostKeyChecking=no' \" + command)\n\nHYPS = [\n    \"acfer.bas\",\n    #\"madura.bas\",\n    \"goury\",\n    \"bikini.bas\",\n    \"hyp-test.bas\",\n    \"alderney\",\n    \"ara\",\n    \"bacalao\",\n    \"cacatoes\",\n    \"caique\",\n    \"charcas\",\n    \"conure\",\n    \"eclectus\",\n    \"forpus\",\n    \"glaive-02\",\n    \"goury\",\n    \"hades.pun\",\n    \"java\",\n    \"loriquet.mai\",\n    \"madagascar.bas\",\n    \"samoa\",\n    \"strigops\",\n    \"servbackup-tice\",\n    \"tonda.dunes\",\n    \"oleron.dunes\"\n]\n\ndef get_infos():\n    infos = {}\n    for hyp in HYPS:\n        hyp += \".ens-lyon.fr\"\n        infos[hyp] = [ssh(\"root\", hyp, \"xen-info -s\")[1],\n                      ssh(\"root\", hyp, \"xen-info -l\")[1]]\n    return infos\n\n@app.route(\"/\")\ndef xen_infos():\n    return render_template(\"xeninfo.html\",\n                           hyps=HYPS,\n                           infos=get_infos())\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0')\n","sub_path":"xeninfo/xeninfo.py","file_name":"xeninfo.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599562965","text":"import tweepy as tw\nfrom json import dumps\nfrom configparser import ConfigParser\nfrom kafka import KafkaProducer\nimport logging\nimport pprint\nfrom time import sleep\n\n\n# Setting default logging\nlogging.basicConfig(level=logging.INFO)\n\n# Getting Keys for Twitter application\nconfig = ConfigParser()\nconfig.read('application.properties')\nconsumer_key = config.get(\"DEV\", \"consumer.key\")\nconsumer_secret_key = config.get(\"DEV\", \"consumer.secret.key\")\naccess_key = config.get(\"DEV\", \"access.key\")\naccess_secret_key = config.get(\"DEV\", \"access.secret.key\")\n\n\n# Authenticating with Twitter\nauth = tw.OAuthHandler(consumer_key, consumer_secret_key)\nauth.set_access_token(access_key, access_secret_key)\napi = tw.API(auth, 
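# wait_on_rate_limit makes tweepy sleep through Twitter rate-limit windows instead of raising\n             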
wait_on_rate_limit=True)\n\nsearch_words = 'Modi'\n\ntweets = tw.Cursor(api.search,\n q=search_words,\n lang=\"en\").items()\n\n#\nproducer = KafkaProducer(bootstrap_servers=['127.0.0.1:9092'],\n value_serializer=lambda x: dumps(x).encode('utf-8'),\n acks='all', # Required for safe producer\n retries=999999999999, # Required for safe producer\n max_in_flight_requests_per_connection=5, # Required for safe producer\n linger_ms=20, # Required for high throughput producer\n batch_size=32*1024) # Required for high throughput producer\n\n# Compression best snappy,lz4... but test all other compression\n\n# Getting Producer's Configuration\npprint.pprint(producer.config)\n\n# Getting Tweets\nfor i in tweets:\n print(i._json)\n producer.send('Kafka_Tweets', i._json)\n sleep(10)\n\n\nproducer.flush()\nproducer.close()\n","sub_path":"KafkaTwitterProducer.py","file_name":"KafkaTwitterProducer.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403513949","text":"import inspect\nimport os\nimport re\nimport sys\n\ntry:\n from sphinx.util.docstrings import prepare_docstring\nexcept ImportError:\n prepare_docstring = None\n\n\n#: Used to identify the end of the description block, and the beginning of the\n#: parameters. This assumes that the parameters and such will always occur at\n#: the end of the docstring.\nDESCRIPTION_END_RE = re.compile(':(arg|param|returns|throws)', re.I)\n\n\ndef nested_set(data, keys, value):\n \"\"\"Set a nested key value in a dict based on key list.\n\n :param data dict: the dict to set nested key in.\n :param keys list: the nested list of keys.\n :param value object: the final value to set.\n \"\"\"\n for key in keys[:-1]:\n data = data.setdefault(key, {})\n data[keys[-1]] = value\n\n\ndef exec_params(call, *args, **kwargs):\n \"\"\"Execute a callable with only the defined parameters\n and not just *args, **kwargs.\n\n :param callable call: The callable to exec with the given params as\n defined by itself. call should have an inspect.ArgSpec attached\n as an attribute _argspec\n :returns anything:\n :raises TypeError:\n \"\"\"\n arg_spec = getattr(call, '_argspec', None)\n if arg_spec and not arg_spec.keywords:\n kwargs = {key: value for key, value in kwargs.iteritems()\n if key in arg_spec.args}\n return call(*args, **kwargs)\n\n\ndef undecorate_func(func):\n \"\"\"Returns the original function from the decorated one.\n\n The purpose of this function is to return the original `func` in the\n event that it has decorators attached to it, instead of the decorated\n function.\n\n :param function func: The function to unwrap.\n :returns: The unwrapped function.\n \"\"\"\n while True:\n if func.__closure__:\n for cell in func.__closure__:\n if inspect.isfunction(cell.cell_contents):\n if func.__name__ == cell.cell_contents.__name__:\n func = cell.cell_contents\n break\n else:\n break\n return func\n\n\ndef get_module_attr(module_filename, module_attr, namespace=None):\n \"\"\"Get an attribute from a module.\n\n This uses execfile to load the module with a private namespace, and then\n plucks and returns the given attribute from that module's namespace.\n\n Note that, while this method doesn't have any explicit unit tests, it is\n tested implicitly by the doctor's own documentation. 
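A typical call, mirroring the parameter examples below, is get_module_attr('../src/app.py', 'app'). 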
The Sphinx\n build process will fail to generate docs if this does not work.\n\n :param str module_filename: Path to the module to execute (e.g.\n \"../src/app.py\").\n :param str module_attr: Attribute to pluck from the module's namespace.\n (e.g. \"app\").\n :param dict namespace: Optional namespace. If one is not passed, an empty\n dict will be used instead. Note that this function mutates the passed\n namespace, so you can inspect a passed dict after calling this method\n to see how the module changed it.\n :returns: The attribute from the module.\n :raises KeyError: if the module doesn't have the given attribute.\n \"\"\"\n if namespace is None:\n namespace = {}\n module_filename = os.path.abspath(module_filename)\n namespace['__file__'] = module_filename\n module_dir = os.path.dirname(module_filename)\n old_cwd = os.getcwd()\n old_sys_path = sys.path[:]\n try:\n os.chdir(module_dir)\n sys.path.append(module_dir)\n execfile(module_filename, namespace)\n return namespace[module_attr]\n finally:\n os.chdir(old_cwd)\n sys.path = old_sys_path\n\n\ndef get_description_lines(docstring):\n \"\"\"Extract the description from the given docstring.\n\n This grabs everything up to the first occurrence of something that looks\n like a parameter description. The docstring will be dedented and cleaned\n up using the standard Sphinx methods.\n\n :param str docstring: The source docstring.\n :returns: list\n \"\"\"\n if prepare_docstring is None:\n raise ImportError('sphinx must be installed to use this function.')\n\n if not isinstance(docstring, basestring):\n return []\n lines = []\n for line in prepare_docstring(docstring):\n if DESCRIPTION_END_RE.match(line):\n break\n lines.append(line)\n if lines and lines[-1] != '':\n lines.append('')\n return lines\n","sub_path":"doctor/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"34254171","text":"import sys\r\nfrom collections import deque\r\n\r\ndef bfs(start):\r\n q=deque([start])\r\n cur_idx=0\r\n while q:\r\n for _ in range(len(q)):\r\n x,y=q.popleft()\r\n for next_x,next_y in ((x,y+1),(x,y-1),(~x,y+k)):\r\n if next_y>=n:\r\n return 1\r\n if cur_idx= 0:\n if salary[l] < salary[r]:\n lesser = True\n elif salary[l] > salary[r]:\n break\n l -= 1\n r += 1\nif lesser:\n i = -(-len(salary) // 2) - 1\n while i >= 0:\n if salary[i] == '9':\n salary[i] = '0'\n else:\n salary[i] = str(int(salary[i]) + 1)\n break\n i -= 1\n\nl, r = 0, len(salary) - 1\nwhile l < r:\n salary[r] = salary[l]\n l += 1\n r -= 1\nprint(''.join(salary))","sub_path":"timus/py/1123.py","file_name":"1123.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286797670","text":"from ggame import App, Color, LineStyle, Sprite, CircleAsset, Frame, RectangleAsset\nfrom math import floor, sin, cos, tan, log\n#-----------------------------------------------------\nred = Color(0xff0000, 1.0)\nblue = Color(0x0000ff, 1.0)\nblack = Color(0x000000, 1.0)\ngreen = Color(0x0fff6f, 1.0)\nwhite = Color(0xffffff, 1.0)\n#-----------------------------------------------------\nframeWidth = 800\nframeHeight = 800\n#-----------------------------------------------------\nnoLine = LineStyle(0, black)\noutline = LineStyle(1,black)\n#-----------------------------------------------------\ndef funcInterpreter(depVar, indepVar, equation,t):\n if equation.count(\"(\") != 
equation.count(\")\") or equation.count(\"=\") != 1:\n print(\"Invalid input given\")\n else:\n newEquation = \"\"\n for i in equation:\n if i != \" \":\n newEquation += i\n #print(\"Interpreting:\", newEquation)\n if newEquation.find(\"=\") != 1 or newEquation[newEquation.find(\"=\")+1:len(newEquation)].find(depVar) != -1:\n print(\"Implementation of implicits needed\")\n print(depVarSolver(depVar, indepVar, newEquation))\n pluggableEquation = depVarSolver(depVar, indepVar, newEquation)\n else:\n equationR = newEquation[newEquation.find(\"=\")+1: len(newEquation)]\n # print(\"equationR\", equationR)\n letterOperands = \"sincotaelg\"\n status = 0\n for i in letterOperands:\n if equationR.find(i) != -1:\n status += 1\n if equationR.count(indepVar) > 0 and indepVar != \"nil\":\n pluggableEquation = pluggerSetup(depVar, indepVar, equationR)\n # print(\"pluggable:\", pluggableEquation)\n else:\n b = getOperandsAndTerms(equationR)\n pluggableEquation = prenEliminator(b[0],b[1])\n points = []\n #for i in range(1,10):\n # points.append((funcPlugger(depVar, indepVar, str(pluggableEquation), i)))\n #print(pluggableEquation)\n points.append((funcPlugger(depVar, indepVar, str(pluggableEquation), t)))\n #points = \"nil\" \n if isinstance(points, (list,)):\n points = points[0]\n return(points)\n \ndef depVarSolver(depVar, indepVar, equation):\n print(\"Implicit function entered, Isolating dependent variable\")\n if equation.find(\"=\")!= 1:\n equationL = equation[0:equation.find(\"=\")-1]\n equationL = expressionSplitter(equationL)\n else:\n equationL = [equation[0]]\n equationR = equation[equation.find(\"=\")+1: len(equation)]\n equationR = expressionSplitter(\"y\", equationR)\n #newEquation = equationL +\"=\"+equationR\n return(equationL, \"=\", equationR)\n \ndef expressionSplitter(depVar, expression):\n if expression.find(depVar) == -1:\n print(\"DepVar not found\", expression)\n return([expression])\n else:\n terms = []\n term = \"\"\n p = 0\n for i in expression:\n term += i\n if i == \"(\":\n p += 1\n elif i == \")\":\n p -= 1\n if i == \"+\" and p == 0:\n terms.append(term[0:len(term)-1])\n term = \"+\"\n if term != \"\":\n terms.append(term)\n print(\"DeVars found\", terms)\n return(terms)\n \ndef funcCombiner(equation):\n #print(equation)\n equationL = getOperandsAndTerms(equation[0:equation.find(\"=\")])\n #print(equationL)\n equationR = getOperandsAndTerms(equation[equation.find(\"=\"):len(equation)-1])\n #print(equationR)\n equationLOperators=[]\n for i in equationL[1]:\n if i == \"-\":\n equationLOperators.append(\"+\")\n elif i == \"+\":\n equationLOperators.append(\"-\")\n else:\n equationLOperators.append(i)\n return(equationR[0] + equationL[0],equationR[1] + equationLOperators[:])\n \ndef funcCompiler(terms, operands):\n output = \"\"\n for i in range(0, len(terms)):\n output += terms[i]\n if i < len(terms) - 1:\n output += operands[i]\n return(output)\n\ndef prenEliminator(terms, operands):\n newTerms = []\n operators = []\n for z in terms:\n newTerms.append(z)\n for z in operands:\n operators.append(z)\n pp = 1 #Status of parenthesis being present\n g = 0 #Just a method to stop infinite loops if there is an error in my code\n \n while pp == 1 and g != 20:\n # print(\"while\", newTerms, \"g=\", g)\n # print(\"pren:\", newTerms)\n # print(\"pren:\", operands)\n g += 1\n pcheck = \"\"\n letterOperands = \"sincotaelg\"\n status = 0\n for i in range(0,len(newTerms)):\n status = 0\n for k in letterOperands:\n if newTerms[i].find(k) != -1:\n status += 1\n if status != 0:\n newTerms[i] = 
str(funcSolver(getOperandsAndTerms(newTerms[i])[0],getOperandsAndTerms(newTerms[i])[1]))\n if status == 0:\n for i in range(0,len(newTerms)):\n if str(newTerms[i]).isdigit() == False:\n #print(\"Non-int detected in prenElim\")\n p = str(newTerms[i]).count(\"(\")\n term = \"\"\n newTerm = \"\"\n outside = \"\"\n for k in range(len(newTerms[i])):\n currentTerm = (newTerms[i])[k]\n term += currentTerm\n if currentTerm == \"(\":\n #print(\"Hey we found an opening parenthesis!\", newTerms, k)\n outside += term[0:len(term)-1:]\n #print(\"This is the outside:\", outside)\n if len(outside) > 0:\n #print(\"The outside is longer than 0\")\n if outside[len(outside) - 1] == \")\" or outside[len(outside) - 1].isdigit() == True or outside[len(outside) - 1] == \"x\" or outside[len(outside) - 1] == \"y\":\n #print(\"We decided to add a multiplier\")\n outside += \"*\"\n term = \"(\"\n elif currentTerm == \")\" and term[0] == \"(\" and len(term[1:len(term)-1:]) > 0:\n term = term[1:len(term)-1:]\n newTerm = str(funcSolver(getOperandsAndTerms(term)[0],getOperandsAndTerms(term)[1]))\n outside += \"{0}\"\n term = \"\"\n outside = outside.format(newTerm)\n elif currentTerm == \")\" and term[0] == \"(\" and len(term[1:len(term)-1:]) == 0:\n #print(\"There is an empty term; Substituting 0\")\n term = \"\"\n outside += \"0\"\n if len(term) > 0 and len(outside) > 0:\n if outside[len(outside) - 1].isdigit() == True and term[0].isdigit == True:\n term = \"*\" + term\n outside += term\n #print(\"cash me\", outside, term)\n else:\n outside += term\n\n newTerms[i] = str(outside)\n ##print(\"newTerms[i]\", newTerms[i])\n for h in newTerms:\n pcheck += str(h)\n if pcheck.count(\"(\") == 0:\n pp = 0\n \n if len(newTerms) > 1:\n #print(\"Int Solver\", newTerms)\n newTerms = funcSolver(newTerms, operands)\n return(newTerms)\n else:\n output = \"\"\n for i in newTerms:\n output += i\n output = float(output)\n return(output) \n\ndef getOperandsAndTerms(equation):\n #initial Seperation\n terms = []\n term = \"\"\n operands = []\n letterOperands = \"sincotaelg\" #Letters in complex operands like trig and log functions\n p = 0\n op = 1\n for i in str(equation):\n status = 0\n for letterOp in letterOperands:\n if i == letterOp:\n status = 1\n if i != \" \" and i != \"'\" and i != \"[\" and i != \"]\":\n if i == \"(\" or i == \"{\":\n p += 1\n if term != \"\" and term.count(\"(\") == 0:\n if letterOperands.find(term[len(term)-1]) == -1:\n terms.append(term)\n term = \"\"\n op = 0\n if op == 0 and p == 1:\n if len(term) > 0:\n if letterOperands.find(term[len(term)-1]) == -1:\n operands.append(\"*\")\n #print(\"OP == 0\", term, terms, operands)\n else:\n operands.append(\"*\")\n elif i == \")\" or i == \"}\":\n p -= 1\n if p == 0 and i != \")\" and i != \"}\":\n if i == \"+\" or i == \"*\" or i == \"/\" or i == \"^\":\n operands.append(i)\n op = 1\n if term != \"\":\n terms.append(term)\n term = \"\"\n elif i == \"-\":\n if op == 1:\n op = 2\n term += i\n elif op == 2:\n op = 0\n term = term[0:len(term)-1]\n else:\n #minusOperator\n operands.append(i)\n op = 1\n if term != \"\":\n terms.append(term)\n term = \"\"\n elif i.isdigit() == True or i == \".\" or status == 1:\n if status == 1 and len(term) > 0:\n if term[0].isdigit():\n terms.append(term)\n term = i\n operands.append(\"*\")\n else:\n term += i\n else:\n term += i\n op = 0\n elif i.isdigit() == False:\n if term != \"\":\n terms.append(term)\n term = \"\"\n op = 0\n terms.append(i)\n if len(terms) > len(operands) + 1:\n operands.append(\"*\")\n op = 1\n elif p == 0 and (i == 
\")\" or i == \"}\"):\n term += i\n terms.append(term)\n term = \"\"\n op = 0\n else:\n term += i\n if term != \"\":\n terms.append(term)\n #print(\"GottenTerms\", terms, \"GottenOperands\", operands, \"from\", equation)\n for i in range(0,len(terms)):\n #print(terms[i])\n if terms[i] == \"-\":\n terms[i] = \"-1\"\n return((terms,operands))\n \ndef funcSolver(terms, operands):\n letterOperands = \"sincotaelg\"\n #print(\"funcSolverCalled\")\n #print(\"terms:\", terms)\n #print(\"operands:\", operands)\n newTerms = []\n for i in terms:\n status = 0\n for letter in letterOperands:\n if i.find(letter) != -1:\n status = 1\n if status == 1:\n if i.find(\"(\") != -1:\n term = \"\"\n for k in i:\n if i != \"(\" and i != \")\":\n term += k\n inside = i[i.find(\"(\")+1:i.find(\")\")]\n term = i[0:i.find(\"(\")]\n else:\n term = i\n inside = \"\"\n status = 0 \n for k in term:\n if k.isdigit() and status == 0:\n #THIS NEEDS TO CHANGE FOR NESTED COMPLEX OPERANDS\n inside += k\n else:\n status += 1\n if status == 1:\n term = term[0:term.find(i)-1]\n #print(i, \"term\", term, \"inside\", inside)\n if term[0:3] == \"log\":\n #print(\"INSIDE\", inside)\n expression = \"\"\n logBase = 0\n if inside.find(\",\") != -1:\n expression = inside[0:inside.find(\",\")]\n logBase = inside[inside.find(\",\")+1:len(inside)]\n #print(logBase)\n if len(expression) > 1:\n expression = str(prenEliminator(getOperandsAndTerms(expression)[0],getOperandsAndTerms(expression)[1]))\n newTerms.append(log(float(expression))/log(float(logBase)))\n else:\n if len(inside) > 1:\n inside = prenEliminator(getOperandsAndTerms(inside)[0],getOperandsAndTerms(inside)[1])\n newTerms.append(round(log(float(inside))/log(10),5))\n #print(\"logBase\", logBase, \"expression\", expression)\n\n #print(\"Term\", term, float(term[3:len(term)]), log(float(term[3:len(term)])))\n #newTerms.append(log(float(term[3:len(term)])))\n \n else:\n #print(term, \"NOT LOG\")\n if len(inside) > 0:\n term = term + str(prenEliminator(getOperandsAndTerms(inside)[0],getOperandsAndTerms(inside)[1]))\n #print(term)\n #print(i[0:3], term[0:3])\n if term[0:3] == \"sin\":\n newTerms.append(round(sin(float(term[3:len(term)])),5))\n elif term[0:3] == \"cos\":\n newTerms.append(round(cos(float(term[3:len(term)])),5))\n elif term[0:3] == \"tan\":\n newTerms.append(round(tan(float(term[3:len(term)])),5))\n elif term[0:3] == \"sec\":\n newTerms.append(round(1/cos(float(term[3:len(term)])),5))\n elif term[0:3] == \"csc\":\n newTerms.append(round(1/sin(float(term[3:len(term)])),5))\n elif term[0:3] == \"cot\":\n newTerms.append(round(1/tan(float(term[3:len(term)])),5))\n else:\n newTerms.append(i)\n #print(\"The equation you entered was weird. 
Maybe you should check it.\")\n else:\n newTerms.append(i)\n terms = newTerms\n final = 0\n holder = \"\"\n found = 0\n if len(operands) > 0:\n for i in range(0,len(operands)):\n i = i - found\n if operands[i] == \"^\":\n #print(\"ExpoFound\")\n newTerms[i] = float(terms[i])**float(terms[i+1])\n #print(\"NewTermsAdded\", terms[i], terms[i+1], newTerms[i], \"n\")\n del newTerms[i+1]\n del operands[i]\n found += 1\n #print(\"done\")\n #print(\"expo:\", newTerms, operands)\n found = 0\n for i in range(0,len(operands)):\n #print(found, terms, newTerms, operands)\n i = i - found\n #print(i,len(terms),len(operands))\n #print(terms,operands)\n if operands[i] == \"*\":\n newTerms[i] = float(terms[i])*float(terms[i+1])\n del newTerms[i+1]\n del operands[i]\n found += 1\n elif operands[i] == \"/\":\n newTerms[i] = float(terms[i])/float(terms[i+1])\n del newTerms[i+1]\n del operands[i]\n found += 1\n #print(\"mult:\", newTerms)\n for i in range(0,len(operands)):\n if operands[i] == \"-\":\n newTerms[i+1] = str((-1)*float(terms[i+1]))\n #print(\"sub:\", newTerms)\n for i in newTerms:\n final += float(i)\n else:\n final = \"\"\n for i in str(terms):\n for k in i:\n if k.isdigit() == True or k == \".\" or k == \"-\":\n final += str(k)\n #print(\"FINAL:\",final)\n final = float(final)\n ##print(\"solved:\", final)\n roundingTo = 10\n while str(final).find(\"e\")!= -1:\n final = round(final,roundingTo)\n roundingTo -= 1\n print(final)\n return(final)\n\ndef funcPlugger(depVar, indepVar, equation, t):\n if equation.find(\"=\") != -1:\n equation = equation[equation.find(\"=\")+1:len(equation)]\n a = getOperandsAndTerms(equation.format(t))\n b = prenEliminator(a[0],a[1])\n c = 0\n #print(\"Wubbo\", equation.format(t),a,b)\n if isinstance(b, (list,)):\n #print(b)\n for i in b:\n c += float(i)\n else:\n c = b\n if depVar == \"x\":\n return(c,t)\n else:\n return(t,c)\n \ndef pluggerSetup(depVar, indepVar, equation):\n output = \"\"\n #print(\"PluggerSetup\", depVar, indepVar, equation)\n for i in equation:\n #print(\"plug?\", i, i == indepVar)\n if i == indepVar:\n if len(output)>0:\n if output[len(output)-1].isdigit():\n output += \"*\"+\"{0}\"\n elif output[len(output)-1] == \"-\":\n output += \"1\" + \"*\" + \"{0}\"\n else:\n output += \"{0}\"\n else:\n output += \"{0}\"\n \n elif len(output)>0: \n if output[len(output)-1] == \"}\" and (i.isdigit() or i == \"(\" or i == \"{\"):\n output += \"*\"+i\n else:\n output += i\n else:\n output += i\n #print(output)\n return output\n\ndef getX(xValue):\n #x = (xValue + float(frameWidth)/2.0)*4+2\n x = xValue + float(frameWidth) / 2.0 - 3\n return(x)\n \ndef getY(yValue):\n y = (float(frameHeight) / 2.0 - yValue)\n return(y)\n#----------------------------------------------------- \ndef color(red, green, blue, alpha):\n letters = {10:\"A\",11:\"B\",12:\"C\",13:\"D\",14:\"E\",15:\"F\"}\n output = \"0x\"\n for i in (int(red), int(green), int(blue)):\n a = int(floor(i / 16))\n if a >= 10:\n output += str(letters[a])\n else:\n output += str(a)\n if i - a*16 >= 10:\n output += str(letters[i - a*16])\n else:\n output += str(i - a*16)\n output = int(output, base=16)\n return (Color(output, alpha))\n\ndef colorRandom(funcIndex):\n return color(abs(255*sin(0.89*funcIndex+2.3)),abs(255*sin(0.44*funcIndex+1.5)),abs(255*sin(0.25*funcIndex+0.75)), 1.0)\n #return color(abs(sin(funcIndex*0.2+0.1)*255),abs(cos(funcIndex*1.31+1)*255),abs(cos(2*funcIndex)*sin(funcIndex*0.5+0.1)*255), 1.0) \n#-----------------------------------------------------\nclass point(Sprite):\n pt = CircleAsset(5, 
outline, red)\n def __init__(self, color, equation, depVar,indepVar,t):\n self.equation = equation\n self.depVar = depVar\n self.indepVar = indepVar\n self.t = t\n self.color = color\n self.tries = 0\n roundingTo = 10\n self.increment = 1\n self.shifting = False\n while str(self.t).find(\"e\") != -1:\n self.t = round(self.t,roundingTo)\n roundingTo -= 1\n print(\"ROUNDING\")\n try:\n newPosition = funcInterpreter(self.depVar,self.indepVar,self.equation,t)\n super().__init__(point.pt, (getX(newPosition[0]),getY(newPosition[1])))\n #path(self.color,(getX(newPosition[0]),getY(newPosition[1])))\n except:\n print(\"ERROR FOUND\")\n super().__init__(point.pt, (getX(0),getY(0)))\n \n def move(self):\n try:\n if self.shifting == False:\n newPosition = funcInterpreter(self.depVar,self.indepVar,self.equation,self.t+self.increment)\n oldPosition = funcInterpreter(self.depVar,self.indepVar,self.equation,self.t)\n newPosition = (getX(newPosition[0]),getY(newPosition[1]))\n oldPosition = (getX(oldPosition[0]),getY(oldPosition[1]))\n #print(oldPosition, newPosition, self.t)\n if newPosition[0] >= 0 and newPosition[0] <= frameWidth and newPosition[1] >= 0 and newPosition[1] <= frameHeight:\n if 5 >= ((newPosition[0]-oldPosition[0])**2+(newPosition[1] - oldPosition[1])**2)**0.5:\n if 3 > ((newPosition[0]-oldPosition[0])**2+(newPosition[1] - oldPosition[1])**2)**0.5:\n self.increment = self.increment * 1.1\n else:\n self.x = newPosition[0]\n self.y = newPosition[1]\n self.t = self.t+self.increment\n path(self.color, (newPosition[0],newPosition[1]))\n else:\n self.increment = self.increment * 0.9\n else:\n self.t += self.increment\n self.tries = 0\n self.shifting = False\n else:\n newPosition = funcInterpreter(self.depVar,self.indepVar,self.equation,self.t+self.increment)\n newPosition = (getX(newPosition[0]),getY(newPosition[1]))\n self.x = newPosition[0]\n self.y = newPosition[1]\n self.t = self.t+self.increment\n path(self.color, (newPosition[0],newPosition[1]))\n print(self.t)\n except:\n if self.shifting == False:\n if self.tries <= 10:\n self.tries += 1\n self.increment = self.increment * 0.1\n else:\n self.tries = 0\n self.increment = 10\n self.t += self.increment\n self.shifting = True\n #print(\"Function failed, skipping some points.\",self.t)\n else:\n self.t += self.increment\n \nclass path(Sprite):\n def __init__(self,color, position):\n dot = CircleAsset(3,noLine, colorRandom(color))\n Sprite(dot, position)\n\n#-----------------------------------------------------\n\nclass Grapher(App):\n def __init__(self, width, height):\n super().__init__(width, height)\n initial = -frameWidth/2\n b = 10\n pi = 3.1415926\n R = 400\n # point(1,\"y=sin(x/10)\",\"y\",\"x\",initial)\n point(1,\"y=sin(0)+sin(0)+sin(0)+sin(0)+sin(0)+sin(0)+x\",\"y\",\"x\",initial)\n point(1,\"y=0+0+0+0+0+0+x\",\"y\",\"x\",initial)\n \"y=(sin(3.1415926/10)+sin(3.1415926/10))\"\n def step(self):\n for sprite in self.getSpritesbyClass(point):\n try:\n sprite.move()\n except:\n print(\"error\")\n print(sprite.t, sprite.increment)\n #print(\"RUNNING\")\n \nmyapp = Grapher(frameWidth, frameHeight)\nmyapp.run()\n# print(funcInterpreter(\"y\",\"x\",\"y=(sin(3.1415926/10)+sin(3.1415926/10))\",-1))\n","sub_path":"SlowRegression.py","file_name":"SlowRegression.py","file_ext":"py","file_size_in_byte":22749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"562089594","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, redirect\n\nfrom books.forms import 
BookSearchForm\nfrom books.models import Product, Transaction\n\n\ndef index(request):\n if request.method == 'POST':\n form = BookSearchForm(request.POST)\n if form.is_valid() and len(request.POST.get('q', '')) > 0:\n books = form.search()\n products = []\n for book in books:\n products.append(Product.objects.get(id=book.pk))\n extra = Product.objects.filter(\n title__contains=request.POST.get('q', 'sdf3')).all() | Product.objects.filter(\n author__contains=request.POST.get('q', 'dfoi4')).all()\n for each in extra:\n if each not in products:\n products.append(each)\n # iterate over a copy so removing sold books is safe\n for book in list(products):\n try:\n if book.transaction is not None:\n products.remove(book)\n except:\n pass\n return render(request, 'main/results.html', {'search_size': len(products), 'form': form,\n 'search': request.POST.get('q', ''), 'books': products})\n else:\n form = BookSearchForm()\n else:\n form = BookSearchForm()\n\n return render(request, 'main/index.html', {'form': form})\n\n\ndef buy(request):\n book_id = int(request.POST.get('book_id', '0'))\n if request.method == 'POST' and book_id > 0:\n book = Product.objects.get(id=book_id)\n if book:\n return render(request, 'main/checkout.html', {'book': book})\n return redirect('index')\n\n\ndef check_purchase(request):\n book_id = int(request.POST.get('book_id', '0'))\n if request.method == 'POST' and book_id > 0:\n book = Product.objects.get(id=book_id)\n if book:\n # Send confirmation email for purchase.\n email = request.POST.get(\"email\", \"\")\n address = request.POST.get(\"streetaddress\", \"\")\n city = request.POST.get(\"city\", \"\")\n zipcode = request.POST.get(\"zipcode\", \"\")\n token = request.POST.get(\"token\", \"\")\n if book.transaction is None:\n transaction = Transaction.objects.create(email=email, address=address, city=city, zipcode=zipcode,\n stripe_id=token, product=book)\n transaction.save()\n else:\n transaction = Transaction.objects.filter(email=email).first()\n return render(request, \"main/wait.html\", {\"transaction\": transaction})\n return redirect(\"index\")\n\n\ndef confirm_purchase(request):\n # Send email/text message to the user selling the book\n # with the information of where to send it to. 
Give\n # them information of the user to contact.\n return redirect('index')\n\n\n\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"276580384","text":"from __future__ import print_function\nfrom linklist import SimpleNode as Node\nfrom linklist import printlist\n\n\ndef deleteAlternate(head):\n curr = head\n while curr is not None:\n node = curr.next\n if node is not None:\n curr.next = node.next\n del node\n curr = curr.next\n return head\n\nif __name__ == \"__main__\":\n head = None\n for i in xrange(20, -1,-1 ):\n node = Node(i, head)\n head = node\n printlist(head)\n head = deleteAlternate(head)\n printlist(head)\n ","sub_path":"Chapter 3 linklist/deleteAlternate.py","file_name":"deleteAlternate.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434094875","text":"\r\nfrom numba import jit\r\nimport numpy as np\r\n\r\n@jit(nopython=True)\r\ndef euler(odefun,ics,h,span,degree):\r\n \r\n N= int( (span[1]-span[0])/h )\r\n \r\n tY = np.zeros((N+1,degree+1))\r\n tY[0,1:] = ics\r\n\r\n for i in range(N):\r\n tY[i+1,0] = tY[i,0] + h\r\n tY[i+1,1:] = tY[i,1:] + h * odefun(tY[i,0],tY[i,1:])\r\n \r\n return tY[:,0],tY[:,1:]\r\n","sub_path":"euler.py","file_name":"euler.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"318103285","text":"import scrapy\r\nfrom ..items import OzodlikorgItem\r\n\r\nclass OzodlikSpider(scrapy.Spider):\r\n name = 'polygraph'\r\n def __init__(self, *a, **kw):\r\n super(OzodlikSpider, self).__init__(*a, **kw)\r\n self.name2 = kw.get('name2')\r\n self.start_urls = ['https://www.polygraph.info/s?k=%s' % self.name2]\r\n\r\n def parse(self, response):\r\n\r\n items = OzodlikorgItem()\r\n\r\n all_div = response.css('.fui-grid__inner')\r\n\r\n for media in all_div:\r\n title = media.css('.media-block__title--size-3::text').extract()\r\n content = media.css('.perex--mb::text').extract()\r\n author = media.css('.links__item-link::text').extract()\r\n\r\n items['title'] = title\r\n items['content'] = content\r\n items['author'] = author\r\n\r\n yield items\r\n\r\n next_page = response.css('li.pagination__item--next a::attr(href)').get()\r\n print(next_page)\r\n if next_page is not None:\r\n yield response.follow(next_page, callback=self.parse)\r\n","sub_path":"ozodlikorg/ozodlikorg/spiders/02_polygraph.py","file_name":"02_polygraph.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"118731056","text":"# 翻转投机因子 未实现收益 CGO 的计算\n\nfrom factor_base import FactorBase\nimport pandas as pd\nimport numpy as np\nfrom influxdb_data import influxdbData\nimport dateutil.parser as dtparser\nfrom joblib import Parallel, delayed, parallel_backend\nfrom dateutil.relativedelta import relativedelta\nimport math\nfrom global_constant import N_JOBS\n\n\nclass CGO(FactorBase):\n def __init__(self):\n super().__init__()\n self.db = 'DailyFactors_Gus'\n self.measure = 'CGO'\n\n @staticmethod\n def JOB_factors(mkt_data, codes, start, period, db, measure):\n influx = influxdbData()\n save_res = []\n for code in codes:\n code_mkt = mkt_data.loc[mkt_data['code'] == code, :].copy()\n # 昨日买入,今日没有unturn数据\n code_mkt['to_1'] = code_mkt['float_turnover'].shift(1)\n 
code_mkt['price_1'] = code_mkt['fq_vwap'].shift(1) * code_mkt['to_1']\n to_cols = ['to_1']\n hp_cols = ['price_1']\n for p in range(2, period+1):\n turnover = code_mkt['float_turnover'].shift(p)\n prod_unturn = \\\n code_mkt['unturn'].rolling(p-1, min_periods=p-1).apply(lambda x: np.product(x)).shift(1)\n code_mkt['to_{0}'.format(p)] = turnover * prod_unturn\n code_mkt['price_{0}'.format(p)] = code_mkt['fq_vwap'].shift(p) * code_mkt['to_{0}'.format(p)]\n to_cols.append('to_{0}'.format(p))\n hp_cols.append('price_{0}'.format(p))\n code_mkt = code_mkt.dropna()\n code_mkt['multi'] = 1 / code_mkt[to_cols].sum(axis=1)\n code_mkt['price'] = code_mkt[hp_cols].sum(axis=1) * code_mkt['multi']\n code_mkt['CGO_{0}'.format(period)] = \\\n (code_mkt['fq_vwap'] - code_mkt['price']) / code_mkt['price']\n code_mkt = code_mkt.loc[str(start):, ['code', 'CGO_{0}'.format(period)]]\n code_mkt = code_mkt.dropna()\n if code_mkt.empty:\n continue\n print('code: %s' % code)\n r = influx.saveData(code_mkt, db, measure)\n if r == 'No error occurred...':\n pass\n else:\n save_res.append('%s Error: %s' % (measure, r))\n return save_res\n\n\n def cal_factors(self, start, end, n_jobs):\n self.period = 60\n data_start = (dtparser.parse(str(start)) - relativedelta(years=1)).strftime('%Y%m%d')\n mkt_data = self.influx.getDataMultiprocess('DailyMarket_Gus', 'market', data_start, end,\n ['code', 'status', 'vwap', 'adj_factor'])\n mkt_data = mkt_data.loc[mkt_data['status'] != '停牌', ['code', 'vwap', 'adj_factor']]\n mkt_data['fq_vwap'] = mkt_data['vwap'] * mkt_data['adj_factor']\n mkt_data.index.names = ['date']\n turnover = self.influx.getDataMultiprocess('DailyMarket_Gus', 'shares_turnover', data_start, end,\n ['code', 'float_turnover'])\n turnover['float_turnover'] = turnover['float_turnover'] / 100\n turnover.index.names = ['date']\n mkt_data = pd.merge(mkt_data.reset_index(), turnover.reset_index(), on=['date', 'code'])\n mkt_data.set_index('date', inplace=True)\n mkt_data['unturn'] = 1 - mkt_data['float_turnover']\n mkt_data = mkt_data.loc[:, ['code', 'fq_vwap', 'float_turnover', 'unturn']]\n codes = mkt_data['code'].unique()\n split_codes = np.array_split(codes, n_jobs)\n with parallel_backend('multiprocessing', n_jobs=n_jobs):\n res = Parallel()(delayed(CGO.JOB_factors)\n (mkt_data, codes, start, self.period, self.db, self.measure)\n for codes in split_codes)\n print('CGO finish')\n print('-' * 30)\n fail_list = []\n for r in res:\n fail_list.extend(r)\n return fail_list\n\nif __name__ == '__main__':\n i = CGO()\n r = i.cal_factors(20200101, 20200508, N_JOBS)\n print(r)\n","sub_path":"quant_engine/Factor/Reverse/CGO.py","file_name":"CGO.py","file_ext":"py","file_size_in_byte":4060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"123388238","text":"import tensorflow as tf;\nimport hydro_serving_grpc as hs;\n\nmodel_path = \"/model/files/inceptionv3_retina_screening1.h5\";\n\nmodel = tf.keras.models.load_model(model_path);\n\ndef predict(shaped_image):\n\n\tresult = model(shaped_image);\n\n\tinference = result[0][0] * 1000 - 453;\n\n\tif (inference > 0.5):\n\t\tprediction = \"normal ({})\".format(inference);\n\telse:\n\t\tprediction = \"abnormal ({})\".format(inference);\n\n\tprediction_tensor_shape = hs.TensorShapeProto(dim = hs.TensorShapeProto(size = 1));\n\n\tprediction_tensor_proto = hs.TensorProto\\\n\t(\n\t\tdtype = hs.DT_STRING,\n\t\tstring_val = [prediction],\n\t\ttensor_shape = prediction_tensor_shape\n\t);\n\n\treturn hs.PredictResponse(outputs = 
{\"result\": prediction_tensor_proto});","sub_path":"flask_deploy/models/inception_v3_retina_screening600/src/func_main.py","file_name":"func_main.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"266998791","text":"# AoC 2018 - Day 1 - Part 2\n# Subject can be found here : https://adventofcode.com/2018/day/1\n\nfrom typing import Union\n\ndef read_file_func() -> list:\n\tinput_file = open(\"input.txt\", \"r\")\n\tlines = input_file.readlines()\n\t# print(type(lines))\n\treturn lines\n\ndef track_freq_changes(result_freq: int, track_freq: dict) -> Union[dict, int]:\n\texit_prgm = 0\n\t# print(\"result freq = \", result_freq)\n\t# print(\"dict : \", track_freq)\n\tif result_freq not in track_freq:\n\t\t# print(\"not in list\")\n\t\ttrack_freq[result_freq] = 1\n\telse:\n\t\t# print(\"IN list\")\n\t\ttrack_freq[result_freq] += 1\n\t\texit_prgm = 1\n\t\treturn track_freq, exit_prgm\n\treturn track_freq, exit_prgm\n\ndef apply_frequencies(result_freq: int, track_freq: dict) -> Union[int, dict]:\n\tlines = read_file_func()\n\t# result_freq = 0\n\t# my_freq_list = []\n\tfor line in lines:\n\t\tline = line.strip()\n\t\t# print(line, end = \" \")\n\t\tresult_freq += int(line)\n\t\t# my_freq_list.append(result_freq)\n\t\ttrack_freq, exit_prgm = track_freq_changes(result_freq, track_freq)\n\t\t# print(\"intern of ROUND -> \", result_freq)\n\t\tif exit_prgm == 1:\n\t\t\treturn result_freq, track_freq, exit_prgm\n\t# print(\"FINAL of ROUND -> \", result_freq)\n\treturn result_freq, track_freq, exit_prgm\n\nif __name__ == \"__main__\":\n\texit_prgm = 0\n\tround = 1\n\ttrack_freq = {}\n\tprint(\"Round \", round)\n\tresult_freq, track_freq, exit_prgm = apply_frequencies(0, track_freq)\n\tprint(\"Resulting Frequency -> \", result_freq)\n\t# result_freq = 138 * 543\n\twhile exit_prgm == 0:\n\t\tround += 1\n\t\tprint(\"Round \", round)\n\t\tresult_freq, track_freq, exit_prgm = apply_frequencies(result_freq, track_freq)\n\t\t# print(\"dict out \", track_freq)\n\t\tprint(\"Resulting Frequency -> \", result_freq)\n\tprint(\"\\nSolution -> \", result_freq)\n\t","sub_path":"2018/Day01/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"21515534","text":"import os\nimport pathlib\nimport shutil\nimport time\nfrom typing import Union\n\nimport requests\n\nfrom worker.exceptions import ApiError\n\n\ndef download_file_old(url: str, file_name: Union[str, pathlib.Path], dir_: str):\n \"\"\"Download contents of `url` into `dir`.\n\n The resulting file will be called like the last part of `url`.\n Example: \"/foo/bar.png\" will result in a file called \"bar.png\".\n\n If `dir` doesn't exist, it will be created.\n \"\"\"\n dir_path = pathlib.Path(dir_)\n full_path = dir_path / file_name\n\n # Create `dir` if it doesn't exist.\n os.makedirs(dir_path, exist_ok=True)\n # Load initial headers to get actual length of file.\n initial_headers = requests.head(url).headers\n # Store actual size.\n actual_size = initial_headers.get(\"Content-Length\")\n actual_size = int(actual_size) if actual_size is not None else None\n size_written = 0\n if full_path.is_dir():\n print(f\"Can't download dir {file_name}\")\n return str(full_path)\n # Continue requesting at the current position until everything has been downloaded.\n print(f\"Downloading into {full_path}\")\n start = time.time()\n with open(str(full_path), 
\"wb\") as f:\n while (size_written == 0 and actual_size is None) or (\n actual_size is not None and size_written < actual_size\n ):\n send_headers = {\n \"Range\": \"bytes={begin}-{end}\".format(\n begin=size_written, end=actual_size\n )\n }\n # Do not send headers (with broken range) if the file size could not\n # be determined.\n headers = (\n send_headers\n if actual_size is not None and actual_size > 0\n else None\n )\n resp = requests.get(\n url,\n timeout=1200,\n stream=True,\n headers=headers,\n )\n # Treat everything 2xx as okay.\n if resp.status_code < 200 or resp.status_code >= 300:\n message = (\n \"Couldn't download file.\\nURL: {url}\\n\"\n \"Status Code: {status_code}\\n\"\n \"Headers: {headers}\".format(\n url=url, status_code=resp.status_code, headers=str(headers)\n )\n )\n raise ApiError(message=message, response=resp)\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n size_written += len(chunk)\n print(f\"Downloading took {time.time() - start}\")\n return str(full_path)\n\n\ndef download_file(url: str, file_name: Union[str, pathlib.Path], dir_: str):\n \"\"\"Download contents of `url` into `dir`.\n\n The resulting file will be called like the last part of `url`.\n Example: \"/foo/bar.png\" will result in a file called \"bar.png\".\n\n If `dir` doesn't exist, it will be created.\n \"\"\"\n dir_path = pathlib.Path(dir_)\n full_path = dir_path / file_name\n\n # Create `dir` if it doesn't exist.\n os.makedirs(dir_path, exist_ok=True)\n # Load initial headers to get actual length of file.\n initial_headers = requests.head(url).headers\n # Store actual size.\n actual_size = initial_headers.get(\"Content-Length\")\n actual_size = int(actual_size) if actual_size is not None else None\n size_written = 0\n if full_path.is_dir():\n print(f\"Can't download dir {file_name}\")\n return str(full_path)\n # Continue requesting at the current position until everything has been downloaded.\n print(f\"Downloading into {full_path}\")\n start = time.time()\n with requests.get(url, stream=True, ) as r:\n with open(str(full_path), \"wb\") as f:\n print(r.status_code)\n if r.status_code < 200 or r.status_code >= 300:\n message = (\n \"Couldn't download file.\\nURL: {url}\\n\"\n \"Status Code: {status_code}\\n\"\n \"Headers: {headers}\".format(\n url=url, status_code=r.status_code, headers=str(\"\")\n )\n )\n raise ApiError(message=message, response=r)\n shutil.copyfileobj(r.raw, f, length=16*1024*1024)\n print(f\"Downloading NEW took {time.time() - start}\")\n return str(full_path)\n\n\nurl = 'https://s3.us-east-2.wasabisys.com/lidarmill-production/projects/77ec0cf4-76b9-43b2-90bc-e2ac8baadd75/artifact/7c8d2dc8-23ee-43f8-9a6d-28a2682e55fe/data_files/069d9ce4-3197-414c-8a32-8071a0793e98.cts'\ndirectory = '/home/lidarmill/tmp'\n\nfor _ in range(3):\n file_name = 'test.cts'\n download_file_old(url, file_name, directory)\n\nfor _ in range(3):\n file_name = 'testNew.cts'\n download_file(url, file_name, directory)\n","sub_path":"test_downloading.py","file_name":"test_downloading.py","file_ext":"py","file_size_in_byte":4810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"355258017","text":"import cairo\nfrom gi.repository import Gtk\n\n\nclass transparent_window (Gtk.Window):\n def __init__(self):\n super(transparent_window, self).__init__()\n self.set_position(Gtk.WindowPosition.CENTER)\n self.set_border_width(30)\n self.screen = self.get_screen()\n self.visual = self.screen.get_rgba_visual()\n 
self.set_decorated(False)\n if self.visual is not None and self.screen.is_composited():\n self.set_visual(self.visual)\n\n self.set_app_paintable(True)\n self.connect(\"draw\", self.area_draw)\n\n def area_draw(self, widget, cr):\n cr.set_source_rgba(.1, .1, .1, 0.9)\n cr.set_operator(cairo.OPERATOR_SOURCE)\n cr.paint()\n cr.set_operator(cairo.OPERATOR_OVER)\n","sub_path":"gui/transparent_window.py","file_name":"transparent_window.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594611445","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport turtle\n\n\ndef smile(rad=25):\n XC = 0.2\n print(turtle.xcor())\n print(turtle.ycor())\n turtle.penup()\n turtle.forward(rad)\n turtle.pendown()\n turtle.left(90)\n turtle.color(\"black\", \"yellow\")\n turtle.begin_fill()\n turtle.circle(rad)\n turtle.end_fill()\n\n turtle.penup()\n turtle.goto(-rad*0.5+rad*XC, rad*0.5)\n turtle.pendown()\n\n turtle.color(\"black\", \"blue\")\n turtle.begin_fill()\n turtle.circle(rad*XC)\n turtle.end_fill()\n\n turtle.penup()\n turtle.goto(rad*0.5+rad*XC, rad*0.5)\n turtle.pendown()\n\n turtle.color(\"black\", \"blue\")\n turtle.begin_fill()\n turtle.circle(rad*XC)\n turtle.end_fill()\n\n turtle.penup()\n turtle.goto(0, rad*XC)\n turtle.left(180)\n turtle.pendown()\n\n turtle.pen(fillcolor=\"black\", pencolor=\"black\", pensize=10+6)\n turtle.forward(rad*XC*2)\n\n turtle.penup()\n turtle.goto(rad*0.5, -rad*0.5+rad*XC)\n turtle.pendown()\n\n turtle.pen(fillcolor=\"red\", pencolor=\"red\", pensize=10+6)\n turtle.circle(-rad/2,180)\n\n\ndef main():\n turtle.shape('turtle')\n smile(100)\n\n\nmain()\n","sub_path":"practika/lab2/task_13.py","file_name":"task_13.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141338952","text":"# -*- coding: UTF-8 -*-\n\nfrom threading import Thread\nfrom flask import current_app, render_template\nfrom flask_mail import Message\n#from app.common.extensions import mail\nfrom app.common import logger\n\nlogger = logger.Logger(logger=\"mails\").getlog()\n\n#thread subclass for sending mail\nclass SendMailThread(Thread):\n def __init__(self,app,msg):\n super(SendMailThread,self).__init__()\n self.__app=app\n self.__msg=msg\n\n #send mail asynchronously\n def send_async_mail(self,app,msg):\n with app.app_context():\n from app.common.extensions import mail\n logger.info(\"start send mail\")\n mail.send(msg)\n\n # send the mail\n def run(self):\n self.send_async_mail(self.__app,self.__msg)\n\n#wraps the mail sending process\ndef send_mail(subject,to,template=None,**kwargs):\n #get the current app object\n app=current_app._get_current_object()\n msg=Message(app.config['MAIL_SUBJECT_PREFIX'] + ' ' +subject,\n sender=app.config['MAIL_SENDER'],recipients=[to])\n #attachment handling\n #if attachments:\n # pass\n #handle bulk mail sending\n #if is_bulk:\n # pass\n if template is None:\n msg.html ='
    test
    '\n msg.body='test email'\n else:\n msg.html=render_template(template + '.html',**kwargs)\n msg.body=render_template(template + '.txt',**kwargs)\n\n #send via the worker thread\n thr=SendMailThread(app,msg)\n thr.start()\n return thr\n\n\n#send mail asynchronously\n# @email.route('/asyc_send/',methods=['POST','GET'])\n# def sendAsycMail():\n# subject=u'async mail sending test'\n# rec=['***@qq.com']\n# tp='email/send.html'\n# test='test'\n# #send mail asynchronously\n# sendMail.send_mail(subject=subject,recv=rec,template=tp,test=test)\n# return 'Asyc Send!!!'\n","sub_path":"app/common/mails.py","file_name":"mails.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609064527","text":"'''\nCreated on May 24, 2019\n\n@author: malte\n'''\nfrom pathlib import Path\n\nfrom config.globals import BASE_PATH\nfrom domain.action_types import CLICK, IMAGE, INFO, DEALS, RATING, SEARCH, POI,\\\n DEST, FILTER, SORT, NAMES\nfrom helper.df_ops import copy_features, reduce_mem_usage, apply\nfrom helper.loader import load_hdfs, write_feather, load_feather\nimport numpy as np\nimport pandas as pd\nimport time\nfrom featuregen.price import price_features\nfrom featuregen.popularity import print_col_list\nfrom domain.features import PRICE_FEATURES\n\n\nTEST_FOLDER = BASE_PATH + 'competition/'\nCRAWL_FOLDER = BASE_PATH + 'crawled/'\nPOI_FOLDER = BASE_PATH + 'competition/'\n\nACTION_MAP = {}\n\nITEM_ACTIONS = [CLICK,IMAGE,INFO,DEALS,RATING]\nCOUNT_ACTIONS = [SEARCH,POI,SORT,DEST,FILTER]\n\ndef main():\n log = load_hdfs( TEST_FOLDER + 'data_log.hd5' )\n examples = load_hdfs( TEST_FOLDER + 'data_examples.hd5' )\n examples = price_features( TEST_FOLDER, log, examples )\n \n cols = PRICE_FEATURES\n cols.remove('price_city_all_permean')\n cols.remove('price_city_all_mean')\n cols.remove('price_platform_all_permean')\n cols.remove('price_platform_all_mean')\n cols.remove('price_city_platform_all_permean')\n cols.remove('price_city_platform_all_mean')\n cols.remove('price_city_click_permean')\n cols.remove('price_city_click_mean')\n cols.remove('price_platform_click_permean')\n cols.remove('price_platform_click_mean')\n cols.remove('price_city_platform_click_permean')\n cols.remove('price_city_platform_click_mean')\n cols.remove('price_city_impressions_permean')\n cols.remove('price_city_impressions_mean')\n cols.remove('price_platform_impressions_permean')\n cols.remove('price_platform_impressions_mean')\n cols.remove('price_city_platform_impressions_permean')\n cols.remove('price_city_platform_impressions_mean')\n for c in cols:\n del examples[c]\n \n# examples = geo.geo_features( TEST_FOLDER, CRAWL_FOLDER, log, examples )\n# cols = GEO_FEATURES\n# cols.remove('distance_city')\n# cols.remove('distance_last')\n# for c in cols:\n# del examples[c]\n \n session_features( TEST_FOLDER, log, examples, crawl_path=CRAWL_FOLDER, poi_path=POI_FOLDER, redo=True )\n\ndef session_features(base_path, log, examples, price_path=None, crawl_path=CRAWL_FOLDER, poi_path=POI_FOLDER, redo=False):\n \n name = 'session_features'\n if price_path is None:\n price_path = base_path\n \n path = Path( base_path + 'features/' + name + '.fthr' )\n if path.is_file() and not redo:\n features = load_feather( path )\n features = features[features.session_id.isin( examples.session_id.unique() )]\n examples = copy_features( examples, features )\n else:\n examples, cols = create_features( log, examples, price_path=price_path, crawl_path=crawl_path, poi_path=poi_path )\n examples = reduce_mem_usage(examples, cols=cols)\n 
write_feather( examples[['session_id','impressions'] + list(cols)], path )\n #examples[['session_id','impressions','label','step'] + list(cols)].to_csv( base_path + 'features/' + name + '.csv' )\n print_col_list( cols )\n \n return examples\n\ndef create_features( log, examples, price_path=None, crawl_path=None, poi_path=None ):\n \n tstart = time.time()\n print( 'create_features session' )\n \n cols_pre = examples.columns.values\n \n log, examples = session_pop_features(log, examples)\n #log, examples = session_price_features(log, examples, price_path=price_path)\n #log, examples = session_rating_features(log, examples, crawl_path=crawl_path)\n #log, examples = session_distance_features(log, examples, crawl_path=crawl_path, poi_path=poi_path)\n examples = session_time_features(log, examples, price_path=price_path)\n examples = session_sort_features(log, examples)\n examples = session_filter_features(log, examples)\n \n new_cols = np.setdiff1d(examples.columns.values, cols_pre)\n \n print( 'create_features session in {}s'.format( (time.time() - tstart) ) )\n \n return examples, new_cols\n\ndef session_pop_features( log, examples ):\n \n log['maxstep_all'] = log.groupby('session_id').step.transform( max )\n log['maxstep'] = log[log.exclude == 0].groupby('session_id').step.transform( max )\n log['mrr'] = 1 / ( log['maxstep'] - log.step )\n log['mrr'] = log['mrr'].replace( [np.inf], np.nan ).fillna(0)\n \n log['stepsize'] = 1 / log['maxstep']\n log['linear'] = log.step * log['stepsize']\n del log['stepsize'], log['maxstep']\n \n key = 'session_all'\n examples = counts_for_mask(log, examples, group=['session_id'], key=key)\n del examples['session_all_count_rel']\n \n# key = 'session_after_all'\n# examples = counts_for_mask(log, examples, after=True, group=['session_id'], key=key)\n# del examples['session_after_all_count_rel']\n \n for action in COUNT_ACTIONS:\n log_mask = log.action_type == action\n key = 'session_' + NAMES[action]\n examples = counts_for_mask(log, examples, mask=log_mask, group=['session_id'], key=key)\n# key = 'session_after_' + NAMES[action]\n# examples = counts_for_mask(log, examples, after=True, mask=log_mask, group=['session_id'], key=key)\n \n for action in ITEM_ACTIONS:\n log_mask = log.action_type == action\n key = 'session_' + NAMES[action]\n examples = counts_for_mask(log, examples, mask=log_mask, group=['session_id'], key=key)\n# key = 'session_after_' + NAMES[action]\n# examples = counts_for_mask(log, examples, after=True, mask=log_mask, group=['session_id'], key=key)\n \n log_mask = (log.action_type == action) & ~log.reference.isnull()\n key = 'session_item_' + NAMES[action]\n examples = counts_for_mask(log, examples, mask=log_mask, decay=True, group=['session_id','reference'], group_examples=['session_id','impressions'], key=key)\n# key = 'session_after_item_' + NAMES[action]\n# examples = counts_for_mask(log, examples, after=True, mask=log_mask, group=['session_id','reference'], group_examples=['session_id','impressions'], key=key)\n \n log_mask = log.action_type.isin(ITEM_ACTIONS) & ~log.reference.isnull()\n key = 'session_item_all'\n examples = counts_for_mask(log, examples, mask=log_mask, decay=True, group=['session_id','reference'], group_examples=['session_id','impressions'], key=key)\n \n examples['session_size'] = examples['step']\n \n del log['maxstep_all']\n \n return log, examples\n\ndef session_price_features( log, examples, price_path=None ):\n \n# price_map = pd.read_csv( price_path + 'tmp/' + 'city_price.csv', header=None, 
names=['city','price_city_impressions_mean'], dtype={0:np.int16, 1:np.float32} )\n# price_map.index = price_map.city\n# price_map = price_map['price_city_impressions_mean']\n# \n# log['price_city_permean'] = log['city'].apply( lambda x: price_map.ix[x] if ~np.isnan(x) and x in price_map.index else np.nan )\n# log['price_city_permean'] = log['price_session'] / log['price_city_permean']\n \n examples = prices_for_actions(log, examples, actions=ITEM_ACTIONS, key='all')\n examples = prices_for_actions(log, examples, actions=[CLICK], key='click')\n examples = prices_for_actions(log, examples, group=['session_id','city'], actions=ITEM_ACTIONS, key='city_all')\n examples = prices_for_actions(log, examples, group=['session_id','city'], actions=[CLICK], key='city_click')\n \n# del log['price_city_permean']\n \n return log, examples\n\ndef session_rating_features( log, examples, crawl_path=None ):\n \n if 'ri_rating_percentage' not in examples.columns: \n examples = add_from_file( crawl_path + 'item_info/crawl_ci.csv', examples, to=['impressions'], filter=['ci_rating_percentage'] )\n else:\n examples['ci_rating_percentage'] = examples['ri_rating_percentage']\n log = add_from_file( crawl_path + 'item_info/crawl_ci.csv', log, col=['item_id'], filter=['ci_rating_percentage'] )\n \n examples = rating_for_actions(log, examples, actions=ITEM_ACTIONS, key='all')\n examples = rating_for_actions(log, examples, actions=[CLICK], key='click')\n \n del log['ci_rating_percentage']\n del examples['ci_rating_percentage']\n \n return log, examples\n\ndef session_distance_features( log, examples, crawl_path=None, poi_path=None ):\n \n log = add_from_file( crawl_path + 'item_info/crawl_ci.csv', log, col=['item_id'], filter=['ci_lat','ci_lng'] )\n log = add_from_file( crawl_path + 'city/city_latlng.csv', log, col=['city'], filter=['city_lat','city_lng'] )\n \n log = add_last_poi(poi_path, log)\n \n log = add_from_file( crawl_path + 'poi/poi_latlng.csv', log, col=['poi'], to=['last_poi'], filter=['poi_lat','poi_lng'] )\n \n log['distance_city'] = haversine(log.ci_lat, log.ci_lng, log.city_lat, log.city_lng)\n log['distance_poi'] = haversine(log.ci_lat, log.ci_lng, log.poi_lat, log.poi_lng)\n log['distance_last'] = log['distance_poi']\n mask = ~log['distance_city'].isnull() & log['distance_poi'].isnull()\n log.loc[mask, 'distance_last'] = log.loc[mask, 'distance_city']\n \n examples = distance_for_actions(log, examples, actions=ITEM_ACTIONS, key='all')\n examples = distance_for_actions(log, examples, actions=[CLICK], key='click')\n \n del log['distance_city']\n del log['distance_poi']\n del log['last_poi']\n del log['poi_lat'], log['poi_lng']\n del log['city_lat'], log['city_lng']\n del log['ci_lat'], log['ci_lng']\n \n return log, examples\n\n\ndef add_last_poi( poi_path, log ):\n \n def _add_last_poi(row, save=None):\n \n session = row[0]\n action = row[1]\n ref = row[2]\n city = row[3]\n \n if 'session' in save and save['session'] != session or not 'session' in save:\n #new session\n save['session'] = session\n save['last_poi'] = -1\n \n if 'city' in save and save['city'] != city or not 'city' in save:\n #new session\n save['city'] = city\n save['last_poi'] = -1\n \n if action == POI and not np.isnan( ref ):\n save['last_poi'] = ref\n \n return save['last_poi']\n \n file = poi_path + 'last_poi.fthr'\n \n if not Path( file ).is_file():\n log_full = load_hdfs( poi_path + 'data_log.hd5' )\n log_full['last_poi'] = apply(log_full, ['session_id','action_type','reference','city'], _add_last_poi, verbose=100000)\n 
write_feather( log_full[['session_id','last_poi']], file )\n \n last_poi = load_feather( file )\n print( len(last_poi) )\n last_poi = last_poi[last_poi.session_id.isin( log.session_id.unique() )]\n \n print( len(last_poi) )\n print( len(log) )\n log['last_poi'] = last_poi['last_poi'].values\n del last_poi\n \n return log\n\ndef session_time_features( log, examples, price_path=None ):\n \n log['dwell'] = log['timestamp'].shift( -1 )\n log['dwell'] = log['dwell'] - log['timestamp']\n log['session_next'] = log['session_id'].shift( -1 )\n log.loc[ log['session_id'] != log['session_next'], 'dwell'] = np.nan\n \n del log['session_next']\n\n #DOOO\n examples = times_for_item(log, examples)\n mask = log.action_type.isin( ITEM_ACTIONS )\n examples = times_for_item(log, examples, mask, group=['session_id','reference'], group_example=['session_id','impressions'], key='item' )\n examples = times_for_item(log, examples, mask, group=['session_id','city'], group_example=['session_id','city'], key='city' )\n \n examples = last_for_reference(log, examples, action=POI, group=['session_id'], key='all_poi' )\n examples = last_for_reference(log, examples, action=SORT, group=['session_id'], key='all_sort' )\n examples = last_for_reference(log, examples, action=FILTER, group=['session_id'], key='all_filter' )\n \n examples = last_for_reference(log, examples, action=POI, group=['session_id','city'], key='poi' )\n examples = last_for_reference(log, examples, action=SORT, group=['session_id','city'], key='sort' )\n examples = last_for_reference(log, examples, action=FILTER, group=['session_id','city'], key='filter' )\n \n examples = last_action(log, examples)\n \n# dataset['session_sort_filter_last'] = dataset['session_sort_filter_last'].fillna(-1)\n# dataset['session_filter_special_last'] = dataset['session_filter_special_last'].fillna(-1)\n \n del log['dwell']\n \n return examples\n\ndef session_sort_features( log, examples ):\n \n sorts = list(range(7)) # originally 9\n \n for sid in sorts: \n print(sid)\n examples = check_preference( log, examples, id=sid )\n \n # dataset['session_sort_filter_last'] = dataset['session_sort_filter_last'].fillna(-1)\n # dataset['session_filter_special_last'] = dataset['session_filter_special_last'].fillna(-1)\n \n return examples\n\ndef session_filter_features( log, examples ):\n\n # sorts = [189, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203]\n sorts = [194, 195, 196, 197, 198, 199, 200, 201, 202, 203]\n \n for sid in sorts: \n print(sid)\n examples = check_preference( log, examples, id=sid, action=FILTER )\n \n # dataset['session_sort_filter_last'] = dataset['session_sort_filter_last'].fillna(-1)\n # dataset['session_filter_special_last'] = dataset['session_filter_special_last'].fillna(-1)\n \n return examples\n\ndef check_preference( log, examples, id=0, action=SORT, key='session_sort' ):\n \n mask = log.action_type == action\n mask = mask & (log.reference==id)\n \n sort = pd.DataFrame()\n sort['session_sort_'+str(id)] = log[mask].groupby( 'session_id' ).size()\n \n print( sum( sort['session_sort_'+str(id)] > 0 ) )\n \n examples = examples.merge( sort, left_on=['session_id'], right_index=True, how='left' )\n \n examples[key+'_'+str(id)].fillna( 0, inplace=True)\n \n return examples\n \ndef times_for_item(log, examples, mask_log=None, after=False, group=['session_id'], group_example=None, key=None):\n \n tstart = time.time()\n print( '\\t times_for_item {}'.format(key) )\n \n base_key = 'session_'\n if key:\n base_key += key + '_'\n \n mask = mask_log if 
mask_log is not None else log.train > -1\n mask = mask & ~log.reference.isnull()\n \n if not after:\n mask = mask & (log.exclude == 0)\n \n if group_example is None:\n group_example = group\n \n grouped = log[mask].groupby( group )\n \n price = pd.DataFrame()\n price[ base_key + 'dwell' ] = grouped['dwell'].sum()\n price[ base_key + 'dwell_min' ] = grouped['dwell'].min()\n price[ base_key + 'dwell_max' ] = grouped['dwell'].max()\n price[ base_key + 'dwell_mean' ] = grouped['dwell'].mean()\n if len( group ) > 1:\n price[ base_key + 'min_step' ] = grouped['step'].min()\n price[ base_key + 'max_step' ] = grouped['step'].max()\n price[ base_key + 'min_time' ] = grouped['timestamp'].min()\n price[ base_key + 'max_time' ] = grouped['timestamp'].max()\n \n examples = examples.merge( price, left_on=group_example, right_index=True, how='left' )\n \n if len( group ) > 1:\n examples[base_key + 'max_time'] = examples['timestamp'] - examples[base_key + 'max_time']\n examples[base_key + 'min_time'] = examples['timestamp'] - examples[base_key + 'min_time']\n examples[base_key + 'max_step'] = ( examples['step'] - examples[base_key + 'max_step'] ) / examples['step']\n examples[base_key + 'min_step'] = ( examples['step'] - examples[base_key + 'min_step'] ) / examples['step']\n \n examples[ base_key + 'dwell' ].fillna( 0, inplace=True )\n examples[ base_key + 'dwell_min' ].fillna( 0, inplace=True )\n examples[ base_key + 'dwell_max' ].fillna( 0, inplace=True )\n examples[ base_key + 'dwell_mean' ].fillna( 0, inplace=True )\n if len( group ) > 1:\n pass\n examples[ base_key + 'min_time' ].fillna( -1, inplace=True )\n examples[ base_key + 'max_time' ].fillna( -1, inplace=True )\n examples[ base_key + 'min_step' ].fillna( -1, inplace=True )\n examples[ base_key + 'max_step' ].fillna( -1, inplace=True )\n \n print( '\\t times_for_item in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\ndef last_for_reference(log, examples, action=POI, group=['session_id'], group_example=None, key=None):\n \n tstart = time.time()\n print( '\\t last_for_reference {}'.format(key) )\n \n base_key = 'session_last'\n if key:\n base_key += '_' + key\n \n mask = log.action_type == action\n mask = mask & (log.exclude == 0)\n \n if group_example is None:\n group_example = group\n \n grouped = log[mask].groupby( group )\n \n price = pd.DataFrame()\n price[ base_key ] = grouped['reference'].last()\n \n examples = examples.merge( price, left_on=group_example, right_index=True, how='left' )\n \n examples[ base_key ] = examples[ base_key ].fillna( -1 ).astype(np.int32)\n \n print( '\\t last_for_reference in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\ndef last_action(log, examples, group=['session_id'], key=None):\n \n tstart = time.time()\n print( '\\t last_action {}'.format(key) )\n\n base_key = 'session_last_action'\n if key:\n base_key += '_' + key\n \n mask = log.hidden == 0\n mask = mask & (log.exclude == 0)\n \n grouped = log[mask].groupby( group )\n \n price = pd.DataFrame()\n price[ base_key ] = grouped['action_type'].last()\n \n examples = examples.merge( price, left_on=group, right_index=True, how='left' )\n examples[ base_key ] = examples[ base_key ].fillna( -1 ).astype(np.int32)\n \n print( '\\t last_action in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\n\ndef prices_for_actions( log, examples, actions=[CLICK], after=False, group=['session_id'], key=None, prefix='session' ):\n \n tstart = time.time()\n print( '\\t prices_for_actions {}'.format(key) )\n \n base_key = prefix + 
'_price_'\n if key:\n base_key += key + '_'\n \n mask = log.action_type.isin( actions )\n mask = mask & ~log.reference.isnull()\n mask = mask & ~log.price_session.isnull()\n \n if not after:\n mask = mask & (log.exclude == 0)\n \n grouped = log[mask].drop_duplicates(['session_id','reference','action_type']).groupby( group )\n \n price = pd.DataFrame()\n #price[ base_key + 'min' ] = grouped['price_session'].min()\n #price[ base_key + 'max' ] = grouped['price_session'].max()\n price[ base_key + 'mean' ] = grouped['price_session'].mean()\n #price[ base_key + 'city_permean' ] = grouped['price_city_permean'].mean()\n \n examples = examples.merge( price, left_on=group, right_index=True, how='left' )\n del price\n \n examples[base_key + 'mean_dist'] = examples[base_key + 'mean'] - examples['prices']\n #examples[base_key + 'min_dist'] = examples[base_key + 'min'] - examples['prices']\n #examples[base_key + 'max_dist'] = examples[base_key + 'max'] - examples['prices']\n \n if len(actions) > 1:\n key_comp = 'all'\n else: \n key_comp = 'click'\n \n examples[base_key + 'city_'+key_comp+'_permean_dist'] = examples[base_key + 'mean'] / examples['price_city_'+key_comp+'_mean']\n examples[base_key + 'city_'+key_comp+'_permean_dist'] = examples[base_key + 'city_'+key_comp+'_permean_dist'] - examples['price_city_'+key_comp+'_permean']\n \n examples[base_key + 'platform_'+key_comp+'_permean_dist'] = examples[base_key + 'mean'] / examples['price_platform_'+key_comp+'_mean']\n examples[base_key + 'platform_'+key_comp+'_permean_dist'] = examples[base_key + 'platform_'+key_comp+'_permean_dist'] - examples['price_platform_'+key_comp+'_permean']\n \n examples[base_key + 'city_platform_'+key_comp+'_permean_dist'] = examples[base_key + 'mean'] / examples['price_city_platform_'+key_comp+'_mean']\n examples[base_key + 'city_platform_'+key_comp+'_permean_dist'] = examples[base_key + 'city_platform_'+key_comp+'_permean_dist'] - examples['price_city_platform_'+key_comp+'_permean']\n \n examples[base_key + 'city_impressions_permean_dist'] = examples[base_key + 'mean'] / examples['price_city_impressions_mean']\n examples[base_key + 'city_impressions_permean_dist'] = examples[base_key + 'city_impressions_permean_dist'] - examples['price_city_impressions_permean']\n \n examples[base_key + 'platform_impressions_permean_dist'] = examples[base_key + 'mean'] / examples['price_platform_impressions_mean']\n examples[base_key + 'platform_impressions_permean_dist'] = examples[base_key + 'platform_impressions_permean_dist'] - examples['price_platform_impressions_permean']\n \n examples[base_key + 'city_platform_impressions_permean_dist'] = examples[base_key + 'mean'] / examples['price_city_platform_impressions_mean']\n examples[base_key + 'city_platform_impressions_permean_dist'] = examples[base_key + 'city_platform_impressions_permean_dist'] - examples['price_city_platform_impressions_permean']\n \n# examples[base_key + 'city_'+key_comp+'_permed_dist'] = examples[base_key + 'mean'] / examples['price_city_'+key_comp+'_median']\n# examples[base_key + 'city_'+key_comp+'_permed_dist'] = examples[base_key + 'city_'+key_comp+'_permed_dist'] - examples['price_city_'+key_comp+'_permed']\n# \n# examples[base_key + 'platform_'+key_comp+'_permed_dist'] = examples[base_key + 'mean'] / examples['price_platform_'+key_comp+'_median']\n# examples[base_key + 'platform_'+key_comp+'_permed_dist'] = examples[base_key + 'platform_'+key_comp+'_permed_dist'] - examples['price_platform_'+key_comp+'_permed']\n# \n# examples[base_key + 
'city_platform_'+key_comp+'_permed_dist'] = examples[base_key + 'mean'] / examples['price_city_platform_'+key_comp+'_median']\n# examples[base_key + 'city_platform_'+key_comp+'_permed_dist'] = examples[base_key + 'city_platform_'+key_comp+'_permed_dist'] - examples['price_city_platform_'+key_comp+'_permed']\n# \n# examples[base_key + 'city_impressions_permed_dist'] = examples[base_key + 'mean'] / examples['price_city_impressions_median']\n# examples[base_key + 'city_impressions_permed_dist'] = examples[base_key + 'city_impressions_permed_dist'] - examples['price_city_impressions_permed']\n# \n# examples[base_key + 'platform_impressions_permed_dist'] = examples[base_key + 'mean'] / examples['price_platform_impressions_median']\n# examples[base_key + 'platform_impressions_permed_dist'] = examples[base_key + 'platform_impressions_permed_dist'] - examples['price_platform_impressions_permed']\n# \n# examples[base_key + 'city_platform_impressions_permed_dist'] = examples[base_key + 'mean'] / examples['price_city_platform_impressions_median']\n# examples[base_key + 'city_platform_impressions_permed_dist'] = examples[base_key + 'city_platform_impressions_permed_dist'] - examples['price_city_platform_impressions_permed']\n \n if sum( np.isinf(examples[base_key + 'mean_dist']) ):\n print('mean inf')\n print( examples[np.isinf(examples[base_key + 'mean'])] )\n examples[np.isinf(examples[base_key + 'mean'])].to_csv('debug.csv')\n print( sum( np.isinf(examples[base_key + 'mean']) ) )\n exit()\n if sum( np.isinf(examples[base_key + 'city_'+key_comp+'_permean_dist']) ):\n print('city_all_permean_dist inf')\n print( examples[np.isinf(examples[base_key + 'city_'+key_comp+'_permean_dist'])] )\n print( examples[np.isinf(examples['price_city_'+key_comp+'_permean'])] )\n examples[np.isinf(examples[base_key + 'city_'+key_comp+'_permean_dist'])].to_csv('debug.csv')\n print( sum( np.isinf(examples[base_key + 'city_'+key_comp+'_permean_dist']) ) )\n exit()\n \n examples[ base_key + 'mean' ].fillna( -1, inplace=True )\n #examples[ base_key + 'min' ].fillna( -1, inplace=True )\n #examples[ base_key + 'max' ].fillna( -1, inplace=True )\n examples[ base_key + 'mean_dist' ].fillna( 0, inplace=True )\n #examples[ base_key + 'min_dist' ].fillna( 0, inplace=True )\n #examples[ base_key + 'max_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'city_'+key_comp+'_permean_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'platform_'+key_comp+'_permean_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'city_platform_'+key_comp+'_permean_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'city_impressions_permean_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'platform_impressions_permean_dist' ].fillna( 0, inplace=True )\n examples[ base_key + 'city_platform_impressions_permean_dist' ].fillna( 0, inplace=True )\n \n# examples[ base_key + 'city_'+key_comp+'_permed_dist' ].fillna( 0, inplace=True )\n# examples[ base_key + 'platform_'+key_comp+'_permed_dist' ].fillna( 0, inplace=True )\n# examples[ base_key + 'city_platform_'+key_comp+'_permed_dist' ].fillna( 0, inplace=True )\n# examples[ base_key + 'city_impressions_permed_dist' ].fillna( 0, inplace=True )\n# examples[ base_key + 'platform_impressions_permed_dist' ].fillna( 0, inplace=True )\n# examples[ base_key + 'city_platform_impressions_permed_dist' ].fillna( 0, inplace=True )\n \n# del examples[ base_key + 'city_permean' ]\n \n print( '\\t prices_for_actions in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\ndef 
rating_for_actions( log, examples, actions=[CLICK], after=False, group=['session_id'], key=None, prefix='session' ):\n \n tstart = time.time()\n print( '\\t rating_for_actions {}'.format(key) )\n \n base_key = prefix + '_rating_'\n if key:\n base_key += key + '_'\n \n mask = log.action_type.isin( actions )\n mask = mask & ~log.ci_rating_percentage.isnull()\n mask = mask & (log.hidden == 0)\n \n if not after:\n mask = mask & (log.exclude == 0)\n \n grouped = log[mask].drop_duplicates(['session_id','reference','action_type']).groupby( group )\n \n rating = pd.DataFrame()\n #rating[ base_key + 'min' ] = grouped['ci_rating_percentage'].min()\n #rating[ base_key + 'max' ] = grouped['ci_rating_percentage'].max()\n rating[ base_key + 'mean' ] = grouped['ci_rating_percentage'].mean()\n \n examples = examples.merge( rating, left_on=group, right_index=True, how='left' )\n del rating\n \n examples[base_key + 'mean_dist'] = examples['ci_rating_percentage'] - examples[base_key + 'mean']\n #examples[base_key + 'min_dist'] = examples['ci_rating_percentage'] - examples[base_key + 'min']\n #examples[base_key + 'max_dist'] = examples['ci_rating_percentage'] - examples[base_key + 'max']\n \n if sum( np.isinf(examples[base_key + 'mean_dist']) ):\n print('mean inf')\n print( examples[np.isinf(examples[base_key + 'mean'])] )\n examples[np.isinf(examples[base_key + 'mean'])].to_csv('debug.csv')\n print( sum( np.isinf(examples[base_key + 'mean']) ) )\n exit()\n \n examples[ base_key + 'mean' ].fillna( -1, inplace=True )\n #examples[ base_key + 'min' ].fillna( -1, inplace=True )\n #examples[ base_key + 'max' ].fillna( -1, inplace=True )\n examples[ base_key + 'mean_dist' ].fillna( 0, inplace=True )\n #examples[ base_key + 'min_dist' ].fillna( 0, inplace=True )\n #examples[ base_key + 'max_dist' ].fillna( 0, inplace=True )\n \n print( '\\t rating_for_actions in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\ndef stars_for_actions( log, examples, actions=[CLICK], after=False, group=['session_id'], key=None, prefix='session' ):\n \n tstart = time.time()\n print( '\\t stars_for_actions {}'.format(key) )\n \n base_key = prefix + '_stars_'\n if key:\n base_key += key + '_'\n \n mask = log.action_type.isin( actions )\n mask = mask & ~log.ci_stars.isnull()\n mask = mask & (log.hidden == 0)\n \n if not after:\n mask = mask & (log.exclude == 0)\n \n grouped = log[mask].drop_duplicates(['session_id','reference','action_type']).groupby( group )\n \n rating = pd.DataFrame()\n #rating[ base_key + 'min' ] = grouped['ci_stars'].min()\n #rating[ base_key + 'max' ] = grouped['ci_stars'].max()\n rating[ base_key + 'mean' ] = grouped['ci_stars'].mean()\n \n examples = examples.merge( rating, left_on=group, right_index=True, how='left' )\n del rating\n \n examples[base_key + 'mean_dist'] = examples['tmp_stars'] - examples[base_key + 'mean']\n #examples[base_key + 'min_dist'] = examples['ci_stars'] - examples[base_key + 'min']\n #examples[base_key + 'max_dist'] = examples['ci_stars'] - examples[base_key + 'max']\n \n if sum( np.isinf(examples[base_key + 'mean_dist']) ):\n print('mean inf')\n print( examples[np.isinf(examples[base_key + 'mean'])] )\n examples[np.isinf(examples[base_key + 'mean'])].to_csv('debug.csv')\n print( sum( np.isinf(examples[base_key + 'mean']) ) )\n exit()\n \n examples[ base_key + 'mean' ].fillna( -1, inplace=True )\n #examples[ base_key + 'min' ].fillna( -1, inplace=True )\n #examples[ base_key + 'max' ].fillna( -1, inplace=True )\n examples[ base_key + 'mean_dist' ].fillna( 0, inplace=True )\n 
#examples[ base_key + 'min_dist' ].fillna( 0, inplace=True )\n    #examples[ base_key + 'max_dist' ].fillna( 0, inplace=True )\n    \n    print( '\\t stars_for_actions in {}s'.format( (time.time() - tstart) ) )\n    \n    return examples\n\ndef distance_for_actions( log, examples, actions=[CLICK], after=False, group=['session_id'], key=None, prefix='session' ):\n    \n    tstart = time.time()\n    print( '\\t distance_for_actions {}'.format(key) )\n    \n    base_key = prefix + '_distance_'\n    if key:\n        base_key += key + '_'\n    \n    mask = log.action_type.isin( actions )\n    mask = mask & ~log.distance_city.isnull()\n    mask = mask & (log.hidden == 0)\n    \n    if not after:\n        mask = mask & (log.exclude == 0)\n    \n    grouped = log[mask].drop_duplicates(['session_id','reference','action_type']).groupby( group )\n    \n    rating = pd.DataFrame()\n#     rating[ base_key + 'city_min' ] = grouped['distance_city'].min()\n#     rating[ base_key + 'city_max' ] = grouped['distance_city'].max()\n#     rating[ base_key + 'city_mean' ] = grouped['distance_city'].mean()\n    #rating[ base_key + 'last_min' ] = grouped['distance_last'].min()\n    #rating[ base_key + 'last_max' ] = grouped['distance_last'].max()\n    rating[ base_key + 'last_mean' ] = grouped['distance_last'].mean()\n    \n    examples = examples.merge( rating, left_on=group, right_index=True, how='left' )\n    del rating\n    \n#     examples[base_key + 'city_mean_dist'] = examples['distance_city'] - examples[base_key + 'city_mean']\n#     examples[base_key + 'city_min_dist'] = examples['distance_city'] - examples[base_key + 'city_min']\n#     examples[base_key + 'city_max_dist'] = examples['distance_city'] - examples[base_key + 'city_max']\n    examples[base_key + 'last_mean_dist'] = examples['distance_last'] - examples[base_key + 'last_mean']\n#     examples[base_key + 'last_min_dist'] = examples['distance_last'] - examples[base_key + 'last_min']\n#     examples[base_key + 'last_max_dist'] = examples['distance_last'] - examples[base_key + 'last_max']\n    \n#     if sum( np.isinf(examples[base_key + 'city_mean_dist']) ):\n#         print('mean inf')\n#         print( examples[np.isinf(examples[base_key + 'mean'])] )\n#         examples[np.isinf(examples[base_key + 'mean'])].to_csv('debug.csv')\n#         print( sum( np.isinf(examples[base_key + 'mean']) ) )\n#         exit()\n    if sum( np.isinf(examples[base_key + 'last_mean_dist']) ):\n        print('mean inf')\n        print( examples[np.isinf(examples[base_key + 'last_mean'])] )\n        examples[np.isinf(examples[base_key + 'last_mean'])].to_csv('debug.csv')\n        print( sum( np.isinf(examples[base_key + 'last_mean']) ) )\n        exit()\n    \n#     examples[ base_key + 'city_mean' ].fillna( -1, inplace=True )\n#     examples[ base_key + 'city_min' ].fillna( -1, inplace=True )\n#     examples[ base_key + 'city_max' ].fillna( -1, inplace=True )\n#     examples[ base_key + 'city_mean_dist' ].fillna( 0, inplace=True )\n#     examples[ base_key + 'city_min_dist' ].fillna( 0, inplace=True )\n#     examples[ base_key + 'city_max_dist' ].fillna( 0, inplace=True )\n    examples[ base_key + 'last_mean' ].fillna( -1, inplace=True )\n#     examples[ base_key + 'last_min' ].fillna( -1, inplace=True )\n#     examples[ base_key + 'last_max' ].fillna( -1, inplace=True )\n    examples[ base_key + 'last_mean_dist' ].fillna( 0, inplace=True )\n#     examples[ base_key + 'last_min_dist' ].fillna( 0, inplace=True )\n#     examples[ base_key + 'last_max_dist' ].fillna( 0, inplace=True )\n    \n    print( '\\t distance_for_actions in {}s'.format( (time.time() - tstart) ) )\n    \n    return examples\n\ndef counts_for_mask( log, examples, mask=None, after=False, decay=False, group=[], group_examples=None, key=None, step_key='step' ):\n    \n    tstart = time.time()\n    
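# count the log rows matching the mask per group; relative counts (and the optional mrr/linear sums) are normalized by the step column\n    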
print( '\\t counts_for_mask {}'.format(key) )\n \n base_key = key + '_'\n \n if group_examples is None:\n group_examples = group\n group_log = group\n \n if mask is None:\n mask = log.train > -1\n if after:\n mask = mask & (log.exclude == 1)\n else:\n mask = mask & (log.exclude == 0)\n \n grouped = log[mask].groupby( group_log )\n \n pop = pd.DataFrame()\n pop[base_key + 'count'] = grouped.size()\n pop[base_key + 'maxstep'] = grouped['maxstep_all'].max()\n if not after and decay:\n pop[base_key + 'mrr'] = grouped['mrr'].sum()\n pop[base_key + 'linear'] = grouped['linear'].sum()\n \n examples = examples.merge( pop, left_on=group_examples, right_index=True, how='left' )\n \n examples[base_key + 'count'].fillna( 0, inplace = True )\n if not after and decay:\n examples[base_key + 'count_rel'] = examples[base_key + 'count'] / examples[step_key]\n examples[base_key + 'mrr'] = examples[base_key + 'mrr'] / examples[step_key]\n examples[base_key + 'linear'] = examples[base_key + 'linear'] / examples[step_key]\n elif not after:\n examples[base_key + 'count_rel'] = examples[base_key + 'count'] / examples[step_key]\n else:\n examples[base_key + 'count_rel'] = examples[base_key + 'count'] / (examples[base_key + 'maxstep'] - examples[step_key])\n \n examples[ base_key + 'count_rel' ].fillna( 0, inplace=True )\n if not after and decay:\n examples[ base_key + 'mrr' ].fillna( 0, inplace=True )\n examples[ base_key + 'linear' ].fillna( 0, inplace=True )\n \n del examples[base_key + 'maxstep']\n \n print( '\\t counts_for_mask in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n \ndef counts_for_actions( log, examples, group=[], key=None ):\n \n base_key = 'session_'\n if key is not None:\n base_key += key + '_' \n \n tstart = time.time()\n print( '\\t counts_for_actions {}'.format(key) )\n \n mergeon = group + ['session_id']\n \n groupon = group + ['session_id', 'action_type']\n grouped = log.groupby( groupon ).size()\n grouped = grouped.unstack( level=len(groupon)-1 )\n grouped.rename( columns=lambda x: base_key + NAMES[x], inplace=True )\n grouped.fillna(0, inplace=True)\n \n examples = examples.merge( grouped, left_on=mergeon, right_index=True )\n \n print( '\\t counts_for_actions in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\ndef add_from_file( file, examples, col=['item_id'], to=None, filter=None ):\n tstart = time.time()\n print( '\\t add_from_file {}'.format(file) )\n \n keep = False\n if to is None:\n to = col\n keep = True\n \n toadd = pd.read_csv( file, index_col=0 )\n if filter is not None:\n toadd = toadd[col+filter]\n \n copy = False\n if col[0] in examples.columns:\n copy = True\n \n examples = examples.merge( toadd, left_on=to, right_on=col, how='left' )\n \n if copy and not keep: \n examples[col[0]] = examples[col[0]+'_y']\n del examples[col[0]+'_y']\n elif not copy and not keep:\n del examples[col[0]]\n \n print( '\\t add_from_file in {}s'.format( (time.time() - tstart) ) )\n \n return examples\n\n# vectorized haversine function\ndef haversine(lat1, lon1, lat2, lon2, to_radians=True, earth_radius=6371):\n \"\"\"\n slightly modified version: of http://stackoverflow.com/a/29546836/2901002\n\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees or in radians)\n\n All (lat, lon) coordinates must have numeric dtypes and be of equal length.\n\n \"\"\"\n if to_radians:\n lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])\n\n a = np.sin((lat2-lat1)/2.0)**2 + \\\n np.cos(lat1) * np.cos(lat2) * 
np.sin((lon2-lon1)/2.0)**2\n\n return earth_radius * 2 * np.arcsin(np.sqrt(a))\n\nif __name__ == '__main__':\n main()\n ","sub_path":"featuregen/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":37035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189907129","text":"import sys\n\ns = input()\n\nfor i in range(2**3):\n localsum = s[0]\n for j in range(3):\n if (i >> j) & 1:\n localsum += \"+\"\n else:\n localsum += \"-\"\n localsum += s[j+1]\n sum = eval(localsum)\n if sum == 7:\n localsum += \"=7\"\n print(localsum)\n sys.exit()","sub_path":"Python_codes/p03545/s576659004.py","file_name":"s576659004.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423231777","text":"from __future__ import print_function\n\nimport numpy as np\nimport random\nimport json\nimport sys\nimport os\nfrom collections import namedtuple\n\nimport networkx as nx\nfrom networkx.readwrite import json_graph\n\nversion_info = list(map(int, nx.__version__.split('.')))\nmajor = version_info[0]\nminor = version_info[1]\n# assert (major <= 1) and (minor <= 11), \"networkx major version > 1.11\"\n\nWALK_LEN=5\nN_WALKS=50\n\ndef random_flip(class_map, G, ratio=0.1):\n for n in G.nodes():\n if G.node[n]['val'] == False and G.node[n]['test'] == False:\n feat = class_map[n]\n for i, v in enumerate(feat):\n if np.random.random() < ratio:\n feat[i] = 1 - v\n class_map[n] = feat\n return class_map\n \n\ndef load_data(prefix, feats_suf=\"\", \n normalize=True, \n load_walks=False, \n corrupt_label=None):\n # corrupt_label - Function to corrupt the labels of training data\n G_data = json.load(open(prefix + \"-G.json\"))\n G = json_graph.node_link_graph(G_data)\n if isinstance(G.nodes()[0], int):\n conversion = lambda n : int(n)\n else:\n conversion = lambda n : n\n\n if len(feats_suf) == 0 and os.path.exists(prefix + \"-feats.npy\"):\n feats = np.load(prefix + \"-feats.npy\")\n elif os.path.exists(prefix+\"-feats-\"+feats_suf+\".npy\"):\n print(\"Load an alternate feature set {}\".format(feats_suf))\n feats = np.load(prefix+\"-feats-\"+feats_suf+\".npy\")\n else:\n print(\"No features present.. Only identity features will be used.\")\n feats = None\n id_map = json.load(open(prefix + \"-id_map.json\"))\n id_map = {conversion(k):int(v) for k,v in id_map.items()}\n walks = []\n class_map = json.load(open(prefix + \"-class_map.json\"))\n if isinstance(list(class_map.values())[0], list):\n lab_conversion = lambda n : n\n else:\n lab_conversion = lambda n : int(n)\n\n class_map = {conversion(k):lab_conversion(v) for k,v in class_map.items()}\n if corrupt_label is not None:\n class_map = corrupt_label(class_map, G)\n\n ## Remove all nodes that do not have val/test annotations\n ## (necessary because of networkx weirdness with the Reddit data)\n broken_count = 0\n for node in G.nodes():\n if not 'val' in G.node[node] or not 'test' in G.node[node]:\n G.remove_node(node)\n broken_count += 1\n print(\"Removed {:d} nodes that lacked proper annotations due to networkx versioning issues\".format(broken_count))\n\n ## Make sure the graph has edge train_removed annotations\n ## (some datasets might already have this..)\n print(\"Loaded data.. 
now preprocessing..\")\n for edge in G.edges():\n if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or\n G.node[edge[0]]['test'] or G.node[edge[1]]['test']):\n G[edge[0]][edge[1]]['train_removed'] = True\n else:\n G[edge[0]][edge[1]]['train_removed'] = False\n\n if normalize and not feats is None:\n from sklearn.preprocessing import StandardScaler\n train_ids = np.array([id_map[n] for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])\n train_feats = feats[train_ids]\n scaler = StandardScaler()\n scaler.fit(train_feats)\n feats = scaler.transform(feats)\n \n if load_walks:\n with open(prefix + \"-walks.txt\") as fp:\n for line in fp:\n walks.append(map(conversion, line.split()))\n\n return G, feats, id_map, walks, class_map\n\n\ndefault_config = namedtuple('config', ['g_func_name', 'g_func_args', 'eps'])\ndefault_config.g_func_name = 'barabasi_albert_graph'\ndefault_config.g_func_args = {'n': 200, 'm':10}\ndefault_config.eps = 0.5\ndefault_config.num_train_per_class = 5\ndefault_config.num_val_per_class = 5\ndefault_config.feat_dim = 50\ndef load_data_highfreq(config=default_config):\n \"\"\"Config is a namedtuple to config the random generation process\"\"\"\n g_gen_func = getattr(nx.random_graphs, config.g_func_name)\n G = g_gen_func(**config.g_func_args)\n labels_1 = frozenset(nx.algorithms.maximal_independent_set(G))\n labels_0 = frozenset([i for i in G.nodes() if i not in labels_1]) \n id_map = G.nodes() \n walks = None\n class_map = dict()\n for i in G.nodes():\n class_map[i] = [0, 1] if i in labels_1 else [1, 0]\n feats = np.array([np.random.normal(config.eps, 1.0, config.feat_dim) \\\n if i in labels_1 else \\\n np.random.normal(-config.eps, 1.0, config.feat_dim) \\\n for i in G.nodes()])\n labels_1 = list(labels_1)\n labels_0 = list(labels_0)\n np.random.shuffle(labels_1)\n np.random.shuffle(labels_0)\n train_idx = []\n train_idx.extend(labels_1[:config.num_train_per_class])\n train_idx.extend(labels_0[:config.num_train_per_class])\n val_idx = []\n val_idx.extend(labels_1[config.num_train_per_class:config.num_train_per_class+config.num_val_per_class])\n val_idx.extend(labels_0[config.num_train_per_class:config.num_train_per_class+config.num_val_per_class])\n val_set = set(val_idx)\n train_set = set(train_idx) \n for i in G.nodes():\n if i in train_set:\n G.node[i]['val'] = False\n G.node[i]['test'] = False\n elif i in val_set:\n G.node[i]['val'] = True\n G.node[i]['test'] = False\n else:\n G.node[i]['val'] = False\n G.node[i]['test'] = True\n ## Make sure the graph has edge train_removed annotations\n ## (some datasets might already have this..)\n print(\"Loaded data.. 
now preprocessing..\")\n for edge in G.edges():\n if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or\n G.node[edge[0]]['test'] or G.node[edge[1]]['test']):\n G[edge[0]][edge[1]]['train_removed'] = True\n else:\n G[edge[0]][edge[1]]['train_removed'] = False\n return G, feats, id_map, walks, class_map\n\n\ndef run_random_walks(G, nodes, num_walks=N_WALKS):\n pairs = []\n for count, node in enumerate(nodes):\n if G.degree(node) == 0:\n continue\n for _ in range(num_walks):\n curr_node = node\n for _ in range(WALK_LEN):\n next_node = random.choice(G.neighbors(curr_node))\n # self co-occurrences are useless\n if curr_node != node:\n pairs.append((node,curr_node))\n curr_node = next_node\n if count % 1000 == 0:\n print(\"Done walks for\", count, \"nodes\")\n return pairs\n\nif __name__ == \"__main__\":\n \"\"\" Run random walks \"\"\"\n graph_file = sys.argv[1]\n out_file = sys.argv[2]\n G_data = json.load(open(graph_file))\n G = json_graph.node_link_graph(G_data)\n nodes = [n for n in G.nodes() if not G.node[n][\"val\"] and not G.node[n][\"test\"]]\n G = G.subgraph(nodes)\n pairs = run_random_walks(G, nodes)\n with open(out_file, \"w\") as fp:\n fp.write(\"\\n\".join([str(p[0]) + \"\\t\" + str(p[1]) for p in pairs]))\n","sub_path":"graphsage/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"270933914","text":"import gdbmi\nimport os\nimport re\nimport difflib\nimport pprint\nimport vim\n\n\ndef extract(keys, ls):\n res = []\n for es in ls:\n ks = {}\n for e in es:\n for k in keys:\n if k in e:\n ks[k] = e[k]\n if len(ks):\n res.append(ks)\n return res\n\n\ndef break_list(result):\n result = result.strip()\n if not result:\n return []\n table = gdbmi.parse(result + '\\n\\n', True)[1][2][0]['BreakpointTable']\n body = None\n for t in table:\n if 'body' in t:\n body = t['body']\n assert body is not None\n breaks = []\n for bp in body:\n bs = bp['bkpt']\n enabled = False\n file = None\n for b in bs:\n if b.get('type') not in (None, 'breakpoint'):\n return breaks\n if 'enabled' in b:\n enabled = b['enabled'] == 'y'\n if 'fullname' in b:\n file = b['fullname']\n if 'line' in b:\n line = b['line']\n if 'number' in b:\n number = b['number']\n if not enabled:\n continue\n if file is None:\n continue\n file = os.path.normpath(file).replace('\\\\', '/')\n breaks.append((file, line, number))\n return breaks\n\n\ndef callstack(result):\n result = result.strip()\n if not result:\n return []\n stack = gdbmi.parse(result + '\\n\\n', True)[1][2][0]['stack']\n output = []\n\n for s in stack:\n try:\n mapped = {}\n for f in s['frame']:\n mapped.update(f)\n output.append('{:<4}{}'.format(mapped['level'], mapped['func']))\n except (KeyError,):\n continue\n\n return output\n\n\ndef vars(result):\n result = result.strip()\n if not result:\n return\n variables = gdbmi.parse(result + '\\n\\n', True)[1][2][0]['variables']\n out = []\n for e in extract(('name', 'value', 'type'), variables):\n try:\n value = e['value']\n m = re.match(r'^0x[0-9a-f]* -(.*)-$', value)\n if m is not None:\n def format(w):\n try:\n return int(w, 8)\n except (ValueError,):\n return ord('?')\n bs = [(format(w) if len(w) != 1 else ord(w)) for w in re.split(r'//(\\d\\d\\d)', m.group(1)) if w]\n value = ('{:02X}' * len(bs)).format(*bs)\n if value and len(value) <= 8:\n value = '0x{} ({})'.format(value, int(value, 16))\n else:\n value = '0x' + value\n #elif value.startswith('{') and value.endswith('}'):\n #value = 
'\\n'.join('{:<15}{}'.format(k.strip(), v) for k, v in eval('{\"' + value.replace('= ', '\":\"').replace(', ', '\",\"')[1:-1] + '\"}').iteritems())\n            if '\\n' in value:\n                value = '\\n    ' + '\\n    '.join(value.split('\\n'))\n            out = out + '{:<30}{}'.format(e['name'], value).split('\\n')\n        except (KeyError,):\n            pass\n\n    return out\n\n\ndef parse_frame(frame):\n    out = { 'file': '', 'line': '', 'fn': '', 'thread_id': -1 }\n    info = {}\n    for f in frame:\n        info.update(f)\n    out['file'] = info['fullname']\n    out['line'] = info['line']\n    out['fn'] = info['func']\n    out['level'] = info['level']\n\n    if out['file']:\n        out['file'] = out['file'].replace('\\\\', '/').replace('//', '/')\n\n    return out\n\n\ndef frame(result):\n    result = result.strip()\n    frame = gdbmi.parse(result + '\\n\\n', True)[1][2][0]['frame']\n    return parse_frame(frame)\n\n\ndef stopped(result):\n    result = result.strip()\n    stopped = gdbmi.parse(result + '\\n^x\\n', True)[0][0][3]\n\n    out = { 'file': '', 'line': '', 'fn': '', 'thread_id': -1 }\n\n    for s in stopped:\n        if 'frame' in s:\n            frame = s['frame']\n        if 'thread-id' in s:\n            out['thread_id'] = s['thread-id']\n\n    info = {}\n    for f in frame:\n        info.update(f)\n\n    out['line'] = info.get('line')\n    out['file'] = info.get('fullname')\n    out['fn'] = info.get('func')\n\n    if out['file']:\n        out['file'] = out['file'].replace('\\\\', '/').replace('//', '/')\n\n    return out\n","sub_path":"pythonx/vjodg.py","file_name":"vjodg.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"126007486","text":"\n\nfrom xai.brain.wordbase.nouns._ordinal import _ORDINAL\n\n# class header\nclass _ORDINALS(_ORDINAL, ):\n\tdef __init__(self,): \n\t\t_ORDINAL.__init__(self)\n\t\tself.name = \"ORDINALS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ordinal\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ordinals.py","file_name":"_ordinals.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"552940523","text":"import json\n\nresult = json.load(open(\"result.txt\", \"r\"))\n\nstg_count = 0\ndev_count = 0\ndr_count = 0\nprd_count = 0\n\nstg = {}\ndev = {}\ndr = {}\nprd = {}\nfor item in result:\n    source = result[item]['From']\n    hostname = source[source.index('@')+1 : source.index(\">\")]\n    hostname_list = hostname.replace(\"-\",\".\").split(\".\")\n    first = hostname_list[0]\n\n    if 'stg' in hostname:\n        stg[first] = stg.get(first, 0) + 1\n        stg_count = stg_count + 1\n    elif 'dev' in hostname:\n        dev[first] \n","sub_path":"work/cron/raw/env_graph.py","file_name":"env_graph.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"534605025","text":"from selenium.webdriver.common.by import By\n\nfrom woniuboss.tools.service import Service\nfrom woniuboss.tools.utility import Utility\nimport unittest\nfrom parameterized import parameterized\n\n# load the test data\ndata_config_info = Utility.get_json('..\\\\config\\\\testdata.conf')\n# query resources\ntest_qureyNetCus_info = Utility.get_excel_to_tuple(data_config_info[1])\n# add a resource\ntest_addCus_info = Utility.get_excel_to_tuple(data_config_info[2])\n# upload a resume\ntest_validateUpload_info = Utility.get_excel_to_tuple(data_config_info[3])\n\n\n\nclass TestMarket(unittest.TestCase):\n\n    @classmethod\n    def setUp(cls):\n        cls.driver=Service.get_driver('..\\\\config\\\\base.conf')\n        from woniuboss.lib.market import Market\n        
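# wrap the fresh driver in the Market page object used by every test\n        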
cls.market=Market(cls.driver)\n\n    @classmethod\n    def tearDown(cls):\n        cls.driver.quit()\n\n    # query resources\n    # @parameterized.expand(test_qureyNetCus_info)\n    # def test_qureyNetCus(self,stime,etime,expect):\n    #     qureyNetCus_data = {'stime':stime,'etime':etime}\n    #     self.market.qureyNetCus('..\\\\config\\\\base.conf',qureyNetCus_data)\n    #     if Service.is_element_present(self.driver,By.CSS_SELECTOR,'#netCus-table > thead:nth-child(1) > tr:nth-child(1) > th:nth-child(1) > div:nth-child(1)') or Service.is_element_present(self.driver,By.CSS_SELECTOR,'.no-records-found > td:nth-child(1)'):\n    #         actual = 'query_successful'\n    #     else:\n    #         actual='query_failed'\n    #     # assert\n    #     self.assertEqual(actual, expect)\n\n    # default query\n    def test_query_all(self):\n        self.market.query_all('..\\\\config\\\\base.conf')\n        if Service.is_element_present(self.driver,By.CSS_SELECTOR,'#netCus-table > thead:nth-child(1) > tr:nth-child(1) > th:nth-child(1) > div:nth-child(1)') or Service.is_element_present(self.driver,By.CSS_SELECTOR,'.no-records-found > td:nth-child(1)'):\n            actual = 'query_successful'\n        else:\n            actual='query_failed'\n        # assert\n        self.assertEqual(actual, 'query_successful')\n\n    # # add a resource\n    # @parameterized.expand(test_addCus_info)\n    # def test_addCus(self, cus_phone,cus_name, cus_email, cus_qq,cus_school,cus_major,cus_intent,cus_salary,cus_applposition,cus_age,cus_eduexp,cus_experience,cus_last_tracking,expect):\n    #     addResModel_data={'cus_phone':cus_phone,'cus_name':cus_name,'cus_email':cus_email,'cus_qq':cus_qq,\n    #                       'cus_school':cus_school,'cus_major':cus_major,'cus_intent':cus_intent,'cus_salary':cus_salary,\n    #                       'cus_applposition': cus_applposition, 'cus_age': cus_age,'cus_eduexp':cus_eduexp,'cus_experience':cus_experience,\n    #                       'cus_last_tracking': cus_last_tracking}\n    #     self.market.addCus('..\\\\config\\\\base.conf', addResModel_data)\n    #     if Service.is_element_present(self.driver,By.CSS_SELECTOR,'div.bootbox-body'):\n    #         actual='add_successful'\n    #     else:\n    #         actual='add_failed'\n    #     # assert\n    #     self.assertEqual(actual,expect)\n\n    # # upload a resume\n    # @parameterized.expand(test_validateUpload_info)\n    # def test_validateUpload(self,files_path,expect ):\n    #     upload_files_data={'files_path':files_path}\n    #     self.market.upload_files('..\\\\config\\\\base.conf',upload_files_data)\n    #     if Service.is_element_present(self.driver,By.XPATH,'/html/body/div[9]/div/div/div[2]/div'):\n    #         actual = 'upload_successful'\n    #     else:\n    #         actual='upload_failed'\n    #     # assert\n    #     self.assertEqual(actual, expect)\n\n    # read email\n    def test_read_email(self):\n        self.market.read_email('..\\\\config\\\\base.conf')\n        if Service.is_element_present(self.driver,By.CSS_SELECTOR,'p.loading-title.txt-textOneRow'):\n            actual = 'read_successful'\n        else:\n            actual='read_failed'\n        # assert\n        self.assertEqual(actual, 'read_successful')\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)","sub_path":"three snails/woniuboss2.5_UI/woniuboss_lj/woniuboss/bin/test_market.py","file_name":"test_market.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"223665163","text":"# driver script for auto generation of magma input files\nimport induced_permutations as ip\nimport time\nimport os\nimport math\nfrom numpy import prod\nimport subprocess\nimport psutil\n\ndef atomvec_to_molstring(vec):\n    letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P']\n    string = ''\n    for i, a in enumerate(vec):\n        string += letters[i]\n        if a > 1:\n            string += str(a)\n    return string\n\ndef 
kill(proc_pid):\n    process = psutil.Process(proc_pid)\n    for proc in process.children(recursive=True):\n        proc.kill()\n    process.kill()\n\n\n# for 3 atoms up to n atoms \nif not os.path.exists(\"./fundamental_invariants\"):\n    os.mkdir(\"fundamental_invariants\")\nos.chdir(\"fundamental_invariants\")\nb = 1\na = 0\nfor system_size in range(3, 9):\n    # make a directory for that number of atoms\n    dirname = str(system_size) + \"_atom_system\"\n    os.mkdir(dirname)\n    atomvectors = ip.atom_combinations(system_size)\n    \n    os.chdir(\"./{}\".format(dirname))\n    for vec in atomvectors:\n        bond_indice_permutations = ip.permute_bond_indices(vec)\n        IP = ip.induced_permutations(vec, bond_indice_permutations)\n        if IP != []:\n            #permutation_order = prod([math.factorial(i) for i in vec])\n            #if permutation_order < 10000:\n            if 1 == 1:\n                singularinput = ip.write_singular_input(sum(vec), IP)\n                mol = atomvec_to_molstring(vec) \n                os.mkdir(mol)\n                os.chdir(mol)\n                with open(\"singular.inp\", \"w\") as f:\n                    f.write(singularinput)\n                proc = subprocess.Popen([\"Singular -q singular.inp >> output\"], shell=True)\n                b = time.time()\n                try:\n                    # kill Singular if it takes longer than 1 hour to run\n                    proc.wait(timeout=3600)\n                except subprocess.TimeoutExpired:\n                    kill(proc.pid)\n                os.chdir(\"../\")\n\n    os.chdir(\"../\")\nos.chdir(\"../\")\n","sub_path":"singular_generator/timings_test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"174170480","text":"from django.conf.urls import url\nfrom introduction import views\n\n\nurlpatterns = [\n    url(r'^$', views.introduction, name='introduction'),\n    url(r'^captain/$', views.captain, name='captain'),\n    url(r'^activities/$', views.activities, name='activities'),\n    url(r'^PR/$', views.PR, name='PR'),\n    url(r'^device/$', views.device, name='device'),\n    url(r'^instruments/$', views.instruments, name='instruments'),\n    url(r'^center/$', views.center, name='center'),\n    url(r'^teacher/$', views.teacher, name='teacher'),\n    url(r'^inside/$', views.inside, name='inside'),\n    url(r'^beginning/$', views.beginning, name='beginning'),\n    url(r'^Christmas/$', views.Christmas, name='Christmas'),\n    url(r'^home/$', views.home, name='home'),\n    url(r'^send/$', views.send, name='send'),\n    url(r'^category/(?P<category_id>[0-9]+)/$', views.category, name='category'),\n]\n\n\n\n\n\n\n","sub_path":"introduction/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"245639334","text":"__author__ = 'Dan Mapes'\n__doc__ = 'Equipment Circuit Search\\n' \\\n          'Will find Equipment by Search\\\n           finds and zooms to element selected.'\\\n          'v4.1 will find circuited equipment by panel and circuit'\n\nfrom System.Collections.Generic import List\nimport Autodesk.Revit.DB as DB\nfrom Autodesk.Revit.DB import WorksharingUtils, WorksharingTooltipInfo\nfrom pyrevit import forms, script\n\ndoc = __revit__.ActiveUIDocument.Document\nuidoc = __revit__.ActiveUIDocument\n\neq_circuits = []\neq_ids = []\n\nelec_fix = DB.FilteredElementCollector(doc) \\\n    .OfCategory(DB.BuiltInCategory.OST_ElectricalFixtures) \\\n    .WhereElementIsNotElementType() \\\n    .ToElements()\n\nelec_equip = DB.FilteredElementCollector(doc) \\\n    .OfCategory(DB.BuiltInCategory.OST_ElectricalEquipment) \\\n    .WhereElementIsNotElementType() \\\n    .ToElements()\n\nselected_equip = []\n\nfor eq in elec_fix:\n    selected_equip.append(eq)\n\nfor eq in 
elec_equip:\n    selected_equip.append(eq)\n\nfor eq in selected_equip:\n    element = eq\n    eq_id = element.Id\n    category_name = element.Category.Name\n    if category_name == 'Electrical Equipment':\n        try:\n            if element.MEPModel.ElectricalSystems.Size >= 1:\n                for e in element.MEPModel.ElectricalSystems:\n                    selected_disconnect_circuit = e.Name\n        except:\n            not_circuited = 'Element not circuited.'\n            # print not_circuited\n            # script.exit()\n        selected_disconnect_panel = element.LookupParameter('Supply From').AsString()\n    else:\n        selected_disconnect_circuit = element.LookupParameter('Circuit Number').AsString()\n        selected_disconnect_panel = element.LookupParameter('Panel').AsString()\n    circuit_info = selected_disconnect_panel +' '+ selected_disconnect_circuit\n    eq_circuits.append(circuit_info)\n    tip_info = 'Search... {0} Circuits in {1}'.format(len(eq_circuits), doc.Title)\n\nnames_and_ids = zip(eq_circuits, eq_ids)\nnames_and_ids_dict = dict(names_and_ids)\n\nsearched_eq = forms.SearchPrompt.show(eq_circuits, switches=[], search_tip = tip_info)\nif not searched_eq:\n    script.exit()\nelse:\n    out = script.get_output()\n    for e in selected_equip:\n        element = e\n        eq_id = element.Id\n        category_name = element.Category.Name\n        if category_name == 'Electrical Equipment':\n            try:\n                if element.MEPModel.ElectricalSystems.Size >= 1:\n                    for e in element.MEPModel.ElectricalSystems:\n                        selected_disconnect_circuit = e.Name\n            except:\n                not_circuited = 'Element not circuited.'\n                # print not_circuited\n                # script.exit()\n            selected_disconnect_panel = element.LookupParameter('Supply From').AsString()\n        else:\n            selected_disconnect_circuit = element.LookupParameter('Circuit Number').AsString()\n            selected_disconnect_panel = element.LookupParameter('Panel').AsString()\n        circuit_info = selected_disconnect_panel +' '+ selected_disconnect_circuit\n        if circuit_info == searched_eq:\n            selected_eq = e\n            selected_eq_id = e.Id.IntegerValue\n            wti = WorksharingUtils.GetWorksharingTooltipInfo(doc, selected_eq.Id)\n            workset = selected_eq.LookupParameter('Workset').AsValueString()\n            eq_ids.append(e.Id)\n            uidoc.Selection.SetElementIds(List[DB.ElementId](eq_ids))\n            uidoc.ShowElements(selected_eq.Id)\n","sub_path":"pyMapes.extension/_deprecated tools/Elec Circuit Search_v4.1.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"491417053","text":"import heapq \nfrom math import inf, isfinite\nimport datetime\nfrom solver import solveTSP\nfrom mapDrawer import drawMap\nfrom tqdm import tqdm\n\nmapNodeToIdx = {} # map node to index\nmapIdxToNode = {} # map index to node\n\n# function to find the shortest path between nodes a and b\n# Dijkstra's method, based on the pseudocode reference in the book \"Pemrograman Kompetitif Dasar\"\ndef findShortestPath(a, b, adjListAll, cntAllNode):\n    path = []\n    \n    dist = [inf for i in range(cntAllNode)]\n    pred = [-1 for i in range(cntAllNode)]\n    isVisited = [False for i in range(cntAllNode)]\n\n    pq = []\n    heapq.heapify(pq)\n    heapq.heappush(pq,(0,a))\n    dist[a] = 0\n    while(len(pq)!=0):\n        (curDist, u) = heapq.heappop(pq)\n        if (not isVisited[u]):\n            isVisited[u] = True\n            for v in adjListAll[u]:\n                if (dist[v] > dist[u] + adjListAll[u][v]):\n                    dist[v] = dist[u] + adjListAll[u][v]\n                    pred[v] = u\n                    heapq.heappush(pq, (dist[v],v))\n\n    # find the path from a -> b\n    nodeNow = b\n    while(nodeNow!=-1):\n        path.append(nodeNow)\n        nodeNow = pred[nodeNow]\n    path = path[::-1]\n    \n    return dist[b], path\n\n# function 
to read edge data from the txt file\ndef getEdgeDataFromFile(cityOption, cntAllNode):\n    adjListAll = [{} for i in range(cntAllNode)]\n    fEdge = open(f'../data/{cityOption}/EdgeData.txt', 'r').readlines()\n    for i in range(cntAllNode):\n        adjListAll[i][i] = 0\n\n    for line in tqdm(fEdge):\n        parseLine = [x for x in line.split()]\n        u = int(parseLine[1])\n        v = int(parseLine[2])\n        weight = float(parseLine[3])\n        adjListAll[u][v] = weight\n        adjListAll[v][u] = weight\n    \n    return adjListAll\n\n# function to initialize the sub graph\ndef initSubGraph(nodeKantor, listNodeTujuan, adjListAll, cntAllNode):\n    listPath = [[[] for i in range(len(listNodeTujuan)+1)] for e in range(len(listNodeTujuan)+1)]\n    adjMatrixSubGraph = [[inf for i in range(len(listNodeTujuan)+1)] for e in range(len(listNodeTujuan)+1)]\n\n    listNodeTujuan.append(nodeKantor) # temporary only\n\n    for u in tqdm(listNodeTujuan):\n        for v in listNodeTujuan:\n            uMapped = mapNodeToIdx[u]\n            vMapped = mapNodeToIdx[v]\n            if (u==v):\n                adjMatrixSubGraph[uMapped][vMapped] = 0\n                listPath[uMapped][vMapped] = [u]\n            else:\n                if (isfinite(adjMatrixSubGraph[vMapped][uMapped])):\n                    adjMatrixSubGraph[uMapped][vMapped] = adjMatrixSubGraph[vMapped][uMapped]\n                    path = []\n                    for e in reversed(listPath[vMapped][uMapped]):\n                        listPath[uMapped][vMapped].append(e)\n                else:\n                    dist, path = findShortestPath(u,v,adjListAll,cntAllNode)\n                    # print(dist)\n                    adjMatrixSubGraph[uMapped][vMapped] = dist\n                    listPath[uMapped][vMapped] = path \n\n    \n\n    listNodeTujuan.pop()\n\n    return adjMatrixSubGraph, listPath\n\n# binary search, credit to geeksForGeeks\ndef binarySearch(arr, x, n):\n    low = 0\n    high = n-1\n    while(low<=high):\n        mid = (high+low)//2\n        if arr[mid][0] < x: \n            low = mid + 1\n        elif arr[mid][0] > x: \n            high = mid - 1\n        else: \n            return mid \n    return -1\n\n# main function\ndef exec():\n    # process the input\n    print('masukkan pilihan kota (1. Oldenburg, 2. 
SanFrancisco): ', end='')\n    optNumber = int(input())\n    cityOption = \"\"\n    cntAllNode = 0\n    if (optNumber==2):\n        cityOption = \"SanFrancisco\"\n        cntAllNode = 174956\n    else:\n        cityOption = \"Oldenburg\"\n        cntAllNode = 6105\n\n    print('masukkan simpul kantor pusat:', end=' ')\n    nodeKantor = int(input())\n    print('masukkan jumlah titik tujuan:', end=' ')\n    nTujuan = int(input())\n    print('masukkan titik-titik tujuan (pisahkan dengan spasi)')\n    listNodeTujuan = [int(x) for x in input().split()]\n    print('masukkan jumlah kurir:', end=' ')\n    jumlahKurir = int(input())\n    \n    # node mapping\n    mapNodeToIdx[nodeKantor] = 0\n    mapIdxToNode[0] = nodeKantor\n    for i in range(nTujuan):\n        mapNodeToIdx[listNodeTujuan[i]] = i+1\n        mapIdxToNode[i+1] = listNodeTujuan[i]\n\n    adjListAll = getEdgeDataFromFile(cityOption, cntAllNode)\n    \n    adjMatrixSubGraph, listPath = initSubGraph(nodeKantor, listNodeTujuan, adjListAll, cntAllNode)\n    \n    # print\n    print(\"done\")\n\n    # single TSP\n    resSingleTSP = solveTSP(adjMatrixSubGraph, listPath, nodeKantor, listNodeTujuan, mapIdxToNode,mapNodeToIdx) \n    print(resSingleTSP)\n\n    # clustering\n\n    # sort the node list\n    resSingleTSP.sort() # sorted first so it can be binary searched later\n    print(resSingleTSP)\n    idx = binarySearch(resSingleTSP,nodeKantor,len(resSingleTSP)) # find the index of the office node\n    orderedListNodeTujuan = []\n    nextNode = resSingleTSP[idx][1] # keep the node right after the office node\n    for i in range(1,len(resSingleTSP)):\n        idx = binarySearch(resSingleTSP,nextNode,len(resSingleTSP))\n        orderedListNodeTujuan.append(resSingleTSP[idx][0])\n        nextNode = resSingleTSP[idx][1]\n    print(orderedListNodeTujuan)\n\n    # split the nodes evenly among couriers\n    idxNow = 0\n    listNodeEveryKurir = []\n    for i in range(jumlahKurir):\n        jumlahNode = nTujuan//jumlahKurir + (1 if i < nTujuan % jumlahKurir else 0) \n        temp = []\n        maxIdx = jumlahNode+idxNow\n        while(idxNow < maxIdx):\n            temp.append(orderedListNodeTujuan[idxNow])\n            idxNow += 1\n        listNodeEveryKurir.append(temp)\n\n    # solve a TSP for every courier\n    resTSPkurir = []\n    for i in range(jumlahKurir):\n        resTSPkurir.append(solveTSP(adjMatrixSubGraph, listPath, nodeKantor, listNodeEveryKurir[i], mapIdxToNode, mapNodeToIdx))\n\n    # print the route and total cost of every courier\n    for i in range(jumlahKurir):\n        print(f\"Kurir {i+1}\")\n        totalCost = 0\n        for pairNode in resTSPkurir[i]:\n            print(pairNode[0],\" -> \",pairNode[1],\" : \", listPath[mapNodeToIdx[pairNode[0]]][mapNodeToIdx[pairNode[1]]] )\n            totalCost += adjMatrixSubGraph[mapNodeToIdx[pairNode[0]]][mapNodeToIdx[pairNode[1]]]\n        print(f\"\\nTotal Cost : {totalCost}\")\n        print(\"--------------------------------------------\")\n    \n    # draw map\n    drawMap(adjListAll, mapNodeToIdx, listPath, jumlahKurir, resTSPkurir, cntAllNode, cityOption)\n\n\nif __name__ == \"__main__\":\n    exec()","sub_path":"src/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"17128909","text":"# Authors: Karthik Ravindra Rao - raokarth@usc.edu & Praneet Kalluri - pkalluri@usc.edu\n\nimport random\nimport math\nimport numpy as np\nimport pylab as pl\n\nk = 3\nmaxiterations = 1000000\n\ndef getrandomcentrioids(listpoints,k):\n    centroidlist = []\n    for x in range(k):\n        val = random.choice(listpoints)\n        centroidlist.append(val)\n    return centroidlist\n\ndef stopcheck(oldcentroids,updatedcentroids,iterations):\n    if iterations > maxiterations:\n        return True\n    return oldcentroids == updatedcentroids\n\ndef pointlabel(listpoints,centroids):\n    for pointvar in listpoints:\n        x = pointvar['x']\n        y = pointvar['y']\n        mindist = 999999\n        for centroidvar in centroids:\n            cx = centroidvar['x']\n            cy = centroidvar['y']\n            dist = math.hypot(x-cx,y-cy)\n            if dist < mindist:\n                pointvar['lx'] = cx\n                pointvar['ly'] = cy\n                mindist = dist\n    return listpoints\n\ndef makearray(listvariable):\n    nplist = []\n    for var in listvariable:\n        tmplist =[]\n        x = var['x']\n        y = var['y']\n        tmplist.append(x)\n        tmplist.append(y)\n        nplist.append(tmplist)\n    return 
nplist\n\ndef calcentroid(listvar):\n a = np.array(listvar)\n var = np.mean(a, axis=0)\n return (var)\n\ndef getnewcentroid(labelpoint,centroids,k):\n centroidlist1 = []\n centroidlist2 = []\n centroidlist3 = []\n tmpvar = 0\n for centvar in centroids:\n cx = centvar['x']\n cy = centvar['y']\n for var in labelpoint:\n lx = var['lx']\n ly = var['ly']\n if ((cx == lx) and (cy ==ly)):\n if tmpvar == 0:\n centroidlist1.append(var)\n elif tmpvar == 1:\n centroidlist2.append(var)\n elif tmpvar == 2:\n centroidlist3.append(var)\n else:\n print('Point error!')\n tmpvar += 1\n nparr1=makearray(centroidlist1)\n nparr2=makearray(centroidlist2)\n nparr3=makearray(centroidlist3)\n newcentroid = []\n val1 = calcentroid(nparr1)\n tmpdict = {}\n x = val1[0]\n y = val1[1]\n tmpdict['x'] = x\n tmpdict['y'] = y\n newcentroid.append(tmpdict)\n val2 = calcentroid(nparr2)\n tmpdict = {}\n x = val2[0]\n y = val2[1]\n tmpdict['x'] = x\n tmpdict['y'] = y\n newcentroid.append(tmpdict)\n val3 = calcentroid(nparr3)\n tmpdict = {}\n x = val3[0]\n y = val3[1]\n tmpdict['x'] = x\n tmpdict['y'] = y\n newcentroid.append(tmpdict)\n return newcentroid\n\ndef printclusters(listpoints,centroids):\n tmpvar = 0\n for centvar in centroids:\n lxlist = []\n lylist = []\n cx = centvar['x']\n cy = centvar['y']\n for var in listpoints:\n lx = var['lx']\n ly = var['ly']\n if ((cx == lx) and (cy == ly)):\n lxlist.append(var['x'])\n lylist.append(var['y'])\n if tmpvar == 0:\n pl.plot(lxlist,lylist,'rx')\n elif tmpvar == 1:\n pl.plot(lxlist, lylist, 'gx')\n else:\n pl.plot(lxlist, lylist, 'bx')\n tmpvar += 1\n pl.plot(cx, cy, 'ko')\n pl.show()\n\ndef main():\n listpoints = []\n iterations = 0\n oldcentroids = None\n inputfile = open(\"clusters.txt\",\"r\")\n filecontent = inputfile.read().splitlines()\n for val in filecontent:\n datadict = {}\n indval = val.split(',')\n datadict['x'] = float(indval[0])\n datadict['y'] = float(indval[1])\n datadict['lx'] = -999999\n datadict['ly'] = -999999\n listpoints.append(datadict)\n centroids = getrandomcentrioids(listpoints,k)\n while not stopcheck(oldcentroids,centroids,iterations):\n oldcentroids = centroids\n iterations += 1\n labelpoint = pointlabel(listpoints,centroids)\n centroids = getnewcentroid(labelpoint,centroids,k)\n printclusters(listpoints,centroids)\n print('Centroids for K-means: ')\n for x in centroids:\n print(x)\n\nif __name__ == '__main__':\n main()","sub_path":"HW2/hw2-Kmeans.py","file_name":"hw2-Kmeans.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150127832","text":"from datetime import datetime\nfrom django.core.urlresolvers import reverse\nfrom django.core.context_processors import csrf\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.urlresolvers import reverse\nimport settings\nfrom forms import *\nfrom models import *\nfrom utils import *\nfrom utils.functions import *\n\ndef auxiliary_login_required(f):\n def wrap(request, *args, **kwargs):\n auxiliary_id = kwargs['auxiliary_id']\n auxiliary = Auxiliary.objects.get(pk = auxiliary_id)\n del kwargs['auxiliary_id']\n if auxiliary.person == request.user:\n return f(request, auxiliary, *args, **kwargs)\n else:\n return HttpResponseRedirect('/')\n \n wrap.__doc__ = f.__doc__\n wrap.__name__ = f.__name__\n return 
wrap\n\n@auxiliary_login_required\ndef index(request, auxiliary):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n return render_to_response('auxiliary/index.html', {\n 'roles': roles,\n 'auxiliary': auxiliary\n })\n \n@auxiliary_login_required\ndef report(request, auxiliary):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n report_descriptions = ReportDescription.objects.all()\n for report_description in report_descriptions:\n report_description.reports = Report.objects.filter(description = report_description).filter(group__auxiliary = auxiliary)\n return render_to_response('auxiliary/report.html', {\n 'roles': roles,\n 'auxiliary': auxiliary,\n 'report_descriptions': report_descriptions,\n })\n \n@auxiliary_login_required\ndef unmark_as_corrected(request, auxiliary, report_id):\n report = Report.objects.get(pk = report_id)\n if report.group.auxiliary != auxiliary:\n return HttpResponseRedirect('/')\n report.corrected = False\n report.save()\n url = reverse('prototipo.views_auxiliary.report', kwargs = {'auxiliary_id': auxiliary.id})\n return HttpResponseRedirect(url)\n \n@auxiliary_login_required\ndef mark_as_validated(request, auxiliary, report_id):\n report = Report.objects.get(pk = report_id)\n if report.group.auxiliary != auxiliary:\n return HttpResponseRedirect('/')\n if not report.corrected:\n return HttpResponseRedirect('/')\n report.validation_date = datetime.now()\n report.save()\n url = reverse('prototipo.views_auxiliary.report', kwargs = {'auxiliary_id': auxiliary.id})\n return HttpResponseRedirect(url)\n \n@auxiliary_login_required\ndef message(request, auxiliary):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n message_rings = []\n for group in auxiliary.group_set.all():\n group_message_rings = MessageRing.objects.filter(group = group).filter(include_assistant_and_auxiliary = True)\n message_rings.extend(group_message_rings)\n \n return render_to_response('auxiliary/message.html', {\n 'auxiliary': auxiliary,\n 'roles': roles, \n 'message_rings': message_rings,\n })\n@auxiliary_login_required\ndef message_details(request, auxiliary, message_ring_id):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n message_ring = MessageRing.objects.get(pk = message_ring_id)\n messages = message_ring.message_set.all().order_by('date')\n \n return render_to_response('auxiliary/message_details.html', {\n 'auxiliary': auxiliary,\n 'roles': roles, \n 'messages': messages,\n 'message_ring': message_ring,\n })\n\n@auxiliary_login_required\ndef message_add(request, auxiliary):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n \n if request.method == 'POST':\n form = AssistantAuxiliaryMessageRingForm(request.POST)\n if form.is_valid():\n message_ring = MessageRing()\n message_ring.group = form.cleaned_data['group']\n message_ring.category = form.cleaned_data['category']\n message_ring.include_coordinator = form.cleaned_data['include_coordinator']\n message_ring.include_assistant_and_auxiliary = True\n message_ring.include_group = form.cleaned_data['include_group']\n message_ring.title = form.cleaned_data['title']\n message_ring.save()\n \n message = Message()\n message.ring = message_ring\n message.body = form.cleaned_data['body']\n message.remitent = request.user\n message.save()\n \n send_notification_mails(auxiliary, message_ring)\n \n return HttpResponseRedirect(reverse('prototipo.views_auxiliary.message', kwargs = {'auxiliary_id': 
auxiliary.id}))\n else:\n form = AssistantAuxiliaryMessageRingForm.create_form(auxiliary)\n \n return render_to_response('auxiliary/message_add.html', {\n 'roles': roles,\n 'auxiliary': auxiliary,\n 'form': form,\n }, context_instance = RequestContext(request))\n \n@auxiliary_login_required\ndef message_reply(request, auxiliary, message_ring_id):\n roles = RoleList(request.user)\n roles.set_default(Auxiliary, auxiliary.id)\n \n if request.method == 'POST':\n form = MessageReplyForm(request.POST)\n if form.is_valid():\n message = Message()\n message.ring = MessageRing.objects.get(pk = message_ring_id)\n message.body = form.cleaned_data['body']\n message.remitent = request.user\n message.save()\n \n send_notification_mails(auxiliary, message.ring)\n \n return HttpResponseRedirect(reverse('prototipo.views_auxiliary.message_details', kwargs = {'auxiliary_id': auxiliary.id, 'message_ring_id': message_ring_id}))\n else:\n form = MessageReplyForm()\n return render_to_response('auxiliary/message_reply.html', {\n 'roles': roles,\n 'auxiliary': auxiliary,\n 'message_reply_form': form,\n }, context_instance = RequestContext(request))\n \ndef send_notification_mails(auxiliary, message_ring):\n send_new_message_mail(message_ring.group.assistant.person)\n if message_ring.include_group:\n group = message_ring.group\n for student in group.student_set.all():\n send_new_message_mail(student.person)\n if message_ring.include_coordinator:\n send_new_message_mail(auxiliary.course_instance.coordinator)\n","sub_path":"prototipo/views_auxiliary.py","file_name":"views_auxiliary.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167345490","text":"import os, sys, logging, datetime, time, random, simplejson, argparse\n\nsys.path.append('../')\n\nfrom app import db\nfrom app.models import *\n\nif __name__ == '__main__':\n r = Role()\n \n r.name = 'Administrator'\n \n # _allowed_b_level = []\n # _allowed_b = []\n # for b in BoundaryLevelDesc.query.all():\n # _allowed_b_level.append(b.id)\n # for b in Boundary.query.all():\n \n # _allowed_b.append( ( b.level_id, b.parent_id, b.id ) )\n \n # r.allowed_boundaries = {'BoundaryLevel' : _allowed_b_level, 'Boundaries' : _allowed_b}\n \n \n # _allowed_pg, _allowed_pg_map, _allowed_p = [], [], []\n # for pg in ProductGroup.query.all():\n # _allowed_pg.append((pg.parent_id, pg.id))\n \n # for pg_map in ProductGroupMap.query.all():\n # _allowed_pg_map.append((pg_map.group_id, pg_map.product_id))\n \n # for p in Product.query.all():\n # _allowed_p.append( (p.id) )\n \n # r.allowed_products = { 'ProductGroup' : _allowed_pg, 'ProductGroupMap' : _allowed_pg_map, 'Product' : _allowed_p }\n \n db.session.add(r)\n db.session.commit()\n \n u = UserAccount()\n u.username = 'localuser'\n u.account_status = AccountStatusType.ACTIVE\n u.password = 'localuser'\n # u.allowed_boundaries = r.allowed_boundaries\n # u.allowed_products = r.allowed_products\n # u.role_id = r.id\n u.auth_module_name = 'ruther_local'\n db.session.add(u)\n db.session.commit()\n\n ur = UserRole()\n ur.role_id = r.id\n ur.user_id = u.id\n db.session.add(ur)\n db.session.commit()\n \n # u = UserAccount()\n # u.username = 'ldapuser'\n # u.account_status = AccountStatusType.ACTIVE\n # u.password = 'ldapuser'\n # u.allowed_boundaries = r.allowed_boundaries\n # u.allowed_products = r.allowed_products\n # u.role_id = r.id\n # u.auth_module_name = 'ldap'\n # db.session.add(u)\n # db.session.commit()\n 
","sub_path":"apps/scripts/create_user.py","file_name":"create_user.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"402704858","text":"#!/usr/bin/env python\n\nfrom os.path import join, realpath\nimport sys; sys.path.insert(0, realpath(join(__file__, \"../../\")))\n\nfrom collections import defaultdict\nfrom decimal import Decimal\nimport pandas as pd\nfrom typing import (\n List,\n Dict\n)\nimport unittest\n\nimport hummingsim\nfrom hummingsim.backtest.ddex_order_book_loader import DDEXOrderBookLoader\nfrom wings.clock import (\n Clock,\n ClockMode\n)\nfrom wings.events import (\n OrderBookTradeEvent,\n OrderBookEvent,\n)\nfrom wings.order_book import OrderBook\nfrom wings.order_book_message import (\n OrderBookMessage,\n OrderBookMessageType\n)\n\nfrom test import OrderBookUtils\nfrom wings.event_logger import EventLogger\n\n\nclass DDEXOrderBookLoaderUnitTest(unittest.TestCase):\n start_time: float = pd.Timestamp(\"2018-12-03\", tz=\"UTC\").timestamp()\n end_time: float = pd.Timestamp(\"2018-12-04\", tz=\"UTC\").timestamp()\n snapshot_time: float = pd.Timestamp(\"2018-12-03 01:19:15.563000\", tz=\"UTC\").timestamp()\n\n def setUp(self):\n self.clock: Clock = Clock(ClockMode.BACKTEST, start_time=self.start_time, end_time=self.end_time)\n self.order_book_loader: DDEXOrderBookLoader = DDEXOrderBookLoader(\"WETH-DAI\", \"WETH\", \"DAI\")\n self.order_book: OrderBook = self.order_book_loader.order_book\n self.clock.add_iterator(self.order_book_loader)\n\n def tearDown(self):\n self.order_book_loader.close()\n\n def test_messages_ordering(self):\n messages: List[OrderBookMessage] = self.order_book_loader.fetch_order_book_messages(self.start_time,\n self.end_time)\n snapshots: List[OrderBookMessage] = [m for m in messages if m.type.value == 1]\n diffs: List[OrderBookMessage] = [m for m in messages if m.type.value == 2]\n trades: List[OrderBookMessage] = [m for m in messages if m.type.value == 3]\n timestamps: pd.Series = pd.Series([m.timestamp for m in messages])\n\n timestamp_diffs: pd.Series = pd.Series(timestamps[1:].values - timestamps[0:-1].values)\n self.assertTrue(all(timestamp_diffs >= 0))\n self.assertGreater(timestamp_diffs.mean(), 0)\n self.assertGreater(len(snapshots), 0)\n self.assertGreater(len(trades), 0)\n self.assertGreater(len(diffs), 0)\n\n def test_order_book_snapshots(self):\n self.clock.backtest_til(int(self.snapshot_time))\n pre_bids, pre_asks = self.order_book.snapshot\n self.clock.backtest_til(int(self.snapshot_time) + 1)\n post_bids, post_asks = self.order_book.snapshot\n\n self.assertGreater(len(pre_bids), 0)\n matching, total = OrderBookUtils.compare_books(pre_bids, post_bids)\n self.assertLess(total - matching, 10)\n\n self.assertGreater(len(pre_asks), 10)\n matching, total = OrderBookUtils.compare_books(pre_asks, post_asks)\n self.assertLess(total - matching, 10)\n\n def test_order_book_diffs(self):\n start: int = int(self.snapshot_time) + 1\n end: int = int(self.snapshot_time) + 1800\n self.clock.backtest_til(start)\n pre_bids, pre_asks = self.order_book.snapshot\n self.clock.backtest_til(end)\n post_bids, post_asks = self.order_book.snapshot\n\n book_messages: List[OrderBookMessage] = [\n message\n for message in self.order_book_loader.fetch_order_book_messages(start - 1, end)\n if message.type in {OrderBookMessageType.DIFF, OrderBookMessageType.SNAPSHOT}\n ]\n bids_map: Dict[Decimal, Dict[str, Dict[str, any]]] = defaultdict(lambda: {})\n asks_map: Dict[Decimal, 
Dict[str, Dict[str, any]]] = defaultdict(lambda: {})\n\n        for msg in book_messages:\n            if msg.type is OrderBookMessageType.DIFF:\n                book_map: Dict[Decimal, Dict[str, Dict[str, any]]] = bids_map\n                if msg.content[\"side\"] == \"sell\":\n                    book_map = asks_map\n                msg_type: str = msg.content[\"type\"]\n                price: Decimal = Decimal(msg.content[\"price\"])\n                order_id: str = msg.content[\"orderId\"]\n                if msg_type == \"receive\":\n                    book_map[price][order_id] = msg.content\n                elif msg_type == \"done\":\n                    if order_id in book_map[price]:\n                        del book_map[price][order_id]\n            elif msg.type is OrderBookMessageType.SNAPSHOT:\n                bids_map.clear()\n                asks_map.clear()\n                for bid_entry in msg.content[\"bids\"]:\n                    order_id: str = bid_entry[\"orderId\"]\n                    price: Decimal = Decimal(bid_entry[\"price\"])\n                    amount: str = bid_entry[\"amount\"]\n                    bids_map[price][order_id] = {\"orderId\": order_id, \"availableAmount\": amount}\n                for ask_entry in msg.content[\"asks\"]:\n                    order_id: str = ask_entry[\"orderId\"]\n                    price: Decimal = Decimal(ask_entry[\"price\"])\n                    amount: str = ask_entry[\"amount\"]\n                    asks_map[price][order_id] = {\"orderId\": order_id, \"availableAmount\": amount}\n\n        compare_bids: pd.DataFrame = OrderBookUtils.get_compare_df(pre_bids, post_bids, diffs_only=True)\n        compare_asks: pd.DataFrame = OrderBookUtils.get_compare_df(pre_asks, post_asks, diffs_only=True)\n        compare_bids.fillna(value=0.0, inplace=True)\n        compare_asks.fillna(value=0.0, inplace=True)\n\n        self.assertGreater(len(compare_bids), 0)\n        self.assertGreater(len(compare_asks), 0)\n\n        post_bids.set_index(\"price\", inplace=True)\n        post_asks.set_index(\"price\", inplace=True)\n\n        for price, orders in bids_map.items():\n            total_amount: float = 0\n            if len(orders) > 0:\n                total_amount = sum(float(item[\"availableAmount\"]) for item in orders.values())\n            if total_amount == 0:\n                self.assertTrue(float(price) not in post_bids.index,\n                                f\"{price} should not exist in the post_bids snapshot.\")\n            else:\n                self.assertTrue(float(price) in post_bids.index,\n                                f\"{price} should exist in the post_bids snapshot.\")\n                self.assertAlmostEqual(float(total_amount), post_bids.loc[float(price)].amount,\n                                       msg=f\"total amount for {price} should be {total_amount}.\")\n\n        for price, orders in asks_map.items():\n            total_amount: float = 0\n            if len(orders) > 0:\n                total_amount = sum(float(item[\"availableAmount\"]) for item in orders.values())\n            if total_amount == 0:\n                self.assertTrue(float(price) not in post_asks.index,\n                                f\"{price} should not exist in the post_asks snapshot.\")\n            else:\n                self.assertTrue(float(price) in post_asks.index,\n                                f\"{price} should exist in the post_asks snapshot.\")\n                self.assertAlmostEqual(total_amount, post_asks.loc[float(price)].amount,\n                                       msg=f\"total amount for {price} should be {total_amount}.\")\n\n    def test_order_book_trades(self):\n        start: int = int(self.snapshot_time)\n        end: int = int(self.snapshot_time) + 86400\n        self.clock.backtest_til(start)\n\n        event_recorder: EventLogger = EventLogger()\n        self.order_book.add_listener(OrderBookEvent.TradeEvent, event_recorder)\n        self.clock.backtest_til(end)\n\n        trade_messages: List[OrderBookMessage] = [\n            message\n            for message in self.order_book_loader.fetch_order_book_messages(start, end)\n            if message.type is OrderBookMessageType.TRADE\n        ]\n\n        events: List[OrderBookTradeEvent] = event_recorder.event_log\n        self.assertGreater(len(trade_messages), 0)\n        self.assertEqual(len(trade_messages), len(events))\n        for trade_message, trade_event in zip(trade_messages, events):\n            self.assertAlmostEqual(trade_message.timestamp, trade_event.timestamp)\n            
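# each emitted event must carry the same price and amount as the recorded trade message\n            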
self.assertAlmostEqual(float(trade_message.content[\"price\"]), trade_event.price)\n            self.assertAlmostEqual(float(trade_message.content[\"amount\"]), trade_event.amount)\n\n\nif __name__ == \"__main__\":\n    hummingsim.set_data_path(realpath(join(__file__, \"../../data\")))\n    unittest.main()\n","sub_path":"test/test_ddex_order_book_loader.py","file_name":"test_ddex_order_book_loader.py","file_ext":"py","file_size_in_byte":8382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"47353595","text":"from wagtail.core.blocks import StreamBlock\n\nfrom theme.blocks import HeadingBlock, JumbotronBlock, ParagraphBlock\n\n\nclass HomeBodyBlock(StreamBlock):\n    heading_block = HeadingBlock()\n    paragraph_block = ParagraphBlock()\n    class Meta:\n        required = False\n        block_counts = {\n            'heading_block': {'max_num': 1},\n            'paragraph_block': {'max_num': 1},\n        }\n        icon = ''\n        template = \"home/blocks/home_body_block.html\"\n\n\nclass HomeJumbotronBlock(StreamBlock):\n    jumbotron_block = JumbotronBlock()\n    class Meta:\n        required = False\n        block_counts = {\n            'jumbotron_block': {'max_num': 1}\n        }\n        icon = ''\n        template = \"home/blocks/home_jumbotron_block.html\"\n","sub_path":"home/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"214838593","text":"#!/usr/bin/python\nfrom collections import defaultdict\nimport sys\ntraining_file = open(sys.argv[1], \"r\")\ncounts = defaultdict(lambda: 0)\ntotal_count = 0\n\nfor line in training_file:\n    line = line.strip()\n    word_list = line.split(\" \")\n    word_list.append(\"</s>\")\n    for word in word_list:\n        counts[word] += 1\n        total_count += 1\n\nmodel_file = open(\"train.txt\", \"w\")\nfor word, count in sorted(counts.items()):\n    prob = float(counts[word]) / total_count\n    model_file.write(\"%s %f\\n\" % (word, prob))\n\nmodel_file.close()\n","sub_path":"tuto01/pra01.py","file_name":"pra01.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"417973935","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport requests\n\ndef mkdir(path):\n    path = path.strip()\n    isExists = os.path.exists(path)\n    if not isExists:\n        print('创建文件夹:',path)\n        os.makedirs(path)\n        print('创建成功!')\n        return True\n\n    else:\n        print('文件夹已经存在!')\n        return False\n\n\ndef save_img(url,file_name):\n    print('正在下载图片:',url)\n    try:\n        img = requests.get(url, timeout=5)\n    except requests.exceptions.ConnectionError:\n        print('【错误】图片无法下载')\n        return\n\n    with open(file_name,'wb') as f:\n        f.write(img.content)\n\ndef fn():\n    mkdir('img')\n    save_img('http://imgsrc.baidu.com/imgad/pic/item/9d82d158ccbf6c8154bdd5ccb63eb13533fa4008.jpg','img/1.jpg')\n\nfn()","sub_path":"py/crawer_libs.py","file_name":"crawer_libs.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"627986397","text":"from django.shortcuts import render, redirect\nfrom .models import employee\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializer import employeeSerializer\n\n# Create your views here.\ndef insertform(request):\n    context = {}\n    return render(request, 'insertform.html', context)\n\ndef insert(request):\n    emp = employee()\n    emp.firstname = request.POST['fname']\n    emp.lastname = 
request.POST['lname']\n emp.employeeid = request.POST['emp_id']\n emp.save()\n return redirect('getall')\n\ndef getall(request):\n employee1 = employee.objects.all()\n serializer = employeeSerializer(employee1, many=True)\n context = {\"data\": serializer.data}\n return render(request, 'loginsuccessful.html', context)\n\ndef updateform(request):\n context = {}\n return render(request,'updateform.html', context)\n\ndef updatecheck(request):\n emp = employee.objects.filter(employeeid=request.POST['eid']).first()  # .get() raises DoesNotExist instead of returning None; .first() makes the None check below work\n if emp is not None:\n context = {\n 'employeeid': emp.employeeid,\n 'fname': emp.firstname,\n 'lname':emp.lastname,\n }\n return render(request, 'updatecheckform.html', context)\n else:\n return render(request, 'updateform.html', {'error':'Employee id does not exist'})\n\ndef update(request):\n emp = employee.objects.get(employeeid=request.POST['emp_id'])\n emp.firstname = request.POST['fname']\n emp.lastname = request.POST['lname']\n emp.employeeid = request.POST['emp_id']\n emp.save()\n return redirect('getall')\n\n\ndef deleteform(request):\n context = {}\n return render(request, 'deleteform.html', context)\n\ndef deletecheck(request):\n emp = employee.objects.filter(employeeid=request.POST['eid']).first()  # same fix as updatecheck: avoid DoesNotExist so the else branch is reachable\n if emp is not None:\n context = {\n 'employeeid': emp.employeeid,\n 'fname': emp.firstname,\n 'lname':emp.lastname,\n }\n return render(request, 'deletecheckform.html', context)\n else:\n return render(request, 'deleteform.html', {'error':'Employee id does not exist'})\n\ndef delete(request):\n emp = employee.objects.get(employeeid=request.POST['emp_id'])\n emp.delete()  # actually remove the record; the original returned without deleting\n return redirect('getall')\n\n","sub_path":"employee/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"607459160","text":"import threading\nimport sublime\nimport sublime_plugin\n\nfrom .base import BaseBlame\nfrom .templates import blame_inline_phantom_css, blame_inline_phantom_html_template\nfrom .settings import (\n pkg_settings,\n PKG_SETTINGS_KEY_INLINE_BLAME_ENABLED,\n PKG_SETTINGS_KEY_INLINE_BLAME_DELAY,\n)\n\nINLINE_BLAME_PHANTOM_SET_KEY = \"git-blame-inline\"\n\n\nclass BlameInlineListener(BaseBlame, sublime_plugin.ViewEventListener):\n @classmethod\n def is_applicable(cls, settings):\n return pkg_settings().get(PKG_SETTINGS_KEY_INLINE_BLAME_ENABLED)\n\n def __init__(self, view):\n super().__init__(view)\n self.phantom_set = sublime.PhantomSet(view, INLINE_BLAME_PHANTOM_SET_KEY)\n self.timer = None\n self.delay_seconds = (\n pkg_settings().get(PKG_SETTINGS_KEY_INLINE_BLAME_DELAY) / 1000\n )\n\n def extra_cli_args(self, line_num):\n args = [\"-L\", \"{0},{0}\".format(line_num), \"--date=relative\"]\n return args\n\n def _view(self):\n return self.view\n\n def show_inline_blame(self):\n if self.view.is_dirty():\n # If there have already been unsaved edits, stop the git child process from being run at all.\n return\n\n phantoms = []\n sels = self.view.sel()\n # @todo Support showing inline blame for multiple carets?\n # @body Maybe with a sanity check that there aren't too many (more than 10?)\n line = self.view.line(sels[0])\n if line.size() < 2:\n # avoid weird behaviour of regions on empty lines\n # < 2 is to check for newline character\n return\n pos = line.end()\n row, _ = self.view.rowcol(line.begin())\n anchor = sublime.Region(pos, pos)\n try:\n blame_output = self.get_blame_text(self.view.file_name(), line_num=row + 1)\n except Exception:\n return\n blame = next(\n (\n 
self.parse_line_with_relative_date(line)\n for line in blame_output.splitlines()\n ),\n None,\n )\n if not blame:\n return\n summary = \"\"\n # Uncommitted changes have only zeros in sha\n if blame[\"sha\"] != \"00000000\":\n try:\n summary = self.get_commit_message_first_line(\n blame[\"sha\"], self.view.file_name()\n )\n except Exception as e:\n return\n body = blame_inline_phantom_html_template.format(\n css=blame_inline_phantom_css,\n author=blame[\"author\"],\n date=blame[\"relative_date\"],\n qs_sha_val=blame[\"sha\"],\n summary_separator=\" · \" if summary else \"\",\n summary=summary,\n )\n phantom = sublime.Phantom(\n anchor, body, sublime.LAYOUT_INLINE, self.handle_phantom_button\n )\n phantoms.append(phantom)\n\n # Dispatch back onto the main thread to serialize a final is_dirty check.\n sublime.set_timeout(lambda: self.maybe_insert_phantoms(phantoms), 0)\n\n def maybe_insert_phantoms(self, phantoms):\n if not self.view.is_dirty():\n self.phantom_set.update(phantoms)\n\n def show_inline_blame_handler(self):\n self.view.erase_phantoms(INLINE_BLAME_PHANTOM_SET_KEY)\n if self.timer:\n self.timer.cancel()\n self.timer = threading.Timer(self.delay_seconds, self.show_inline_blame)\n self.timer.start()\n\n def on_selection_modified_async(self):\n self.show_inline_blame_handler()\n\n def on_post_save_async(self):\n # Redisplay the blame after the file is saved, because there will be\n # no call to on_selection_modified_async after save.\n self.show_inline_blame_handler()\n","sub_path":"src/blame_inline.py","file_name":"blame_inline.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442645298","text":"file = open('input-day2.txt','r')\r\nsum = 0\r\n\r\nfor line in file.readlines():\r\n row_min = 99999999\r\n row_max = 0\r\n for number in line.split('\\t'):\r\n if int(number) > row_max:\r\n row_max = int(number)\r\n if int(number) < row_min:\r\n row_min = int(number)\r\n sum += row_max - row_min\r\n\r\nprint(sum)\r\n\r\nfile = open('input-day2.txt','r')\r\nsum = 0\r\n\r\nfor line in file.readlines():\r\n for number1 in line.split('\\t'):\r\n for number2 in line.split('\\t'):\r\n if int(number1) != int(number2):\r\n if int(number1) % int(number2) == 0:\r\n sum += int(number1) / int(number2)\r\n\r\nprint(sum)\r\n","sub_path":"day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31280065","text":"import os\nimport json\n\nf = open('setting.json', encoding='utf-8')\nsetting_data = json.loads(f.read())\n\ndef obj_locate():\n res = [\"\", \"\"]\n\n for root, dirs, files in os.walk(\".\\\\pic\\\\\", topdown=False):\n for name in files:\n if -1 != name.find(f\"{setting_data['pic_prefix']}\") and -1 != name.find('.jpg'):\n if name >= res[0]:\n res[1] = res[0]\n res[0] = name\n continue\n if name >= res[1]:\n res[1] = name\n return res\n","sub_path":"os_file_search.py","file_name":"os_file_search.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"422914446","text":"# Remove Linked List Elements\n\n# Given the head of a linked list and an integer val, remove all the nodes of\n# the linked list that has Node.val == val, and return the new head.\n\n# Example 1:\n\n# Input: head = [1,2,6,3,4,5,6], val = 6\n# Output: [1,2,3,4,5]\n\n# Definition for singly-linked list.\n# class ListNode:\n# 
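
# [Aside on the remove_linked_list_elements.py record nearby] The Solution there
# strips matching head nodes in a separate loop. A common alternative is a
# sentinel (dummy) node, which removes that special case; a minimal hedged
# sketch with plain classes so it runs standalone (names chosen here):
class _Node:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt

def remove_elements(head, val):
    dummy = _Node(0, head)               # sentinel sits in front of the real head
    curr = dummy
    while curr.next:
        if curr.next.val == val:
            curr.next = curr.next.next   # splice out the matching node
        else:
            curr = curr.next
    return dummy.next                    # correct even if the old head was removed

# quick check: 1->2->6->3->6 with val=6 should leave [1, 2, 3]
_head = None
for _v in reversed([1, 2, 6, 3, 6]):
    _head = _Node(_v, _head)
_head = remove_elements(_head, 6)
_out = []
while _head:
    _out.append(_head.val)
    _head = _head.next
assert _out == [1, 2, 3]
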
def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeElements(self, head: ListNode, val: int) -> ListNode:\n result = head\n while result and result.val == val:\n result = result.next\n curr = result\n while curr and curr.next is not None:\n if curr.next.val == val:\n curr.next = curr.next.next\n else: \n curr = curr.next\n return result","sub_path":"easy/remove_linked_list_elements.py","file_name":"remove_linked_list_elements.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"249497354","text":"from sql_connection import *\n\ndef get_total_by_type( p_type, p_date_min, p_date_max ):\n query = (\"SELECT pt_id FROM PACKAGE_TYPE WHERE pt_name='\"+p_type+\"'\")\n content = new_query( query ).fetchall()[0][0]\n query = (\"EXEC packageTypeAmount \"+str(content)+\", '\"+p_date_min+\"', '\"+p_date_max+\"'\")\n content = new_query( query ).fetchall()\n return content[0][1]\n\ndef get_total_amount( p_branch ):\n query = (\"EXEC branchTotalAmount '\"+p_branch+\"'\")\n content = new_query( query ).fetchall()\n return content[0][0]\n\ndef get_packages_per_client( p_date_min, p_date_max ):\n query = (\"EXEC packagesPerClient '\"+p_date_min+\"', '\"+p_date_max+\"'\")\n content = new_query( query ).fetchall()\n orders = []\n for i in content:\n orders += [{\"id_client\":i[0],\n \"name\":i[1],\n \"lname\":i[2],\n \"id_package_branch\":i[3],\n \"reception_date\":i[4] }]\n return str(orders)\n\ndef get_avg_packages( p_date_min, p_date_max ):\n query = (\"EXEC averageAmountPerClient '\"+p_date_min+\"', '\"+p_date_max+\"'\")\n content = new_query( query ).fetchall()\n clients = []\n for i in content:\n clients += [{\"name\":i[0],\"lname\":i[1],\"average\":i[2]}]\n return str(clients)","sub_path":"aplicacion/src/admin_controller.py","file_name":"admin_controller.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522741182","text":"# -*- coding: utf-8 -*-\nimport urllib.request # 요청을 보내서 html을 받아오는 lib\nimport os # 파일 생성 및 유지에 필요한 lib\nimport requests # get을 보내기 위해서 사용하는 lib\nimport re # 파일의 이름을 정규표현식을 사용하기 위해서 사용하는 lib\nimport time # 현재의 시간을 구하기 위해서 사용하는 lib\nimport datetime # 현재의 시간을 구하기 위해서 사용하는 lib\nimport hashlib # 파일제목을 hash를 통해서 재설정함\nimport socket # timeout 관련 예외처리 lib\nimport sys # sys.argv에 사용하기 위한 lib\nimport http # http.client.RemoteDisconnected오류를 처리하기 위한 lib\nimport ssl # ssl오류를 처리하기 위한 lib\nfrom bs4 import BeautifulSoup # 받아온 html을 분석하기 위한 lib\nimport json ## json으로 bing_request를 묶기 위해서 사용하는 lib\nimport argparse#입력변수를 parse하기위해서 사용하는 lib\nfrom _socket import gaierror#gaierror에러를 처리하기위해서 사용하는 lib\nimport io##utf-8로 파일을 저장시키기 위해서 io.open을 사용해주고 encoding='utf-8-sig'로 저장합니다.\nimport random\nfrom collections import OrderedDict#배열의 값들을 중복 제거하기 위한 lib\nsocket.setdefaulttimeout(15)#소켓의 timeout을 설정시켜놓음으로써 urlretrieve의 다운로드 도중에 끊기는 현상을 방지합니다.\n# 조건 :로그 폴더의 이름은 항상 logs이여야 한다.\nclass crawler_:\n def __init__(self):\n self.success_url_memory = [] # url을 파일에서 읽어와 메모리에 사용하도록 하는 list입니다. 모든 날짜를 받습니다.\n self.success_log_memory = [] # log를 메모리상에 올려서 append시켜서 사용하도록 하는 list입니다. 오늘 하루의 날짜를 받습니다. \n self.success_path_memory = [] # memory상에 성공한 path를 올리는 list입니다. 
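
# [Aside on the admin_controller.py record above] Its queries are built by string
# concatenation, which is injection-prone. A hedged sketch of the same first call
# using driver-level '?' placeholders, assuming a pyodbc-style connection object
# (the `conn` parameter and cursor API are assumptions, not from the record):
def get_total_by_type(conn, p_type, p_date_min, p_date_max):
    cur = conn.cursor()
    # placeholders let the driver escape values instead of pasting them into SQL
    cur.execute("SELECT pt_id FROM PACKAGE_TYPE WHERE pt_name=?", (p_type,))
    pt_id = cur.fetchall()[0][0]
    cur.execute("EXEC packageTypeAmount ?, ?, ?", (pt_id, p_date_min, p_date_max))
    return cur.fetchall()[0][1]
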
모든 날짜를 받습니다.\n self.total_log_memory = [] # log은 비교할 필요가 없기에 파일에서 읽어오지 않는다.\n # 두 개의 변수를 이용해서 메모리상에 url을 올려놓고\n # 1페이지가 넘어갈 때마다 일괄적으로 메모장에 작성을 하는 방식을 사용할 것 입니다.\n # 해당 텍스트들은 리스트를 사용하여 관리합니다.\n self.delete_log_memory=[]\n self.temp_total_log_memory=[]\n self.search_word_history_memory=[]#어떤 검색어를 입력했고 언제 실행이 됐으며 어떤 사이트를 사용했는 등에 관해서 저장하는 메모리.\n crawler_choice,DOWNLOAD_FOLDER_PATH,TARGET_FILE_NAME,TARGET_FILE_TYPE,freshness,AUTO_CHECK,DOWNLOAD_FILE_NUMBER,CLEAR_BOOL =self.parse_args()#크롤러 선택 변수, 폴더 경로 설정 변수, 파일 이름 변수, date에 관련된 변수\n self.freshness=freshness #Bing에서 사용하는 변수로 day,week,month,all이 옵니다.\n self.crawler_choice=crawler_choice[0] # daum | bing이 오는 변수. 크롤러를 선택하는 변수입니다.\n self.TARGET_FILE_NAME = TARGET_FILE_NAME[0] # 내가 검색하고자 하는 파일의 이름이 무엇인지 저장하는 변수입니다.\n self.TARGET_FILE_TYPE = TARGET_FILE_TYPE[0] # 내가 검색하고자 하는 파일의 유형이 무엇인지 저장하는 변수입니다.\n self.LOG_FILE_TYPE = TARGET_FILE_TYPE[0] # 로그에 작성되는 파일 타입입니다.\n self.extend_filetype_list=['XLSX','PPTX','DOCX']\n if(TARGET_FILE_TYPE[0].upper() in self.extend_filetype_list):\n self.LOG_FILE_TYPE=TARGET_FILE_TYPE[0]\n self.TARGET_FILE_TYPE=TARGET_FILE_TYPE[0][0:3]\n \n self.DOWNLOAD_FOLDER_PATH = self.download_folder_rename(DOWNLOAD_FOLDER_PATH[0]) # hwp파일을 담아놓는 폴더의 위치를 갖고 있는 변수입니다.\n self.LOG_FOLDER_PATH = self.DOWNLOAD_FOLDER_PATH + \"logs/\" # log를 담아놓을 수 있는 폴더의 위치를 갖고 있는 변수입니다.\n self.SUCCESS_LOG_FILE_NAME = '_success_log.txt' # 성공한 로그를 담아두는 파일의 이름을 지정하는 변수입니다.\n self.TOTAL_LOG_FILE_NAME = '_total_log.txt' # 실패한 로그를 담아두는 파일의 이름을 지정하는 변수입니다.\n self.SEARCH_WORD_HISTORY_FILE_NAME='_search_word_history.txt' #검색한 시간과 검색어, 검색 페이지, 검색 변수를 저장하는 로그이다.\n self.get_address = [] # def split_web에서 html연결을 통해서 얻어낸 url을 저장하는 배열이다.\n self.set_current_time() #현재의 시간을 가져오는 변수입니다.\n self.today_Ymd = self.today.strftime('%Y-%m-%d') # 오늘의 시간정보를 ymd식으로 표현합니다.\n self.today_YmdHMS = self.today.strftime('[%Y-%m-%d_%H:%M:%S]') # 오늘의 시간정보를 Ymd HMS로 표현입니다.\n self.SUCCESS_LOG_FILE_PATH = self.LOG_FOLDER_PATH + self.today_Ymd + self.SUCCESS_LOG_FILE_NAME # ex)c:\\경로\\2016-07-25_success_log_text.txt\n self.TOTAL_LOG_FILE_PATH = self.LOG_FOLDER_PATH + self.today_Ymd + self.TOTAL_LOG_FILE_NAME # 모든 로그 파일을 만들 경로를 설정합니다. ex)#c:\\경로\\2016-07-25_total_log_text.txt\n self.SEARCH_WORD_HISTORY_FILE_PATH = self.LOG_FOLDER_PATH + self.today_Ymd + self.SEARCH_WORD_HISTORY_FILE_NAME # 검색어의 히스토리를 저장하는 로그입니다. \n self.HEADER = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1', 'Accept-language':'fr'} # request.get에 보내기 위한 header저장 변수입니다. \n self.offset='0'#bing에서 사용하는 skip변수\n self.log_file_reset() # 해당 함수는 다운로드 폴더를 만들고 로그파일,폴더,다운로드 폴더를 만듭니다. \n self.success_log_temp_file_path, self.success_log_will_delete_file_path = self.atomic_create_file_reset(self.SUCCESS_LOG_FILE_PATH) # 해당 함수는 success_log_file이 atomic_create_file 도중에 종료시에 exist_file이 사라지고 will_delete만 있는 것을 정정합니다.\n self.total_log_temp_file_path, self.total_log_will_delete_file_path = self.atomic_create_file_reset(self.TOTAL_LOG_FILE_PATH) # 해당 함수는 total_log_file이 atomic_create_file 도중에 종료시에 exist_file이 사라지고 will_delete만 있는 것을 정정합니다.\n self.search_word_history_temp_file_path, self.search_word_history_delete_file_path = self.atomic_create_file_reset(self.SEARCH_WORD_HISTORY_FILE_PATH)\n self.success_url_file_to_memory() # 메모리에 올려서 사용을 하기위해서 사용해줍니다. #해당 함수는 self.success_url_memory의 내용을 채워줍니다. 한 번만 실행됩니다.\n self.log_file_to_memory() # 해당 함수는 self.success_log_memory의 내용을 채워줍니다. 한 번만 실행됩니다. 
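
# [Aside] success_url_memory above exists only for membership tests, so a set
# gives O(1) lookups where a list scan is O(n). A minimal hedged sketch of the
# same dedupe idea (the tab-separated log layout is taken from this record,
# the regex is an assumption about where the URL column sits):
import re

def load_seen_urls(log_path):
    seen = set()
    with open(log_path, encoding='utf-8') as fp:
        for line in fp:
            m = re.search(r'https?://\S+', line)  # URL field of the TSV log line
            if m:
                seen.add(m.group(0))
    return seen
# usage inside a download loop: `if url in seen_urls: continue`
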
\n self.success_path_file_to_memory() # self.success_path_file_to_memory는 성공한 로그의 파일 경로들을 list형태로\n # self.success_path_memory를 채워줍니다. 한 번만 실행됩니다. \n if(CLEAR_BOOL==True):\n self.check_disk(self.success_path_memory) # success_path_memory에는 모든 날짜의 파일 경로를 갖고 있습니다. 만약 이 momory에 없이 폴더 안에 파일이 있다면 삭제를 합니다. \n self.AUTO_CHECK=AUTO_CHECK #쿼리문을 자동적으로 입력해서 무한적으로 검색하기 위함을 사용하는 변수를 체크하는 변수\n self.last_page_bool=False #마지막페이지를 체크하는 bool변수입니다.\n self.bool_Bing_finish=False #마지막페이지를 체크하는 bool변수입니다.\n self.DOWNLOAD_FILE_NUMBER=int(DOWNLOAD_FILE_NUMBER)#다운로드 받을 파일의 수를 저장하는 변수입니다.\n self.DOWNLOAD_FILE_COUNT=0 #다운로드 받을 파일의 수를 체크하는 변수입니다.\n print(\"crawler\")\n \n def parse_args(self):\n parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description=\"크롤러 옵셜 설정 변수\",usage='''%(prog)s -s [bing|daum] -p FILE_PATH -q QUERY_STRING -t FILE_TYPE -d [all|month|week|day] (-a) (-n NUMBER) \\n\\nexample:%(prog)s -s bing -p c:/download/ -q 신문 -t hwp -d week\\nexample:%(prog)s -s bing -p /home/abc/download/ -q 신문 -t hwp -d day -a -n 500\\nexample:%(prog)s -s daum -p /home/abc/download/ -q 신문 -t hwp\n \\nexample:%(prog)s -s google -p e:/example/ -q \"\" -a -t hwp -d day -n 500 -d week''')\n parser.add_argument('-s','--site',nargs=1,action=\"store\",choices=['bing','daum','google'],required=True,metavar='SITE',help ='검색할 사이트 명. -s [daum | bing | google]')\n parser.add_argument('-p','--path',nargs=1, action='store',required=True,metavar='PATH', help = '저장시킬 폴더명. -p c:/download/')\n parser.add_argument('-q','--query',nargs=1, action='store',required=True,metavar='SEARCH_WORD', help = '검색할 검색어 명. -q \"검색어명\"')\n parser.add_argument('-t','--type',nargs=1, action='store',required=True,metavar='FILE_TYPE', help = '검색할 검색어의 파일 유형. -t hwp doc ppt xls ..')\n parser.add_argument('-d','--date', action='store',metavar='DATE',choices=['all','month','week','day'],help = '''bing, google에서만 사용가능. 검색어의 파일date를 설정한다. 필수값은 아닙니다.\\n-d all, month, week, day''')\n parser.add_argument('-a','--auto', action='store_true',help='''자동검색을 사용하기 위해서 -a,--atuo를 붙이면 query문에 자동적으로 단어를 입력해줍니다. 필수값은 아닙니다.''')\n parser.add_argument('-n','--number',default=1000,action='store',metavar='NUMBER',help='''몇 개의 파일을 받을 지에 대해서 카운트해줍니다. 기본 값은 1000입니다. 필수값은 아닙니다.''')\n parser.add_argument('-c','--cleardisk',action='store_true',help='''디스크를 검사하기 위해서 -c,--clear를 붙이면 시작할 때 디스크를 검사합니다. 디스크 검사 대상은 로그에 없는 파일을 삭제하거나 파일 용량이 3KB이하인 파일을 삭제합니다.''')\n parser.print_help()\n args = parser.parse_args()\n try:\n if(args.site[0]=='daum'):\n if args.date[0]:\n print(args.site[0]+\"에서 입력하신 -d 옵션인 \"+args.date+\"는 사용되지 않습니다.\")\n time.sleep(2)\n except:\n pass\n if(args.site[0]=='bing' or args.site[0]=='google'):\n if(args.date==None):\n print(\"-d [all, month, week, day] --date [all, month, week, day]를 입력하여 주십시오.\")\n exit(1)\n print(args.site,args.path,args.query,args.type,args.date)\n return args.site, args.path, args.query, args.type, args.date, args.auto, args.number, args.cleardisk\n \n def atomic_create_file_reset(self, exist_file_path): # atomic_create_file함수를 하던 도중에 종료시, 불온전한 경로들이 전개됩니다. #해당 함수는 경로를 바로잡아주기 위해서 사용합니다. \n temp_file_path = re.sub(self.today_Ymd, 'temp_' + self.today_Ymd, exist_file_path) # c:/경로/2016-07-25_success_log_text.txt -> c:/경로/temp_2016-07-25....txt\n will_delete_file_path = re.sub(self.today_Ymd, 'will_delete_file_' + self.today_Ymd, exist_file_path) # c:/경로/2016-07-25_success_log_text.txt -> c:/경로/will_delete_file_2016-07-25....txt \n if(os.path.isfile(temp_file_path)): # 경우의 수 두 가지이다. 
temp파일이 생성되고 기존exist가 will로 안바뀐 상태와 temp파일이 생성되고 exist가 will_delelte가 될 때 이다. \n os.remove(temp_file_path) # 우선 1번 temp파일이 있고 exist가 생성시에는 항상 temp파일은 지워줘야 하기 때문에 os.remove()를 이 함수에 들어오면 실행시킨다.\n if(os.path.isfile(will_delete_file_path)): # 2번은 exist가 will_delete가 될 때, exist가 존재하지 않을 때이다. 이 때 고려해야 할 것은 폴더를 새로만들어서 어떤 텍스트파일도 없을 경우이다. \n if(os.path.isfile(exist_file_path) == False): # 그렇기 때문에 exist_file_path이 없는 경우에 관한 if문이 밖으로 나오게 된다면 폴더에 빈 상태일 때 의도하지 않은 상황이 나타날 것이다. \n os.rename(will_delete_file_path, exist_file_path) # 그래서 will_delete로 검사를 한 후에 있다는 것이 확인되면 한번 더 기존 log파일이 없는 것을 확인한 후에 rename을 해야한다.\n else: # temp_file이 존재하고 exist_file이 존재하고 will_delete_file이 존재할 경우\n os.remove(will_delete_file_path)\n \n return temp_file_path, will_delete_file_path\n \n def atomic_create_file(self, exist_file_path, temp_file_path, will_delete_file_path, memory):\n # 파일을 write하는 도중에 오류를 방지하기 위해서, temp파일을 만들어서 작성을 하고 temp파일이 작성이 된다면\n # 기존 파일의 이름을 바꾼 후에 temp파일을 기존 파일의 이름으로 바꾸는 과정을 하는 함수입니다.\n with io.open(temp_file_path, \"w\",encoding='utf-8-sig') as log_file:\n #utf-8로 파일을 저장시키기 위해서 io.open을 사용해주고 encoding='utf-8-sig'로 저장합니다.\n # 성공한 로그 파일을 불러와 글을 작성합니다.\n for log_line in memory:\n # crawler객체에서 success_log_memory 리스트를 가져와서 한 줄 한 줄 읽습니다.\n log_file.write(log_line)\n # 성공한 로그의 파일 경로에 있는 log 파일에 각 한 줄 한 줄을 작성합니다.\n os.rename(exist_file_path, will_delete_file_path) # 지금 존재하는 파일을 삭제할 파일로 변경합니다.\n os.rename(temp_file_path, exist_file_path) # temp파일을 존재할 파일로 이름을 변경합니다. #os.rename은 os.rename(기존 경, 바꿀 경로)입니다.\n os.remove(will_delete_file_path) # temp파일이 기존 파일로 되었다면 지우기로 할 파일을 지워줍니다.\n \n def set_current_time(self): # 현재의 시간을 구하는 함수입니다.\n # 현재 시간을 구하는 함수입니다. #print_switch값이 True가 되면 현재 시간을 출력해줍니다.\n self.today = datetime.datetime.now() # 현재 시간에 대한 객체를 생성합니다.\n self.today_Ymd = self.today.strftime('%Y-%m-%d') # 오늘의 시간정보를 ymd로 표현\n self.today_YmdHMS = self.today.strftime('[%Y-%m-%d_%H:%M:%S]') # 오늘의 시간정보를 Ymd HMS로 표현\n \n def recode_current_time(self, search_word='',url='', file_path=''): # 현재 시간을 출력하고 log_text에 기록하는 함수입니다.\n # 반환 값으로 success_log_text와 fail_log_text를 반환합니다.\n self.set_current_time() # 현재 시간 정보를 가져옵니다.\n print(\"다운로드 time:\", self.today_YmdHMS) # 현재 시간을 출력합니다.\n success_log_text = self.today_YmdHMS + \"\\tDone\\t\" + search_word +\"\\t\" +url + \"\\t\" + file_path + \"\\n\"\n # 성공했을 때 작성해야할 로그의 내용\n # [현재 시간] \\t url \\t file_path\n fail_log_text = self.today_YmdHMS + \"\\tFail\\t\" + search_word + \"\\t\" + url + \"\\tERROR\\t\"\n # 실패했을 때 작성해야할 로그의 내용\n # [현재 시간] \\t url \\t file_path \\t err내용\n return success_log_text, fail_log_text\n\n def success_url_file_to_memory(self): # 성공한 로그파일의 url을 읽어오는 함수입니다.\n # 해당 함수는 __init__할 때만 사용되며 추후에 url값을 비교하기 위해서 사용합니다.\n # memory에서는 모든 날짜의 url을 갖고 있습니다.\n # 로그를 기록할 때에는 url값을 비교하여 이미 존재하는 url은 다운로드 받지 않게 합니다. 따라서 로그를 작성하는 것에는 영향을 미치지 않습니다.\n allday_success_log_file_list = self.allday_log_file_to_a_day_log_file() # allday_log_file_to_a_day_log_file은 폴더안의 모든 success_log의 경로값을 반환해줍니다.\n for a_day_success_log_file in allday_success_log_file_list: # 모든 날의 success_log값에서 하루의 success_log의 파일의 제목값을 하나씩 돌립니다.\n with open(a_day_success_log_file, \"rb\") as a_day_success_log_file_read: # a_day_success_log_file=하루의 log파일 제목값을 읽기모드로하여, a_day_success_log_file_read라는 파일 객체를 생성을 해줍니다.\n for text_line in a_day_success_log_file_read: # 하루의 로그 파일을 읽어서 한 줄 한 줄 for문을 돌리고 밑의 내용은 url을 읽어오는 과정입니다. \n try:\n temp_text = re.findall(\"http.+\\t\",str(text_line.decode('utf-8')))\n result_text = re.sub('\\t', '', temp_text[0])\n # 우리는 \\t를 사용할 필요가 없기 때문에 마지막 단어 \\t를 삭제하여줍니다. 
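
# [Aside] The temp-file / rename / remove dance that atomic_create_file performs
# can be collapsed with os.replace, which atomically overwrites the destination
# on both POSIX and Windows (Python 3.3+). A minimal sketch, not a drop-in for
# the method above (it skips the will_delete intermediate entirely):
import os

def atomic_write(path, lines):
    tmp = path + '.tmp'
    with open(tmp, 'w', encoding='utf-8-sig') as fp:
        fp.writelines(lines)
    os.replace(tmp, path)  # atomic swap; no window where the log is missing
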
또한 log가 불순물이 있을 수 있습니다.\n # 그럴 시에 그 열은 list에서 []로 처리되어 공백이기에 c[0]을 호출합니다. 이 때 []는 아무 내용이 없기 때문에\n # 오류를 출력합니다. 그렇기 때문에 except처리를 해줍니다.\n self.success_url_memory.append(result_text)\n # 메모리에 올리기 위해서 각 url을 리스트로 처리해줍니다. 이것을 갖고 다운 받을 url과 비교 파일 경로를 설정할 url과 비교를 합니다.\n except IndexError: # 없을 경우\n pass\n \n def success_path_file_to_memory(self): # 성공한 로그파일의 path를 읽어오는 함수입니다.\n # 해당 함수는 __init__할 때만 사용되며 시작시에 path값으로 파일들을 삭제할 때만 사용됩니다.\n allday_success_log_file_list = self.allday_log_file_to_a_day_log_file() # allday_log_file_to_a_day_log_file은 폴더안의 모든 log의 경로값을 반환해줍니다.\n for a_day_success_log_file in allday_success_log_file_list: # 모든 날의 success_log값에서 하루의 success_log의 파일의 제목값을 하나씩 돌립니다.\n with open(a_day_success_log_file, \"r\",encoding='utf-8') as a_day_success_log_file_read: # a_day_success_log_file=하루의 log파일 제목값을 읽기모드로하여, a_day_success_log_file_read라는 파일 객체를 생성을 해줍니다.\n # 성공한 로그파일의 파일 경로를 읽어옵니다.\n for text_line in a_day_success_log_file_read:\n try:\n temp_text=re.split(\"[\\t\\n]\",text_line)#\\t나 \\n을 지점으로 split을 함.\n #split을 했을 때 4번째에 파일경로가 오기에 인덱스 3번 호출\n self.success_path_memory.append(temp_text[4]) # success_path_memory에 넣어줍니다.\n except IndexError: # 없을 경우\n pass\n \n def allday_log_file_to_a_day_log_file(self): # 로그 폴더내의 모든 파일을 가져와서 success_log_file을 뽑아내는 함수입니다.\n allday_success_log_file_list = []\n allday_log_file = os.listdir(self.LOG_FOLDER_PATH)\n # 성공한 경로의 파일을 메모리로 올리는 함수입니다. 해당 함수는 __init__할 때만 사용되며 추후에 파일 경로를 비교하고 만약\n # log파일의 내용과 다르다는 것을 파일이 존재하지 않는다는 것으로 인식해서 삭제시킵니다.\n # 해당 path는 여러 날짜의 log를 다 검사해야합니다.\n for a_day_log_file in allday_log_file: # 모든 날의 로그파일을 for문을 돌려서 하루의 로그파일을 뽑아냅니다.\n if('success' in a_day_log_file): # 모든 날의 로그파일 중에서 success가 들어갔다면\n allday_success_log_file_list.append(self.LOG_FOLDER_PATH + a_day_log_file) # allday_success_log_file_list에 그 날을 넣어줍니다. \n return allday_success_log_file_list \n \n def make_search_word_history_log(self,page_number,memory,freshness):\n self.set_current_time()\n log_text=str(self.today_YmdHMS)+\"\\t\"+self.crawler_choice+\"\\t\"+self.TARGET_FILE_NAME+\"\\t\"+self.LOG_FILE_TYPE+\"\\t\"+str(page_number)+\"\\t\"+freshness+\"\\n\"\n memory.append(log_text)\n return memory\n \n def log_file_to_memory(self): # 로그 파일을 메모리에 올리는 함수입니다.\n self.write_to_memory(self.SUCCESS_LOG_FILE_PATH, self.success_log_memory) # success_log_file의 경로와 메모리를 입력시키면 경로에 대한 파일 객체를 생성해 메모리에 올립니다.\n self.write_to_memory(self.TOTAL_LOG_FILE_PATH, self.total_log_memory) # total_log_file의 경로와 메모리를 입력시키면 경로에 대한 파일 객체를 생성해 메모리에 올립니다.\n self.write_to_memory(self.SEARCH_WORD_HISTORY_FILE_PATH,self.search_word_history_memory)\n \n def write_to_memory(self, log_file_path, log_memory): # 로그 파일을 메모리에 올리는 기능을 하는 함수입니다.\n with open(log_file_path, \"r\",encoding='utf-8') as log_file_read: # 모든 로그파일을 읽어옵니다. 읽어온 것을 file_read객체를 생성해줍니다. \n for text_line in log_file_read:\n log_memory.append(text_line) # 각 객체를 한 줄 씩 읽어와서 total_log_memory에 입력시킵니다.\n \n def check_url(self, url): # 고유한 성질을 갖는 url을 체크합니다. #변수로 받은 url을 memory의 url과 비교해서 True/False로 존재여부를 반환합니다.\n return_value = url in self.success_url_memory # 메모리에 올라온 url중 성공한 것을 temp_url이라 지정합니다.\n return return_value # True/False 반환\n \n def check_disk(self, log_file_path): # 다운로드 폴더의 list를 만들어서 메모리에 올라온 파일 경로와 비교해서 메모리에 없는 파일이 있다면 삭제합니다.\n # 디스크 안에 파일이 log에 있는 파일 경로들 중 하나가 아니라면 디스크 안에 파일을 삭제한다. 처음 시작했을 때 file_path들이 메모리 상에 올라오고\n # 파이썬 라이브러리를 통해서 폴더 안의 내용들을 각 값들로 하여서 그 값들을 메모리의 값과 비교하는 구조이다.\n files_in_folder = os.listdir(self.DOWNLOAD_FOLDER_PATH)#폴더안에 파일들을 리스트로 저장시킨다.\n # os.listdir은 list형식으로 해서 폴더 안의 내용들을 return한다. 
반환값은 파일명만 반환한다. 따라서 파일 경로를 지정해줘야한다.\n print(\"잠시만 기다려주십시오. 디스크를 검사하고 있습니다.\\n\")\n for index_number_A in range(len(files_in_folder)):# 폴더 안에 있는 파일들을 0번인덱스부터 불러냅니다.\n myfile_check = False # 내가 갖고 있는 파일들을 True/False로 구분합니다.\n #files_in_folder[index_number_A] #파일만 있는 경로\n #self.DOWNLOAD_FOLDER_PATH + files_in_folder[index_number_A]#파일의 절대경로\n try:#해당 라인부터는 용량이 3KB이하인 파일을 삭제하는 코드입니다.\n if(os.path.getsize(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A])<=3072):#폴더안의 파일들을 index순으로 뽑아내서 3072byte이하라면 삭제합니다.\n print(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A]+\" :%dByte 파일 삭제.\\n\"%os.path.getsize(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A]))#print\n os.remove(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A])#해당 파일들을 삭제합니다.\n self.find_del_log(files_in_folder[index_number_A])#파일을 지우고 또한 로그도 지워야하기 때문에 해당 파일경로를 delete_log_memory에 저장시켜놓습니다.\n except (PermissionError,FileNotFoundError,IndexError) as e:#만약 파일을 찾지 못 할 경우에 오류를 출력합니다.\n print(e) \n for index_number_B in range(len(log_file_path)): # 메모리에 올라온 파일 list들을 0번 인덱스부터 불러냅니다. \n myfile_check = files_in_folder[index_number_A] in self.split_log_file_path_slash(log_file_path[index_number_B]) # 메모리에 올라온 파일 경로들과 폴더안에 있는 파일을 비교합니다. 만약 둘이 같다면 True로 반환합니다.\n if myfile_check: # 둘이 같다면 break.\n break \n if(os.path.isdir(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A])): # 검사하는 것들 중에 폴더라면 삭제하지 않습니다.\n myfile_check = True \n if(myfile_check == False): # myfile_check가 False인 것들을 삭제합니다.\n try:\n print(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A], \":삭제.\\n\") \n os.remove(self.DOWNLOAD_FOLDER_PATH+files_in_folder[index_number_A])\n except:\n pass # os.remove는 해당 특정경로의 폴더안에 폴더를 제외하고 파일들만 삭제합니다.\n \n temp_log_file_path_list,base_log_file_path_list,will_delete_log_file_path_list=self.write_total_log_to_log_file()#위의 로그들을 로그파일로 작성합니다.\n self.change_temp_file_to_current_file(temp_log_file_path_list,base_log_file_path_list,will_delete_log_file_path_list)\n exit(1)\n def split_log_file_path_slash(self,log_file_path):\n split_log_file_path=log_file_path.split('/')\n only_log_file_name=split_log_file_path[-1]\n return only_log_file_name\n \n def find_del_log(self, path_log):#로그를 입력받으면 __init__에 있는 메모리에 append시킵니다.\n self.delete_log_memory.append(path_log)\n \n def write_total_log_to_log_file(self):#삭제할 로그를 제외하고 모든 로그를 로그파일로 작성합니다. 
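
# [Aside] write_total_log_to_log_file below drops log lines whose path field
# matches a deleted file via nested loops over delete_log_memory. The same
# pruning in one pass, as a hedged sketch (column index 4 for the path, as in
# the record; the substring test mirrors its `in` comparison):
def prune_log_lines(lines, deleted_paths):
    deleted = set(deleted_paths)
    kept = []
    for line in lines:
        fields = line.split('\t')
        # skip the line when its path column names a deleted file
        if len(fields) > 4 and any(d in fields[4] for d in deleted):
            continue
        kept.append(line)
    return kept
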
#리턴 값으로 atomic 저장할 로그 경로들을 받습니다.\n current_log_path_list=[]#현재 로그 파일 경로를 리스트로 저장시킵니다.\n temp_log_path_list=[] #temp 로그 파일 경로를 리스트로 저장시킵니다.\n will_del_log_path_list=[]#앞으로 삭제할 로그 파일 경로를 리스트로 저장시킵니다.\n log_list_in_folder=os.listdir(self.LOG_FOLDER_PATH)#로그 폴더 경로를 입력시, 해당 파일 경로의 파일 리스트를 리스트로 저장합니다.\n for log_number in range(len(log_list_in_folder)):#로그 파일의 갯수만큼 파일을 비교합니다.\n if not(os.path.isdir(self.LOG_FOLDER_PATH+log_list_in_folder[log_number])): #폴더가 아니라면 실행하는 if문 \n base_log_file_path=self.LOG_FOLDER_PATH+log_list_in_folder[log_number]#기존 로그 파일의 경로\n temp_log_file_path=self.LOG_FOLDER_PATH+'temp'+log_list_in_folder[log_number]#바꿔서 temp로 저장할 로그 파일 경로\n will_del_log_file_path=self.LOG_FOLDER_PATH+'will'+log_list_in_folder[log_number]#기존 로그 파일을 삭제시키기 위해 임시로 저장하는 파일 경로\n self.append_log_path_list(base_log_file_path, current_log_path_list)#기존 로그 파일 경로를 로그 파일 경로를 저장시켜놓는 리스트에 저장시킵니다.\n self.append_log_path_list(temp_log_file_path,temp_log_path_list) #temp 로그 파일 경로를 로그 파일 경로를 저장시켜놓는 리스트에 저장시킵니다.\n self.append_log_path_list(will_del_log_file_path, will_del_log_path_list)#will_del 로그 파일 경로를 로그 파일 경로를 저장시켜놓는 리스트에 저장시킵니다.\n with io.open(base_log_file_path,\"r+\",encoding='utf-8-sig') as log_file:#기존 로그를 읽기 위해서 객체를 생성합니다.\n for log_file_line in log_file.readlines():#한 줄 씩 나눈 list를 for문을 통해서 한 줄의 string을 불러냅니다.\n self.temp_total_log_memory.append(log_file_line)#위에서 나눈 string들을 메모리에 저장시킵니다. \n with io.open(base_log_file_path,\"r+\",encoding='utf-8-sig') as log_file,io.open(temp_log_file_path,\"w\",encoding='utf-8-sig') as temp_log_file:\n #기존 로그를 읽어서 파일 경로만 따기 위해서 'r+'을 통해서 파일 객체를 생성해주고, 지울 log가 저장된 메모리와 기존 로그의 파일 경로를 비교해서 삭제한 로그들을 temp로 저장시키기 위해서 파일 객체를 생성해줍니다.\n for log_file_line in log_file.readlines():#기존 log파일을 한 줄씩 읽습니다.\n split_log_file_line=log_file_line.split(\"\\t\")#한 줄을 \\t을 기준으로 split을 해줍니다. \n for index_ in range(len(self.delete_log_memory)):#지워야 할 로그의 갯 수 만큼 index를 잡아줍니다.\n try:\n if(self.delete_log_memory[index_] in split_log_file_line[4]):#0번 인덱스부터 n-1번 인덱스까지 비교를 하는데 한 개 한 개씩 \\t를 기준으로 나눈 line에 path와 같다면\n self.temp_total_log_memory.remove(log_file_line)#해당 로그의 라인을 삭제시켜줍니다.\n except:\n pass\n self.temp_total_log_memory=self.remove_overlap(self.temp_total_log_memory)#여러번 for문을 통해서 여러번 작성이 되기 때문에 중복을 제거해줍니다.\n for aday_total_log in self.temp_total_log_memory:#삭제하고 난 후에 메모리에 저장 시킨 한 줄 한 줄을 리스트로 저장한 것을 다시 한 줄 한 줄 작성을 해줍니다.\n temp_log_file.write(aday_total_log)\n del self.temp_total_log_memory[:]#다음에 들어오게 될 경로를 사용하기 위해서 해당 메모리를 삭제해줍니다.\n return temp_log_path_list,current_log_path_list,will_del_log_path_list#리턴 값으로는 로그 파일들이 들어있는 경로의 list를 반환합니다.\n\n def append_log_path_list(self,log_path,log_path_list):\n log_path_list.append(log_path)\n return log_path_list\n \n def change_temp_file_to_current_file(self,temp_log_list,current_log_list,will_delete_log_list):\n for index_ in range(len(current_log_list)):\n os.rename(current_log_list[index_], will_delete_log_list[index_]) # 지금 존재하는 파일을 삭제할 파일로 변경합니다.\n os.rename(temp_log_list[index_], current_log_list[index_]) # temp파일을 존재할 파일로 이름을 변경합니다. #os.rename은 os.rename(기존 경로, 바꿀 경로)입니다.\n os.remove(will_delete_log_list[index_]) # temp파일이 기존 파일로 되었다면 지우기로 할 파일을 지워줍니다.\n \n def download_folder_rename(self, argv_text): # cmd로 받는 파일 경로를 오류없이 처리하기 위한 함수입니다.\n if(argv_text[-1]!='/'):\n print(\"파일 경로는 c:/example/나 /home/downloads/와 같이 /를 이용하여 합니다.\")\n sys.exit()\n return argv_text\n \n def log_file_reset(self): # 로그 파일을 작성하기 위해 폴더 및 txt파일을 만드는 함수입니다. 시작시에만 한 번 사용 되어서 초기 경로 설정을 해줍니다. \n # 1. A=self.DOWNLOAD_FOLDER_PATH B=self.SUCCESS_LOG_FILE_PATH\n # 2. 
A=self.LOG_FOLDER_PATH B=self.TOTAL_LOG_FILE_PATH \n self.some_folder_create(self.DOWNLOAD_FOLDER_PATH) \n self.some_folder_create(self.LOG_FOLDER_PATH)\n self.log_file_create(self.SUCCESS_LOG_FILE_PATH)\n self.log_file_create(self.TOTAL_LOG_FILE_PATH)\n self.log_file_create(self.SEARCH_WORD_HISTORY_FILE_PATH)\n \n def some_folder_create(self, some_folder_path): # 경로를 입력하면 그 경로에 대한 폴더를 확인하고 없다면 만듭니다.\n if os.path.isdir(some_folder_path): # 같은 경로에 다운로드를 할 폴더가 있는지 확인합니다.\n print(some_folder_path, \"에 폴더 존재\")\n else:\n try:\n os.mkdir(some_folder_path) # 해당 경로에 hwp다운로드를 담아놓는 폴더를 생성합니다.\n print('폴더 생성\\n')\n except: # 해당 경로를 가지 못 해, 폴더를 만들지 못 할 경우 일어나는 오류입니다. \n print(\"폴더 생성 오류\") \n \n def log_file_create(self, some_log_file_path): # 경로를 입력하면 해당 경로에 변수로 받은 이름을 가진 파일이 없다면 만들거나 존재한다면 확인합니다.\n with io.open(some_log_file_path, \"a\",encoding='utf-8-sig') as log_file_info: # 해당 경로 밑에날짜 이름을 가진 파일을 만들어 줍니다.\n #utf-8로 파일을 저장시키기 위해서 io.open을 사용해주고 encoding='utf-8-sig'로 저장합니다.\n print(\"로그파일 설정\\n\") \n \n def file_name_create(self, url): # 파일의 이름을 만들어주는 함수입니다. hash를 통해서 url을 변수로 해서 받은 것을 파일 경로를 지정해줍니다.\n hash_text = hashlib.md5(url.encode(\"UTF-8\")) # url을 인코딩한 후에 md5 암호화 해시의 객체를 생성합니다.\n hash_title = hash_text.hexdigest() # 생성한 객체를 16진수로 반환합니다.\n savePath = os.path.join(self.DOWNLOAD_FOLDER_PATH + hash_title + \".\" + self.LOG_FILE_TYPE)\n # 파일 경로+url명+file type으로 파일의 저장위치를 설정하기 위해서 os.path.join함수로 파일 이름을 만듭니다.\n time_stamp = self.file_name_check(savePath)\n # hash한 값의 이름을 가진 파일이 있는 지 확인합니다.\n # file_name_check는 return 값으로 현재 시간의 마지막 자리 네자리를 반환합니다.\n # 하지만 그 조건은 폴더에 같은 hash값을 가질 때만 이고 그 외에 return값은 \"\"입니다.\n if (time_stamp != ''): # hash 값이 같은 것이 있어 time_stamp가 찍히면 새로 경로를 설정해줍니다.\n savePath = os.path.join(self.DOWNLOAD_FOLDER_PATH + hash_title + time_stamp + \".\" + self.LOG_FILE_TYPE)\n return savePath\n # 해당 경로에 다운받으려고 하는 파일을 받았는 지 여부에 관해서 비교합니다.\n # 해당 함수에서는 memory에 올라온 url 값을 비교하여 if / else를 통해서\n # 같은 url이 존재한다면 다운로드하지 않고 url이 존재하지 않는다면 다운로드를 합니다.\n \n def replace_safe_string(self, error_text): # 파일 다운에 실패시 url값을 텍스트파일에 저장\n error_text = re.sub('[ㄱ-ㅣ 가-힣<>,]+', '', str(error_text))\n # 파일명에 들어가지 말아야할 특수문자들을 치환, re.sub([들어가지 말아야 할 문자],'바꿀 문자',검사 문자) 입니다.\n # [ㄱ-ㅣ 가-힣<>,]+는 ㄱ에서 ㅣ까지 가에서 힣까지 모두 콤마와 <>,까지 모두 들어갈 수 없습니다.\n return error_text\n # error_text를 return합니다.\n \n def file_name_check(self, savePath): # 폴더 내에 똑같은 파일이 존재한다면 타임 스탬프를 붙여서 이름을 반환합니다.\n if os.path.isfile(savePath):\n time_stamp = str(time.time())[-6:-1]\n # 만약에 hash한 값의 savePath가 같다면 current타임의 시간을 뒤에다 붙여서 파일이름을 작성한다.\n return time_stamp\n return \"\"\n \n def file_download(self, file_path, url): # 파일을 다운받기위한 함수입니다.\n \n if self.check_url(url): # 같은 경로의 파일이 있는지 확인. check_url은 True와 False를 반환합니다.\n print(\"\\n똑같은 파일 발견\")\n else: # 만약 경로에 파일이 없다면 다운로드.\n self.DOWNLOAD_FILE_COUNT=self.DOWNLOAD_FILE_COUNT+1#다운로드 받은 갯수를 올립니다.\n success_log_text, fail_log_text = self.recode_current_time(self.TARGET_FILE_NAME,url, file_path) # 현재 시간을 print를 사용하고, success_log_text,fail_log_text를 반환받습니다. \n url_info = \"\" # url의 정보를 출력해주기 위해서 사용합니다.\n print(\"다운로드 url:\", str(url.encode(encoding='utf_8')))\n try:\n try:\n url_info = urllib.request.urlopen(url, timeout=10) # html의 정보를 얻기위한 연결해, url의 객체를 생성한다. 
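
# [Aside] file_name_create above derives the on-disk name from an md5 of the URL,
# with a time-stamp suffix on collision. The naming scheme in isolation, as a
# self-contained sketch (the [-6:-1] slice mirrors file_name_check above):
import hashlib, os, time

def hashed_path(folder, url, ext):
    name = hashlib.md5(url.encode('utf-8')).hexdigest()
    path = os.path.join(folder, name + '.' + ext)
    if os.path.isfile(path):  # same URL hashed before: disambiguate with a stamp
        path = os.path.join(folder, name + str(time.time())[-6:-1] + '.' + ext)
    return path
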
\n except ValueError as err:\n url = urllib.request.quote(url, \":/\")\n # urllib.request.quote는 url을 encode하기 위해서 사용합니다.\n # urllib.request.quote(url명, \"encode하지 않을 단어\")로 구성되어집니다.\n url_info = urllib.request.urlopen(url, timeout=10) \n \n # urllib.reqeust.urlopen(URL,[,data][,time out])으로 데이터를 보낼 수도 있으며 timeout설정을 할 수 있다.\n fname, header = urllib.request.urlretrieve(url, file_path,reporthook=self.download_report) # 다운로드를 받고 파일의 path와 header를 반환합니다. \n download_size = url_info.headers.get('content-length') # 연결한 url의 객체에서 header.get(키), 키의 내용을 출력한다. #해당 라인도 value error 발생\n print(\"\\n다운로드 size:\", download_size) # 다운받을 파일 길이 확인 \n print(\"다운로드 path:\", fname,\"\\n\") # 파일이름 \n self.success_log_memory.append(success_log_text) # 오늘의 날짜를 가진 log_file에 입력하기 위한 메모리 입력\n self.success_url_memory.append(url) # 모든 url을 갖도록 메모리를 모든 url의 메모리에 입력\n self.total_log_memory.append(success_log_text) # 오늘의 날짜를 가진 total_log_file에 입력하기 위한 메모리 입력\n \n except (Exception,ConnectionResetError,socket.timeout, urllib.error.HTTPError, urllib.error.URLError, ssl.CertificateError, ConnectionAbortedError,ValueError) as err:\n #httplib.IncompleteRead는 http.client.IncompleteRead를 고치기 위해서 사용합니다.\n #해당 오류는 IncompleteRead(0 Bytes read)라고 뜨며 urllib.request.urlretrieve(url, file_path)에서 오류가 발생합니다.\n #urllib.request.urlopen(url, timeout=10)에서 오류가 발생합니다.\n #ConnectionResetError는 [WinError 10054] 현재 연결은 원격 호스트에 의해서 강제로 끊겼습니다. 라는 오류를 냅니다.\n # ConnectionAbortedError는 fname,header = urllib.request.urlretrieve(url,file_path)에서 오류 발생합니다.\n # ConnectionAbortedError는 Winerror 10053 현재 연결이 호스트 시스템의 소프트웨어에 의해 종료시에 오류 발생.\n # ssl.CertificateError는 hostname 'abc.go.kr' doesn't match '*.argc.go.kr'라는 오류를 입니다.\n # ssl.CertificateError는 a=urllib.request.urlopen(url,timeout=10)에서 발생합니다.\n # urllib.error.URLError는 과 같은 오류를 발생시킵니다.\n # urllib.error.URLError는 urllib.request.urlopen()에서 발생합니다.\n # socket.timeout는 urllib.request.urlopen()에서 설정한 timeout보다 시간이 초과되면 발생하는 오류입니다.\n # socket.timeout는 a=urllib.request.urlopen(url,timeout=10)에서 발생합니다.\n # urllib.error.HTTPError는 http error가 일어난다면 오류가 발생한다. ex) HTTP Error 404: Not Found\n # urllib.error.HTTPError는 a=urllib.request.urlopen(url,timeout=10)에서 발생합니다.\n # 해당 오류는 a=urllib.request.urlretrieve()에서도 발생합니다.\n self.total_log_memory.append(fail_log_text + self.replace_safe_string(err) + \"\\n\")\n print('다운로드 error:', str(err).encode(encoding='utf_8'),\"\\n\")\n \n def download_report(self, blocknum, blocksize, totalsize):#progress bar를 표현하는 함수입니다.\n readsofar = blocknum * blocksize #블락 사이즈와 블락 갯수를 곱하여 갖고 있는 파일 만큼을 표시합니다.\n surplus = totalsize - readsofar #잔여량을 뽑습니다. readsofar는 항상 blocksize만큼 일정하게 더하기 때문에 totalsize보다 커집니다. 
그렇기 때문에 잔여량을 저장해놓고 음수가 될 경우에 기존 totalsize에 음수량을 더해 정확한 수치를 계산합니다.\n if totalsize > 0:\n if surplus < 0:\n readsofar = readsofar + surplus\n percent = readsofar * 1e2 / totalsize\n s = \"\\r다운로드 prog: %5.1f%% %*d / %d\" % (\n percent, len(str(totalsize)), readsofar, totalsize)\n sys.stdout.write(s)\n if readsofar >= totalsize: \n sys.stdout.write(\"\")\n else: \n sys.stdout.write(\"\\r다운로드 prog: read %d\" % (readsofar))\n \n def check_last_page(self,prev_page_check_memory,current_page_check_memory):\n #copy_prev_page_to_current_page에서 얻어온 current_memory를 갖고 last_page_get_url에서 얻은 prev_page_check_memory를 비교한다.\n #이것을 비교한다는 것은 전의 url을 저장시켜놓은 current_memory와 방금의 url을 저장시켜놓은 prev_memory를 비교하는 것이다.\n if(prev_page_check_memory==current_page_check_memory):\n self.last_page_bool=True\n if(crawler.DOWNLOAD_FILE_COUNT>=crawler.DOWNLOAD_FILE_NUMBER):#\n print(\"크롤러 종료.\")\n exit(1)\n \n def copy_prev_page_to_current_page(self,current_page_check_memory):#prev_page_memory를 current_page_memory로 저장시키는 함수, prev_page_memory또한 지운다.\n prev_page_check_memory=current_page_check_memory #prev_page_memory를 current_page_memory에 저장시킨다.\n current_page_check_memory=[] #prev_page_memory를 지운다.\n return prev_page_check_memory, current_page_check_memory #두 개의 메모리를 리턴한다.\n\n def last_page_get_url(self,url,current_page_check_memory):# 마지막 받은 페이지를 입력받기위한 메모리 입력\n current_page_check_memory.append(url) #해당 함수는 for문 속에서 url을 하나씩 받아와서 메모리에 저장만 한다.\n \n def check_URL_params(self,query,COUNT,offset,freshness):\n #Bing함수에서 사용하는 url의 parameter들을 체크하는 것입니다.\n #가장 큰 목적은 freshness=파일의 시간순 정렬을 위해서 사용합니다.\n if(freshness.upper()=='ALL'):\n return {'q': query,'count':COUNT,'offset':offset}\n else:\n return {'q': query,'count':COUNT,'offset':offset,'freshness':freshness}\n \n def not_use_URL_to_use_URL(self,not_use_url):#해당 함수는 Bing에서만 사용되어지는 함수입니다.\n #해당 함수의 목적은 Bing에서 추출되어지는 url의 값이 실시간 값이 담겨져 있기 때문에 그 값을 제외시킨 URL을 만들기 위함입니다.\n not_use_url=re.sub(\"IG.+&CID=\\w+\",\"\",not_use_url)#실시간 값은 IG이하 &CID이하의 문자들입니다.\n use_url=re.sub(\"&p=.+\",\"\",not_use_url)\n return use_url\n \n def get_URL_List_Daum(self,search_page_number): # 다운 받아야할 사이트를 뽑아내기 위한 함수이다.\n URL_List = [] # 배열을 초기화 시킨다.\n if(self.LOG_FILE_TYPE.upper() in self.extend_filetype_list):\n target_url = \"http://search.daum.net/search?nil_suggest=btn&w=web&lpp=10&q=\" + self.TARGET_FILE_NAME+\" \"+self.LOG_FILE_TYPE + \"&file_type=\" + self.TARGET_FILE_TYPE + \"&p=\" + str(search_page_number) + \"&DA=STC\"\n else:\n target_url = \"http://search.daum.net/search?nil_suggest=btn&w=web&lpp=10&q=\" + self.TARGET_FILE_NAME + \"&file_type=\" + self.TARGET_FILE_TYPE + \"&p=\" + str(search_page_number) + \"&DA=STC\"\n print(target_url)\n try:\n html_data = requests.get(target_url)\n except socket.gaierror as e:\n print(e)#host의 name이 ' '일 경우에 발생합니다.\n # 내가 찾고자하는 target_url을 get한다.\n # data.text:HTML 문서 내용\n # data.headers:HTML 헤더\n soup_text = html_data.text\n # 내가 요청한 사이트의 HTML 내용을 soup_text로 넣는다.\n soup = BeautifulSoup(soup_text, \"html.parser\", from_encoding='utf-8')\n # urllilb.request.urlopen을 통해서 url을 오픈후 해당 html을 읽어와\n # data에 저장한 것을 Soup에 저장시킨다. 
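
# [Aside] get_URL_List_Daum nearby narrows the page to the #webdocColl div and
# then pulls the f_url anchors. The generic fetch-and-extract-links pattern it
# relies on, as a standalone hedged sketch (function name chosen here):
import requests
from bs4 import BeautifulSoup

def fetch_links(url, css_id):
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()                   # surface HTTP errors early
    soup = BeautifulSoup(resp.text, 'html.parser')
    scope = soup.find('div', {'id': css_id})  # limit extraction to one block
    return [a['href'] for a in scope.find_all('a', href=True)] if scope else []
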
그리고 저장 내용 중에\n # =html_data의 HTML문서내용 중에서 a태그를 가진 클래스명이 f_url인 것을 뽑아낸다.\n temp_result_text = soup.find_all('div', {'id':'webdocColl'})\n #프리미엄 링크를 제외한 url을 따오기 위해서 10개의 검색결과만 나오는 부분을 1차적으로 가려냄\n result_text= temp_result_text[0].find_all('a',class_='f_url',href=True)\n # 위의 검색 결과중에 f_url에 관련된 링크만 1차적으로 걸러냄.\n # 1차적으로 걸러낸 것들은 많은 태그가 있는데 href태그만 다시 찾음.\n \n for address in result_text:\n # href태그만 찾아낸 것들 모두를 for문을 통해 각 객체의 원소들을 address에 저장해서 돌린다.\n URL_List.append(address['href'])\n # del self.get_address[0:3]#필요없는 url이 저장된 것을 삭제합니다. [0:3]은 광고 url\n if (len(result_text)!=10):#마지막 페이지의 조건은 url의 lpp의 수와 일치해야 합니다. 마지막 장 체크를 위해서 lpp의 수와 다를 때 마지막 페이지 bool을 True로 변환시킵니다.\n self.last_page_bool=True\n self.make_search_word_history_log(search_page_number,self.search_word_history_memory,\"\")\n return URL_List # url 리스트를 리턴함.\n \n def get_URL_List_Bing(self,search_page_number): \n URL_List = [] # 배열을 초기화 시킨다.\n target_url = \"https://api.cognitive.microsoft.com/bing/v5.0/search\"\n if(self.LOG_FILE_TYPE in self.extend_filetype_list):\n query=self.TARGET_FILE_NAME+\" \"+self.LOG_FILE_TYPE+\" FileType:\"+self.TARGET_FILE_TYPE\n else:\n query=self.TARGET_FILE_NAME+\" FileType:\"+self.TARGET_FILE_TYPE\n freshness=self.freshness # 내가 검색하고자 하는 데이터의 시간순 정렬을 위해서 사용. BING에서만 사용가능한 변수. #Day,Week,Month,All\n COUNT=50#TOP=50, 화면당 출력 갯수, 최대 50까지 출력 가능.\n offset=self.offset\n payload = self.check_URL_params(query, COUNT, offset, freshness)\n #반환값 sample:{'q': query,'count':COUNT,'offset':offset, 'freshness':freshness}\n headers = {'Ocp-Apim-Subscription-Key': '2026d73832ba4415ac2f838eecc30175'}\n #키 확인 : https://www.microsoft.com/cognitive-services/en-US/subscriptions\n r = requests.get(target_url,params=payload,headers=headers)#request를 보냄, url에 변수들과 헤더에 key값을 적어서 보냄\n result_text = json.loads(r.text)#결과 값은 json으로 넘어온다.\n try:\n for url_number in range(COUNT):\n download_url=self.not_use_URL_to_use_URL(result_text['webPages']['value'][url_number]['url'])\n URL_List.append(download_url)\n #json으로 넘어온 값들 중에서 webPage의 value에 넘어온 number 중에서 url이 다운로드 받을 url을 갖고 있는곳이다.\n print(url_number)\n self.offset=str(int(self.offset)+50)\n except:\n if(self.bool_Bing_finish==True):\n sys.exit()\n self.bool_Bing_finish=True\n pass##만약에 except가 난다는 것은 더이상 웹사이트에서 출력할 것이 없다는 것을 의미하기 때문에 해당 웹크롤러만 긁고 종료해야 한다.\n self.make_search_word_history_log(search_page_number,self.search_word_history_memory,freshness) #검색어 결과를 저장하는 로그입니다.\n return URL_List # url 리스트를 리턴함.\n \n def get_URL_List_GOOGLE(self,search_page_number):\n URL_List=[]\n day_=self.make_using_date()\n if(search_page_number==1):#start의 값은 0이면 start 변수를 없애줘야 한다. 그렇기 때문에 if문으로 처리한다.\n target_url = 'https://www.googleapis.com/customsearch/v1?key=AIzaSyCo1p1FP0qA2YRY0uJNV5RC4oF5NXfc0ec&cx=002673536718529979084:tliqfa-m8im&q='+self.TARGET_FILE_NAME+' filetype:'+self.LOG_FILE_TYPE+'&dateRestrict='+day_#'&start='+str((int(search_page_number)-1)*10)\n else:\n target_url = 'https://www.googleapis.com/customsearch/v1?key=AIzaSyCo1p1FP0qA2YRY0uJNV5RC4oF5NXfc0ec&cx=002673536718529979084:tliqfa-m8im&q='+self.TARGET_FILE_NAME+' filetype:'+self.LOG_FILE_TYPE+'&dateRestrict='+day_+'&start='+str((int(search_page_number)-1)*10)\n print(target_url)\n #filetype은 query문을 통해서 제어를 해야합니다.\n #dateRestrict는 d,w,m,y의 변수를 갖고 앞에 숫자n을 붙인다. 예를 들면 dn이면 몇 일 전까지의 결과만 출력, wn이라면 주 전까지만의 결과만 출력을 하도록 한다.\n #cx는 검색엔진의 ID를 뜻합니다. https://cse.google.com/cse에서 결과값을 얻어올 수 있습니다.\n #검색엔진 수정-> 만들어진 엔진 -> 설정 -> 세부정보 -> 검색 엔진 ID\n #key는 사용자인증정보입니다. 
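
# [Aside] The hand-concatenated Custom Search URL above is easier to keep correct
# with a params dict, which also URL-encodes the query. A hedged sketch (key and
# cx are placeholders here, not working credentials):
import requests

def cse_links(api_key, cx, query, filetype, date_restrict, start=1):
    params = {
        'key': api_key, 'cx': cx,
        'q': '%s filetype:%s' % (query, filetype),
        'dateRestrict': date_restrict,   # e.g. 'd1', 'w1', 'm1' as built below
    }
    if start > 1:
        params['start'] = start          # the API rejects start=0, as noted above
    r = requests.get('https://www.googleapis.com/customsearch/v1', params=params)
    return [item['link'] for item in r.json().get('items', [])]
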
https://console.developers.google.com/apis/credentials에서 API키값입니다.\n #결과값은 json으로 오기때문에 json으로 처리를 해주어야합니다.\n #the API provides 100 search queries per day for free. Additional requests cost $5 per 1000 queries, up to 10k queries per day.\n #하루에 100쿼리, num=화면당 출력할 수 있는 갯수는 10개까지 입니다.\n #따라서 하루에 처리할 수 있는 최대의 처리량은 1000개가 될 수 있습니다.\n #params 정보:https://developers.google.com/custom-search/json-api/v1/reference/cse/list\n html_data = requests.get(target_url)\n soup_text = html_data.text\n if(html_data.status_code==403):#403에러는 인증에러 or 하루 쿼리양을 다 썼을 때 일어난다.\n json_soup=json.loads(soup_text)#결과 값은 json으로 넘어온다.\n print(json_soup['error']['message'])\n exit(1)\n # 내가 요청한 사이트의 HTML 내용을 soup_text로 넣는다.\n soup = BeautifulSoup(soup_text, \"html.parser\")\n soup=str(soup)\n find_result=re.findall('\"link\":.+\"',soup)#원래는 json으로 파싱을 해야하지만 넘어오는 html에 오류가 종종 있어서 정규식표현을 통해서 link를 파싱합니다.\n for index_ in range(len(find_result)):#위의 결과에서 얻은 배열값을 불러냅니다.\n split_find_results=find_result[index_].split(': ')#\"link\" : \"http://example.com/\"으로 오기 때문에 \": \"로 구간을 나눠줍니다.\n split_find_result=split_find_results[1].replace('\"','')\n URL_List.append(split_find_result)#split_find_result에는 리스트로 1번인덱스에 링크가 옵니다.\n #002673536718529979084:tliqfa-m8im ::::cx1\n #AIzaSyCo1p1FP0qA2YRY0uJNV5RC4oF5NXfc0ec :::key1\n #010368119512456933412:-ffgk5pq_ym ::::cx2\n #AIzaSyDDzNkCDfq8HTVZJyzyXzB0QPZcKgReCj0 :::key2\n #100쿼리라면 제한을 받을 일이 없음.\n self.make_search_word_history_log(search_page_number,self.search_word_history_memory,self.freshness) #검색어 결과를 저장하는 로그입니다.\n if (len(URL_List)!=10):#마지막 페이지의 조건은 url의 lpp의 수와 일치해야 합니다. 마지막 장 체크를 위해서 lpp의 수와 다를 때 마지막 페이지 bool을 True로 변환시킵니다.\n self.last_page_bool=True\n return URL_List\n \n def make_using_date(self):\n if(self.freshness.upper()=='DAY'):\n return 'd1'\n elif(self.freshness.upper()=='WEEK'):\n return 'w1'\n elif(self.freshness.upper()=='MONTH'):\n return 'm1'\n elif(self.freshness.upper()=='ALL'):\n return 'y100' \n def extract_random_text(self):\n url='http://m.naver.com/'#naver url에서 \n naver_html=self.make_url_html(url)#naver url의 html을 생성합니다.\n data_news_html=naver_html.findAll('a',{'data-area':'NEWS'})#a 태그 이하의 data-area가 NEWS인 구역에서\n random_text_list=[] #random_text_list를 생성합니다.\n for a in range(len(data_news_html)): #a 태그 이하의 data-area가 잡히는 갯수에 따라서\n random_text_list.append(data_news_html[a].get_text()) #random_text_list에 data-area의 news구역의 텍스트를 append시킵니다.\n random.shuffle(random_text_list) #random_text_list를 뒤섞은 후에\n unsafe_word_list=self.slice_list_text(random_text_list) #random_text_list에서 단어만 갖고 있는 list를 뽑아냅니다.\n safe_word_list=self.change_unsafe_word_to_safe_word(unsafe_word_list)\n if(safe_word_list[0]!=''):\n return safe_word_list[0] #word_list에 15번째에 있는 원소 아무거나를 리턴합니다. 
해당 변수는 검색어로 사용됩니다.\n else:\n return(safe_word_list[15])\n \n def slice_list_text(self,list_):\n #extract_random_text에서 list들에는 문장들만 들어있습니다.\n #그렇기 때문에 그 문장들을 스페이스바 단위로 하여 split을 하고 나온 단어들을 word_list라는 곳에 집어넣고\n #중복된 값이 없는지 검사해서 리턴해줍니다.\n word_list=[]\n for text_list_in_list in list_:\n for text_in_text_list_ln_list in text_list_in_list.split(' '):\n text_in_text_list_ln_list=re.sub(\"[\\s,\\'\\\"\\n]|\",\"\",text_in_text_list_ln_list)\n word_list.append(text_in_text_list_ln_list)\n word_list=self.remove_overlap(word_list)\n return word_list\n \n def remove_overlap(self,memory):#중복제거, set으로 바꾸고 list로 전환\n list_memory=list(OrderedDict.fromkeys(memory))\n return list_memory\n \n def change_unsafe_word_to_safe_word(self,unsafe_word_list):\n #사용하지 말아야할 단어들을 안전한 단어로 바꿔준다.\n safe_word_list=[]\n for unsafe_word in unsafe_word_list:\n safe_word_list.append(re.sub(\"[^0-9,.ㄱ-ㅣ가-힣a-zA-Z]\",\"\",unsafe_word))\n for index_ in range(len(safe_word_list)):\n try:\n if(safe_word_list[index_]==''):\n del safe_word_list[index_]\n except:\n pass\n return safe_word_list\n \n def make_url_html(self, search_url):#url이 입력되면 해당 url의 html을 생성하고 리턴합니다.\n try:\n html_data = requests.get(search_url, self.HEADER)\n if(html_data.status_code==403):\n print(\"403 Error!!!\\n\")\n print(\"다시 실행해주세요.\")\n exit(1)\n except socket.gaierror as e:\n print(e)\n soup_text = html_data.text\n html_soup = BeautifulSoup(soup_text, \"html.parser\")\n return html_soup\n \n def find_search_history_log(self,log_folder_path):\n #log폴더 내에 search_log를 가져오는 함수입니다.\n log_folder_path_list=os.listdir(log_folder_path)#로그 폴더에 있는 파일들을 리스트로 받는다.\n temp_use_index_list=[]\n use_index_list=[]\n use_log_list=[]\n for index_ in range(len(log_folder_path_list)):#로그 폴더에 있는 파일들의 갯수만큼 index로 잡아준다.\n if('search' in log_folder_path_list[index_]):#파일 명에 search가 들어간다면 list로 넣는다.\n temp_use_index_list.append(index_)\n for index_ in temp_use_index_list:\n log_born_day=self.make_date_info(str(log_folder_path_list[index_][0:10]))#로그가 생성된 날을 date자료형으로 가져온다.\n today_Ymd=self.make_date_info(self.today_Ymd) #오늘의 날짜를 date자료형으로 가져온다.\n before_week_day=today_Ymd-datetime.timedelta(7) #1주일 전의 날짜를 date자료형으로 가져온다.\n if(log_born_day>=before_week_day): #로그가 만들어진 날이 1주일 안이라면 리스트에 넣는다.\n use_index_list.append(index_)\n for index_ in use_index_list:\n use_log_list.append(log_folder_path_list[index_])\n return use_log_list\n \n def check_already_search_word(self,log_folder_path_list,search_word,search_site,search_type,already_search_word_bool):\n #이미 사용한 검색어인지 검사하는 함수입니다. \n for index_ in range(len(log_folder_path_list)):\n with open(crawler.LOG_FOLDER_PATH+log_folder_path_list[index_],\"r\",encoding='utf-8') as log_file:\n log_file_readlines= log_file.readlines()\n for log_text_line in log_file_readlines:\n split_log_text=log_text_line.split('\\t')\n if(search_site in split_log_text[1] and search_word in split_log_text[2] and search_type in split_log_text[3]):\n already_search_word_bool=True\n return already_search_word_bool\n \n def make_date_info(self,string_day):\n try:\n split_date=string_day.split('-')\n target_day=datetime.date(int(split_date[0]),int(split_date[1]),int(split_date[2]))\n return target_day\n except:\n return \n\n \ncrawler = crawler_() # crawler객체를 생성해줍니다. 
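
# [Aside] Because some result pages repeat forever past the real end, the loop
# below stops when a batch is short or identical to the previous one. The idea
# in isolation, as a small sketch (function name chosen here):
def is_last_page(prev_batch, curr_batch, expected_size=10):
    # either the engine returned a short page, or the same page again
    return len(curr_batch) != expected_size or prev_batch == curr_batch
# usage inside the paging loop: `if is_last_page(prev, urls): break` then `prev = urls`
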
\ncurrent_page_check_memory=[]#현재 돌고 있는 for문에서 다운 받은 url을 리스트로 저장합니다.\nprev_page_check_memory=[] #이전에 돌았던 for문에서 다운 받은 url을 리스트로 저장합니다.\nfor infinit in range(9999):\n if(crawler.AUTO_CHECK==True):\n already_search_word_bool=False\n safe_word=crawler.extract_random_text()\n crawler.TARGET_FILE_NAME=safe_word\n log_folder_path_list=crawler.find_search_history_log(crawler.LOG_FOLDER_PATH)\n already_search_word_bool=crawler.check_already_search_word(log_folder_path_list, safe_word, crawler.crawler_choice, crawler.LOG_FILE_TYPE,already_search_word_bool)\n for search_page_number in range(1,1000):#페이지수가 넘어가게함.\n try:\n if(already_search_word_bool==True):\n break\n except NameError as e:pass\n #크롤러의 시간,크롤러의 사이트,검색어,페이지를 저장하는 변수이다.\n print(\"next page\")\n if(crawler.crawler_choice.upper()=='DAUM'):\n target_urls = crawler.get_URL_List_Daum(search_page_number)#해당 함수를 거쳐서 다운로드 할 url 리스트를 받아옵니다.\n elif(crawler.crawler_choice.upper()=='BING'):\n target_urls = crawler.get_URL_List_Bing(search_page_number)#해당 함수를 거쳐서 다운로드 할 url 리스트를 받아옵니다. \n elif(crawler.crawler_choice.upper()=='GOOGLE'):\n target_urls = crawler.get_URL_List_GOOGLE(search_page_number)#해당 함수를 거쳐서 다운로드 할 url 리스트를 받아옵니다. \n prev_page_check_memory, current_page_check_memory = crawler.copy_prev_page_to_current_page(current_page_check_memory)\n \n #current_page_check_memory를 변수로 집어넣습니다. prev_page에 current_page를 넣고 current_page를 비워줍니다.\n #그리고 prev_page와 current_page를 반환받습니다. \n for slice_url in target_urls: # list로 얻은 url의 값을 하나씩 돌린다.\n savePath = crawler.file_name_create(slice_url) # 파일의 이름을 만들어주는 함수입니다. hash를 통해서 url을 변수로 해서 받은 것을 파일 경로를 지정해줍니다. \n crawler.file_download(savePath, slice_url) # 파일을 다운받기위한 함수입니다.\n crawler.last_page_get_url(slice_url,current_page_check_memory) #마지막 페이지를 구하기 위해서 페이지의 url을 한 장 단위로 가져오는 함수입니다. \n crawler.atomic_create_file(crawler.SUCCESS_LOG_FILE_PATH, crawler.success_log_temp_file_path, crawler.success_log_will_delete_file_path, crawler.success_log_memory)\n crawler.atomic_create_file(crawler.TOTAL_LOG_FILE_PATH, crawler.total_log_temp_file_path, crawler.total_log_will_delete_file_path, crawler.total_log_memory)\n crawler.atomic_create_file(crawler.SEARCH_WORD_HISTORY_FILE_PATH,crawler.search_word_history_temp_file_path,crawler.search_word_history_delete_file_path,crawler.search_word_history_memory)\n # 얻은 url에 관련된 로그를 작성하는 것을 안정성있게 하기위해서 atomic create file을 하는 함수입니다.\n # 해당 함수는 로그파일의 이름과 로그파일의 경로를 입력하게 되면 로그의 temp파일과 삭제할 로그파일을 만듭니다.\n # temp파일에 url을 작성합니다.\n # 기존의 로그파일을 삭제할 로그파일로 변경하면 temp파일을 기존의 로그파일로 변경하고 기존의 로그파일은 삭제합니다.\n crawler.check_last_page(prev_page_check_memory,current_page_check_memory) # 마지막 페이지 url을 가져오면 list를 비교합니다.\n if(crawler.last_page_bool==True): #마지막페이지의 조건은 1. url의 갯수가 lpp의 갯수와 다를 경우, 2. 
지난 번에 받은 파일과 이번에 받은 파일이 같을 경우입니다.\n if(crawler.AUTO_CHECK==False):#auto를 입력안했을 경우에 정상 종료, auto를 입력시에는 다음 검색어 검사\n print(\"크롤러 종료.\")\n exit(1)\n crawler.last_page_bool=False #auto가 아닐경우엔 다시 False로 돌리고 break을 시켜서 해당 for문을 빠져나옵니다.\n break\n \n print(\"크롤링 종료\")","sub_path":"Crawaler/문서크롤러/crawler_V_1.py","file_name":"crawler_V_1.py","file_ext":"py","file_size_in_byte":64919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327054759","text":"import os\nimport sys\nimport numpy as np\nfrom scipy.optimize import curve_fit\nimport argparse\nsys.path.append('lib/')\nimport analysis_utils as au\nI=np.complex(0,-1)\nmatr=np.matrix\ng5g1=matr(au.g5_uk)*matr(au.g1_uk)\ng5g2=matr(au.g5_uk)*matr(au.g2_uk)\ng5g3=matr(au.g5_uk)*matr(au.g3_uk)\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n parser = argparse.ArgumentParser(prog='Compare Methods for Loops', description='This code performs ')\n parser.add_argument('--listConfs', help='Takes a list of configurations', default = 'list_confs.txt')\n parser.add_argument('--loopPath', help='Path where to find loops',default = 'loops/')\n parser.add_argument('--T', help='Spatial Lattice Extent', default = 48)\n parser.add_argument('--kappa', help='The value of the kappa parameter', default = 0.1373)\n parser.add_argument('--mu', help = 'The mu value', default = 0.003)\n parser.add_argument('--maxMomSq', help = 'Maximum momentum square', default=1)\n parser.add_argument('--binsize', help = 'Binsize for the jackknife', default=1)\n parser.add_argument('--NprocsT',help = 'Number of files in temporal direction',default=2)\n parser.add_argument('--Nsrcs',help = 'Number of stochastic noise vectors',default=10)\n parser.add_argument('--Nhad', help = 'Number of Hadamard vectors', default = 32)\n\n args=vars(parser.parse_args())\n p={} # parameters\n p['listConfs']=args['listConfs']\n p['loopPath']=args['loopPath']\n p['T']=int(args['T'])\n p['kappa']=float(args['kappa'])\n p['mu']=float(args['mu'])\n p['maxMomSq']=int(args['maxMomSq'])\n p['NprocsT']=int(args['NprocsT'])\n p['Nsrcs']=int(args['Nsrcs'])\n p['Nhad']=int(args['Nhad'])\n p['binsize']=int(args['binsize'])\n listMom=au.momComb(p['maxMomSq'])\n\n lenConf=au.file_len(p['listConfs'])\n with open(p['listConfs'],'r') as fp:\n listConfs = list(map(lambda x: x.strip(),fp.readlines()))\n if len(set(listConfs)) != lenConf:\n sys.stderr.write('Error there are duplicate confs in the list')\n sys.exit(-1)\n\n if lenConf % p['binsize'] != 0:\n sys.stderr.write('Error number of configurations is not divisible with the binsize: Manually discard confs to make it')\n sys.exit(-1) \n\n mul_fac_gen=-4*p['kappa']\n mul_fac_std=-I*8.*p['mu']*p['kappa']**2\n \n ############################################### Generalized one-end trick ####################################################################\n Lgen_exact=[]\n Lgen_stoch=[]\n if p['NprocsT'] == 1:\n for iconf in listConfs:\n Lgen_exact.append(np.loadtxt(p['loopPath']+iconf+'loopProbingNc32_exact_NeV50_dOp.loop.'+'.1_0', usecols=(5,6))\n .reshape(len(listMom),p['NprocsT'],16,2))\n for isc in range(0,p['Nsrcs']):\n Lgen_stoch.append(np.loadtxt(p['loopPath']+iconf+'/loopProbingNc32_stoch_NeV50_dOp.loop.'+str('%04d'%(isc+1))+'.1_0', usecols=(5,6))\n .reshape(len(listMom),p['NprocsT'],16,2))\n else:\n for iconf in listConfs:\n tmp_e=[]\n for ip in range(p['NprocsT']):\n tmp_e.append(\n np.loadtxt(\n p['loopPath']+iconf+'/loopProbingNc32_exact_NeV50_dOp.loop.'+str(p['NprocsT'])+'_'+str(ip),usecols=(5,6))\n 
.reshape(len(listMom),int(p['T']/p['NprocsT']),16,2))\n for ip in range(1,p['NprocsT']):\n tmp_e[0]=np.concatenate((tmp_e[0],tmp_e[ip]),axis=1) # concatenate the temporal direction\n Lgen_exact.append(np.array(tmp_e[0]))\n for isc in range(0,p['Nsrcs']):\n tmp_s=[]\n for ip in range(p['NprocsT']):\n tmp_s.append(\n np.loadtxt(\n p['loopPath']+iconf+'/loopProbingNc32_stoch_NeV50_dOp.loop.'+str('%04d'%(isc+1))+'.'+str(p['NprocsT'])+'_'+str(ip),usecols=(5,6))\n .reshape(len(listMom),int(p['T']/p['NprocsT']),16,2))\n for ip in range(1,p['NprocsT']):\n tmp_s[0]=np.concatenate((tmp_s[0],tmp_s[ip]),axis=1) # concatenate the temporal direction\n tmp_s[0] /= (isc+1)*p['Nhad']\n Lgen_stoch.append(np.array(tmp_s[0]))\n Lgen_stoch=np.array(Lgen_stoch).reshape(lenConf,p['Nsrcs'],len(listMom),p['T'],16,2)\n Lgen_stoch=Lgen_stoch[:,:,:,:,:,0] + I*Lgen_stoch[:,:,:,:,:,1]\n Lgen_exact=np.array(Lgen_exact).reshape(lenConf,len(listMom),p['T'],16,2)\n Lgen_exact=Lgen_exact[:,:,:,:,0] + I*Lgen_exact[:,:,:,:,1]\n\n Lgen=np.array([mul_fac_gen*(Lgen_exact + Lgen_stoch[:,x,:,:,:]) for x in range(p['Nsrcs'])])\n Lgen=Lgen.transpose(1,0,2,3,4)\n Lgen=np.average(Lgen,axis=3) #average the loops over the time-slices\n loopAxial=[]\n for i in range(lenConf):\n for isc in range(p['Nsrcs']):\n loopAxial.append(np.array(list(map(lambda x: (matr(au.g3_uk)*matr((x.reshape(4,4)).T)).trace(), Lgen[i,isc]))).reshape(len(listMom)))\n loopAxial=np.abs(np.array(loopAxial).reshape(lenConf,p['Nsrcs'],len(listMom))) # take the absolute value of the loop\n loopAxial_sq=loopAxial**2\n loopAxial_binning=np.array(au.binning(loopAxial,p['binsize']))\n loopAxial_sq_binning=np.array(au.binning(loopAxial_sq,p['binsize']))\n\n varAxial_Binning=loopAxial_sq_binning - loopAxial_binning**2\n Nbins=len(varAxial_Binning)\n varAxial_bmean=np.average(varAxial_Binning,axis=0)\n varAxial_error=np.sqrt(Nbins-1)*np.std(varAxial_Binning,axis=0)\n\n for isc in range(p['Nsrcs']):\n print('%d %+f %+f' % (isc+1,varAxial_bmean[isc,0],varAxial_error[isc,0]))\n #############################################################################################################################################\n \n############################\nif __name__ == \"__main__\":\n sys.exit(main())\n \n","sub_path":"assess_probing.py","file_name":"assess_probing.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206277577","text":"#!/usr/bin/env python3\n\"\"\"Base class for a generic LTE Empower Message\"\"\"\n#\n# Copyright (c) 2018 FBK-CREATENET\n# AUTHOR- Abin Ninan Thomas\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
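
# [Aside on the assess_probing.py record above] Its error bar uses the factor
# sqrt(Nbins-1) * std of binned values, the jackknife convention. A hedged,
# self-contained numpy sketch of the leave-one-out form of that estimator
# (synthetic data here, not the loop files):
import numpy as np

def jackknife_error(samples):
    x = np.asarray(samples, dtype=float)
    n = len(x)
    loo = (x.sum() - x) / (n - 1)          # leave-one-out averages
    # jackknife variance = (n-1)/n * sum((loo - mean)^2), hence the sqrt(n-1) factor
    return loo.mean(), np.sqrt(n - 1) * loo.std()

rng = np.random.default_rng(0)
mean, err = jackknife_error(rng.normal(1.0, 0.1, 400))
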
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport ctypes as ct\nfrom v1.lte.trigger import DIRECTION_REPLY\nfrom v1.lte.trigger.uemeas import _EnbIdT\nfrom v1.lte.trigger.uemeas import _CellIdT\nfrom v1.lte.trigger.uemeas import _ModIdT\nfrom v1.lte.trigger.uemeas import _UeReport\n\nfrom v1.lte.trigger.uemeas.ltetuemeas import LTETUeMeas\n\n\nclass LTETUeMeasRep(LTETUeMeas):\n \"\"\"Class that deals with a trigger UE Report reply\"\"\"\n def __init__(self, **kwargs):\n super().__init__()\n self.proto = ct.CDLL(\"libemproto.so\")\n self._direction = DIRECTION_REPLY\n self._oper = 0\n self._sequence = 0\n\n #The optional keyword arguments are if the user\n #wants to parse on creating an instance\n if kwargs:\n if 'buf' in kwargs:\n buf = kwargs.get('buf')\n if 'buflen' in kwargs:\n buflen = kwargs.get('buflen')\n\n self.parse(buf, buflen)\n\n\n def format(self, buf, size, uerep):\n \"\"\"A generic trigger event UE Measure message format\"\"\"\n repformat = self.proto.epf_trigger_uemeas_rep\n repformat.restype = ct.c_int\n repformat.argtypes = [ct.c_char_p, ct.c_uint, _EnbIdT, _CellIdT,\n _ModIdT, ct.POINTER(_UeReport)]\n return repformat(buf, size, self._enbid, self._pci, self._modid,\n (uerep))\n\n def parse(self, buf, size):\n \"\"\"parse a trigger event UE Measure reply\"\"\"\n repparse = self.proto.epp_trigger_uemeas_rep\n repparse.restype = ct.c_int\n repparse.argtypes = [ct.c_char_p, ct.c_uint, ct.POINTER(_UeReport)]\n return repparse(buf, size, ct.pointer(_UeReport()))\n\n def failformat(self, buf, size):\n \"\"\"A trigger UE Measure format fail event message\"\"\"\n repfailformat = self.proto.epf_trigger_uemeas_rep_fail\n repfailformat.restype = ct.c_int\n repfailformat.argtypes = [ct.c_char_p, ct.c_uint, _EnbIdT, _CellIdT,\n _ModIdT]\n return repfailformat(buf, size, self._enbid, self._pci, self._modid)\n","sub_path":"bindings/python/v1/lte/trigger/uemeas/ltetuemeasrep.py","file_name":"ltetuemeasrep.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555139343","text":"from django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import serializers\n\nfrom .models import UserProfile\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = UserProfile\n exclude = ['id']\n read_only_fields = ['user']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n last_login = serializers.DateTimeField(format=settings.DATE_FORMAT, required=False, read_only=True)\n date_joined = serializers.DateTimeField(format=settings.DATE_FORMAT, required=False, read_only=True)\n userprofile = serializers.SerializerMethodField()\n\n sidemenu = serializers.SerializerMethodField()\n\n class Meta:\n model = get_user_model()\n exclude = [\n 'password',\n 'id',\n ]\n\n def get_userprofile(self, object):\n return ProfileSerializer(object.userprofile_set).data\n\n def get_sidemenu(self, inst):\n sidemenu_group = [{\n 'group_icon': 'dashboard',\n 'group_name': 'dashboard',\n 'group_list': [\n {\n 'route': '/dashboard',\n 'name': 'User',\n 'icon': 'user'\n }\n ]\n }]\n\n if inst.userprofile_set.get().seacret_room:\n menu_item = {\n 'group_icon': 'map-marker',\n 'group_name': 'secret',\n 'group_list': [\n {\n 'route': '/secret/component',\n 'name': 'secret',\n 'icon': 'view-cards'\n },\n ]\n }\n\n sidemenu_group.append(menu_item)\n return 
sidemenu_group\n","sub_path":"backend/apps/profile/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"328984127","text":"#author Daniel Torres A.\n\n#Make a guessing game\n#the number to guess is a random number\n# between 0 and 20\n# the code must generate the random number automatically\n# and ask the user, via an input, for the number they want to guess\n# the game must have a maximum number of attempts.\n# if I go over the maximum without guessing, I lose\n# if I guess it, I win\n\n\n#Guessing game\nimport random\n\nlist = []\ndef fillList(numElements, list):\n\t'''Fill the list with N (int) elements '''\n\tfor element in range(0,numElements):\n\t\tlist.append(element)\n\ndef getRandomIntNumber(min,max):\n\t'''Get a random integer between min and max '''\n\trandomNumber = random.randint(min,max)\n\treturn randomNumber\n\ndef findElementInList(element,list):\n\tpos = list.index(element)\n\t#print(pos)\n\treturn pos\n\ndef guessNumber(randomNum,list,retries):\n\t'''Guess a number contained in the list within [retries] attempts'''\n\n\tprint(\"\\n\\n\\n\\n###################################################################################\")\n\tprint(\"Adivinanzas\\n\")\n\tprint(f\"Adivina un número entre [{min(list)} y {max(list)}]. Tienes {retries} intentos\")\n\n\tguessed = -9999\n\twhile(guessed != randomNum and retries != 0):\n\t\tprint(f\"\\nIntentos restantes: {retries}\")\n\t\ttry:\n\t\t\tguessed = int(input(\"Cual es tu número?: \"))\n\t\t\tpos = findElementInList(guessed,list)\n\t\texcept:\n\t\t\tprint(\"El dato que ingresaste no se encuentra en el rango\")\n\t\tretries = retries - 1\n\n\tprint(\"Felicitaciones\" if retries != 0 else \"Mejor suerte la proxima vez\")\n\n#configuration\nnumberOfElements = 21 # there are 21 elements between 0 and 20\nretries = 3\n#initialization\nfillList(numberOfElements,list)\nrandomNumber = getRandomIntNumber(min(list),max(list))\n\n#debug\nprint(list)\nprint(randomNumber)\n\n#Start App\n#guessNumber(randomNumber,list,retries)\n\n#__________________________________________________________________________________________________________________\n\n#make a rock-paper-scissors game against\n#the computer. The computer randomly picks rock, paper or scissors\n# and must ask the user for their input\n#the game must end when either side wins 2 out of 3 rounds\nprint(\"###################################################################################\")\nprint(\"Piedra, Papel o Tijera\\n\")\n\nopciones = [\"R\",\"T\",\"P\"] # a last element is added so the index never has to wrap around\ndef gana(eleccion,random,opciones):\n\t''' Decides whether the choice beats the computer: win(1), lose(-1) or tie(0) '''\n\t#numOptions = len(opciones)\n\tlength = len(opciones)\n\t#get the position of the chosen element and check whether it wins or ties\n\tposEleccion = eleccion\n\tposRandom = random\n\tif posEleccion == posRandom :\n\t\treturn 0 #tie\n\telif (0 if posEleccion + 1 == length else (posEleccion + 1)) == posRandom:\n\t\treturn 1 #player wins, computer loses\n\telse :\n\t\treturn -1 # player loses, computer wins\n\n\ndef juego(points,opciones):\n\t'''points: how many points a player must accumulate to win'''\n\n\tacumJugador = 0\n\tacumComputador = 0\n\n\twhile acumComputador != points and acumJugador != points :\n\t\t#get the computer's choice\n\t\tcomputador = opciones[getRandomIntNumber(0,2)] \n\t\t#debug\n\t\tprint(\"El computador eligio \",computador)\n\t\t#enddebug\n\n\t\tcomputador = findElementInList(computador,opciones)\n\t\t#get the player's choice\n\t\tjugador = findElementInList(input(\"Elija Roca[R] | Papel [P] | Tijera [T]: \").upper(),opciones)\n\t\n\t\t#check win, tie or loss\n\t\tresult = gana(jugador,computador,opciones)\n\n\t\tprint(\"El computador eligio \",opciones[computador])\n\t\tprint(\"El jugador eligio : \" ,opciones[jugador])\n\t\tprint(\"\\n\")\n\t\t\n\t\tif result > 0:\n\t\t\tacumJugador = acumJugador + 1\n\t\t\tprint(f\"Ganas. Computador lleva {acumComputador} | Jugador lleva {acumJugador} puntos\")\n\t\telif result < 0:\n\t\t\tacumComputador +=1\n\t\t\tprint(f\"Pierdes. Computador lleva {acumComputador} | Jugador lleva {acumJugador} puntos\")\n\t\telse:\n\t\t\tprint(f\"Empate. Computador lleva {acumComputador} | Jugador lleva {acumJugador} puntos\")\n\t\tprint(\"\\n\")\n\n\t#end of game\n\tprint(\"Perdiste\" if acumJugador != points else \" Ganaste\")\n\tprint(f\"Resultados: Computador= {acumComputador} | Jugador ={acumJugador} puntos\")\n\n\n#Start App\nprint(\"\\n\\n\\n\\nJuego Roca, Papel y Tijera: Roca le gana a tijera, tijera le gana a papel y papel le gana a toca\")\nprint(\"Gana 2 de 3 partidas contra el computador.\\n\")\njuego(3,opciones)\n\n\n\n\n#tests\n#opciones = [\"R\",\"T\",\"P\"]\n#print(gana(0,0,opciones))\n#print(gana(0,1,opciones))\n#print(gana(0,2,opciones))\n#print(gana(1,0,opciones))\n#print(gana(1,1,opciones))\n#print(gana(1,2,opciones))\n#print(gana(2,0,opciones))\n#print(gana(2,1,opciones))\n#print(gana(2,2,opciones))\n\n\n\t\t","sub_path":"Semana 3/Clase 2/ejercitación.py","file_name":"ejercitación.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"608410245","text":"# class Sample(self):\n#     x = 5\n#     pass\n\n\n# Create an object or instance\n# y = Sample()\n\n\n# Classes contain attributes and methods.\n# - Methods are operations that can be performed on the object\n# - Attributes are characteristics of an object\n\n# Creating an attribute\nclass Dog():\n    def __init__(self, breed, color, size):\n        self.breed = breed\n        self.color = color\n        self.size = size\n\n\nmaxim = Dog('Rottweiler', 'Black', 'Big')\nprint(maxim.breed)\n\n\n# CLASS ATTRIBUTES\nclass Chicken:\n    # This is a class attribute\n    species = 'birds'\n\n    def __init__(self, breed, kind):\n        self.breed = breed\n        self.kind = kind\n\n\nprint(Chicken.species)\n\n\n# METHODS\nclass Circle:\n    pi = 3.142\n\n    # instantiation\n    def __init__(self, diameter, radius=1):\n        self.diameter = diameter\n        self.radius = radius\n\n    def area(self):\n        return (self.radius ** 2) * Circle.pi\n\n\nc = Circle(4)\n\n# INHERITANCE\n","sub_path":"oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"296275639","text":"import argparse\nimport os\nimport pandas as pd\nfrom azureml.studio.core.io.data_frame_directory import load_data_frame_from_directory, save_data_frame_to_directory\n\nprint(\"Replace undefined values with relevant values and rename columns to meaningful names\")\n\nparser = argparse.ArgumentParser(\"normalize\")\nparser.add_argument(\"--filtered_data\", type=str, help=\"filtered taxi data\")\nparser.add_argument(\"--output_normalize\", type=str, help=\"replaced undefined values and renamed columns\")\n\nargs = parser.parse_args()\ncombined_converted_df = load_data_frame_from_directory(args.filtered_data).data\nprint(\"Argument (output normalized taxi data path): %s\" % args.output_normalize)\n\n# These functions replace undefined values and rename to use meaningful names.\nreplaced_stfor_vals_df = (combined_converted_df.replace({\"store_forward\": \"0\"}, {\"store_forward\": \"N\"})\n                          .fillna({\"store_forward\": \"N\"}))\n\nreplaced_distance_vals_df = (replaced_stfor_vals_df.replace({\"distance\": \".00\"}, {\"distance\": 0})\n                             .fillna({\"distance\": 0}))\n\nnormalized_df = replaced_distance_vals_df.astype({\"distance\": 'float64'})\n\ntemp = pd.DatetimeIndex(normalized_df[\"pickup_datetime\"])\nnormalized_df[\"pickup_date\"] = temp.date\nnormalized_df[\"pickup_time\"] = temp.time\n\ntemp = pd.DatetimeIndex(normalized_df[\"dropoff_datetime\"])\nnormalized_df[\"dropoff_date\"] = 
temp.date\nnormalized_df[\"dropoff_time\"] = temp.time\n\ndel normalized_df[\"pickup_datetime\"]\ndel normalized_df[\"dropoff_datetime\"]\n\nnormalized_df.reset_index(inplace=True, drop=True)\n\nif not (args.output_normalize is None):\n    os.makedirs(args.output_normalize, exist_ok=True)\n    print(\"%s created\" % args.output_normalize)\n    save_data_frame_to_directory(args.output_normalize, normalized_df)","sub_path":"components/nyc-taxi-fare-prediction/normalize/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"62613120","text":"import torch\nimport scipy.io\nimport numpy as np\nimport random\nfrom torch.utils.data import random_split, TensorDataset, DataLoader\n\n'''\n# data               | keys  | size         | feature        | class |\n# esardata.mat       | 3+1+1 | (1300, 1200) | 22             | 3+1   |\n# esar_data.mat      | 3+5+1 | (1300, 1200) | 6,3,3,11,4(27) | 3+1   |\n# flevoland_data.mat | 3+5+1 | (750,1024)   | 6,3,3,11,4(27) | 15+1  |\n# san_data.mat       | 3+5+1 | (900,1024)   | 6,3,3,11,4(27) | 4+1   |\n\n# esardata.mat : 'ESARfeat', 'ESARgrth'\n# esar_data.mat : 'esar_t', 'esar_pau', 'esar_kro', 'esar_haa', 'esar_yam', 'esargrth'\n# flevoland_data.mat : 'fle_t'6, 'fle_pau'3, 'fle_kro'3, 'fle_haa'11, 'fle_yam'4, 'flegrth'\n# san_data.mat : 'san_t', 'san_pau', 'san_kro', 'san_haa', 'san_yam', 'sargrth'\nbackground\t255 255 255\nlabels 1-15\n[252, 38, 38], [245, 130, 54], [253, 205, 11], [211, 254, 35], [109, 245, 19], \n[21, 244, 21], [40, 247, 123], [8, 253, 204], [56, 207, 245], [13, 108, 251], \n[33, 33, 245], [117, 30, 249], [206, 35, 249], [253, 50, 213], [248, 49, 129]\n'''\ndef normalize(data):\n    maxValue = torch.max(torch.max(data))\n    minValue = torch.min(torch.min(data))\n    data = (data - minValue) / (maxValue - minValue)\n    return data\n\ndef mat_data(args):\n    data_load = scipy.io.loadmat(args.data_folder + str(args.data_name) + '.mat')\n    data_values = list(data_load.values())\n    # key = list(data_load.keys())\n    # load the features\n    stack_t = torch.Tensor(data_values[3].astype(np.float32))\n    stack_pau = torch.Tensor(data_values[4].astype(np.float32))\n    stack_kro = torch.Tensor(data_values[5].astype(np.float32))\n    stack_haa = torch.Tensor(data_values[6].astype(np.float32))\n    stack_yam = torch.Tensor(data_values[7].astype(np.float32))\n    stack = torch.cat([stack_t, stack_yam], 2) \n    # combine whichever features are needed\n    # stack = normalize(stack)\n    # print('stack1: ', stack.shape)  # (750, 1024, 27)\n    gt = torch.from_numpy(data_values[8])\n    return stack, gt\n\ndef reshape_data(stack,gt):\n    patches_h = stack.shape[0]\n    patches_v = stack.shape[1]\n    stacks = torch.zeros(patches_h*patches_v, stack.shape[2])\n    gts = torch.zeros(patches_h*patches_v).int()\n    i = 0\n    filled = 0\n    for h in range(patches_h):\n        for v in range(patches_v):\n            for layer in range (stack.shape[2]):\n                stacks[i][layer] = stack[h, v, layer]\n            gts[i] = gt[h, v]\n            i += 1\n    return stacks, gts\n\ndef reshape_rectangle_data(stack, gt, size_1, size_2, size_3):\n    h_patches = np.arange(size_3//2, stack.shape[0]-(size_3//2))\n    h_size = len(h_patches)\n    v_patches = np.arange(size_3//2, stack.shape[1]-(size_3//2))\n    v_size = len(v_patches)\n\n    stacks_1 = torch.zeros(h_size * v_size, stack.shape[2], size_1 , size_1)\n    stacks_2 = torch.zeros(h_size * v_size, stack.shape[2], size_2 , size_2)\n    stacks_3 = torch.zeros(h_size * v_size, stack.shape[2], size_3 , size_3)\n    gts = torch.zeros(h_size * v_size).int()\n\n    i = 0\n    for h in h_patches:\n        for v in v_patches:\n            for layer in range (stack.shape[2]):\n                stacks_1[i][layer] = stack[(h-size_1//2):(h+size_1//2+1), (v-size_1//2):(v+size_1//2+1), layer]\n                stacks_2[i][layer] = stack[(h-size_2//2):(h+size_2//2+1), (v-size_2//2):(v+size_2//2+1), layer]\n                stacks_3[i][layer] = stack[(h-size_3//2):(h+size_3//2+1), (v-size_3//2):(v+size_3//2+1), layer]\n            gts[i] = gt[h, v]\n            i += 1\n    print(f\"stacks_1: {stacks_1.shape}\")  # (739872,27,5,5)\n    print(f\"stacks_2: {stacks_2.shape}\")  # (739872,27,11,11)\n    print(f\"stacks_3: {stacks_3.shape}\")  # (739872,27,17,17)\n    print(f\"gts: {gts.shape}\")  # (750360)\n    return stacks_1, stacks_2, stacks_3, gts\n\ndef sar_datesets(args):\n    stack, gt = mat_data(args)\n    stack = normalize(stack)  # normalize the features\n    stacks_1, stacks_2, stacks_3, gts = reshape_rectangle_data(stack,gt,args.sar_size1,args.sar_size2,args.sar_size3)\n    '''\n    # remove the background class\n    index = torch.arange(0, gts.size(0))\n    index_0 = index[gts != 0]\n    stacks_1 = torch.index_select(stacks_1, 0, index_0)  # (184156,27,5,5)\n    stacks_2 = torch.index_select(stacks_2, 0, index_0)\n    stacks_3 = torch.index_select(stacks_3, 0, index_0)\n    gts = torch.index_select(gts, 0, index_0)  # (184156)\n    '''\n    print(f\"Resizing image of size {stack.shape} to image patches {stacks_1.shape}, and ground-truth of size {gt.shape} to ground-truth patches {gts.shape}\")\n\n    num_class = torch.max(gts).numpy().astype(int)\n    print(\"num_class: \", num_class)\n    stacks_1 = stacks_1.numpy()\n    stacks_2 = stacks_2.numpy()\n    stacks_3 = stacks_3.numpy()\n    gts = gts.numpy()\n\n    \n    np.save('./data/' + args.data_name + '/stacks_1.npy', stacks_1)\n    np.save('./data/' + args.data_name + '/stacks_2.npy', stacks_2)\n    np.save('./data/' + args.data_name + '/stacks_3.npy', stacks_3)\n    np.save('./data/' + args.data_name + '/gts.npy', gts)\n    \n    return 1\n\ndef sar_dataloader(args, gts_class, gts, stacks_1, stacks_2, stacks_3, split='train', form='support', shuffle=True):\n    # init parameters\n    if split == 'train':\n        if form == 'support':\n            n_shot = args.train_n_shot\n        elif form == 'query':\n            n_shot = args.train_n_query\n        else:\n            print(\"form error\")\n    elif split == 'test':\n        if form == 'support':\n            n_shot = args.test_n_shot\n        elif form == 'query':\n            n_shot = args.test_n_query\n        else:\n            print(\"form error\")\n    else:\n        print(\"split error\")\n    stack_index = np.arange(0, gts.size(0))  # build the indices into stack\n    index = np.zeros((1, 2), dtype=int)  # start with a zero row to simplify the for loop\n    class_num = np.zeros(args.test_n_way).astype(int)\n    j = 0\n    for i in gts_class:\n        stack_index_i = stack_index[gts == i]\n        gts_index_i = np.ones(n_shot, dtype=int)*j\n        gts_index_i = gts_index_i[:, np.newaxis]  # add a dimension\n        class_num[i] = len(stack_index_i)\n        # print(i, \":\", len(stack_index_i))\n        stack_index_i = np.random.choice(stack_index_i, n_shot, False)\n        # print(\"stack_index_i: \", stack_index_i)\n        stack_index_i = stack_index_i[:, np.newaxis]\n        index_i = np.concatenate((stack_index_i, gts_index_i), axis=1)\n        index = np.concatenate((index, index_i), axis=0)\n        j += 1\n    \n    if shuffle :\n        index = np.random.permutation(np.delete(index, 0 , 0))  # drop the first row and shuffle\n    else:\n        index = np.delete(index, 0 , 0)  # keep the original order\n    # print(\"index: \", index)\n    # print(\"gts: \", gts[133829], gts[181901], gts[21650], gts[51858])\n    epoch_stacks_1 = []\n    epoch_stacks_2 = []\n    epoch_stacks_3 = []\n    epoch_gts = torch.from_numpy(index[:,1])\n    for item in list(index[:,0]):\n        epoch_stacks_1.append(stacks_1[item].unsqueeze(0))  # each row needs an extra dim so the concatenation has the right shape\n        epoch_stacks_2.append(stacks_2[item].unsqueeze(0))\n        epoch_stacks_3.append(stacks_3[item].unsqueeze(0))\n    epoch_stacks_1 = torch.cat(epoch_stacks_1, 
dim=0) # (25,27,5,5)\n epoch_stacks_2 = torch.cat(epoch_stacks_2, dim=0)\n epoch_stacks_3 = torch.cat(epoch_stacks_3, dim=0)\n return epoch_stacks_1, epoch_stacks_2, epoch_stacks_3, epoch_gts, class_num\n","sub_path":"duoyuan/test_sar_data.py","file_name":"test_sar_data.py","file_ext":"py","file_size_in_byte":7445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"233931273","text":"# 1. Manually arrange the sequence [2 7 9 4 1 5 3 6 0 8] in ascending order using insertion sort, bubble sort and selection sort, showing at each step the new configuration of the sequence.\n\ndef insertionSort(l):\n for i in range(1, len(l)):\n currentValue = l[i]\n print(l)\n while i > 0 and l[i - 1] > currentValue:\n l[i] = l[i - 1]\n i = i - 1\n\n l[i] = currentValue\n\n\n# l = [2, 7, 9, 4, 1, 5, 3, 6, 0, 8]\n# insertionSort(l)\n# print(l)\n\n\ndef bubbleSort(l):\n for passNumber in range(len(l) - 1, 0, -1):\n for i in range(passNumber):\n print(l)\n if l[i] > l[i + 1]:\n var = l[i]\n l[i] = l[i + 1]\n l[i + 1] = var\n\n\n# l = [2, 7, 9, 4, 1, 5, 3, 6, 0, 8]\n# bubbleSort(l)\n# print(l)\n\n\ndef selectionSort(l):\n for i in range(len(l) - 1, 0, -1):\n maxIndex = 0\n for currentLocation in range(1, i + 1):\n print(l)\n if l[currentLocation] > l[maxIndex]:\n maxIndex = currentLocation\n\n var = l[i]\n l[i] = l[maxIndex]\n l[maxIndex] = var\n\n\n# l = [2, 7, 9, 4, 1, 5, 3, 6, 0, 8]\n# selectionSort(l)\n# print(l)\n\ndef guessingGame():\n import random as random\n binary = False \n lowNumber, highNumber = 1, 20000\n\n number = random.randint(lowNumber, highNumber)\n print(\"A number has been selected from\", lowNumber, \"and\", highNumber)\n\n lo = 1\n hi = highNumber\n guesses = 0\n\n for i in range(lowNumber, highNumber):\n #guess = int(input(\"What is your guess: \"))\n if binary:\n guess = lo + (hi - lo) // 2 # integer division\n else:\n guess = random.randint(lo, hi)\n print(\"Guess:\", guess)\n guesses += 1 \n \n #check the guessed number\n if guess > number:\n print(\"Lower:\")\n hi = guess # bring down the upper bound\n elif guess < number:\n print(\"Higher:\")\n lo = guess # push up the lower bound\n else:\n break\n\n print(\"That took\", guesses, \"guesses\")\n print(\"That took {0} guesses\".format(guesses))\n\nguessingGame()\n","sub_path":"Week_4/Week4.py","file_name":"Week4.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"153554389","text":"# You toss a fair coin three times write a program to find following:\n# What is the probability of three heads, HHHHHH?\n# What is the probability that you observe exactly one heads?\n# Given that you have observed at least one heads, what is the probability that you observe at least two heads?\n\nsample_set = [\"HHH\", \"HHT\", \"HTH\", \"THH\", \"THT\", \"TTH\",\"TTT\", \"HTT\"]\ntotal_possible_events = len(sample_set)\nrequired_event = [event for event in range(total_possible_events) if sample_set[event] == \"HHH\"]\n# required_event = [event for event in range(total_possible_events) if \"HHH\" in sample_set]\n\n# number_of_required_events = len(required_event)\n# print(len(required_event))\n# probability = number_of_required_events / total_possible_events\n# print(\"probability of three heads = \",number_of_required_events, '/', total_possible_events, \"=\", probability)\n#\n# exactly_one_head = [event for event in range(total_possible_events) if sample_set[event].count(\"H\") == 1]\n# event = 
len(exactly_one_head)\n# probability_of_one_head = event / total_possible_events\n# print(\"probability of exactly one heads = \",event, '/', total_possible_events, \"=\", probability)\n\nrequired_event1 = [sample_set[event] for event in range(total_possible_events) if sample_set[event].count(\"H\") >= 1]\n# print(required_event2)\nevent1 = set()\n# a |= set(l)\nevent1 |= set(required_event1)\nprint(event1)\n# event1 = len(required_event2)\n\nrequired_event2 = [required_event1[item] for item in range(len(required_event1)) if required_event1[item].count(\"H\") >= 2]\nevent2 = set()\nevent2 |= set(required_event2)\nprint(event2)\n\nevent1_and_event2 = event1 & event2\nprint(event1_and_event2)\n\nprobability_of_event1_and_event2 = len(event1_and_event2) / len(sample_set)\nprint(\"probability_of_event1_and_event2\", len(event1_and_event2), \"/\", len(sample_set), \" = \", probability_of_event1_and_event2)\nprobability_of_event2 = len(event1) / len(sample_set)\nprint(\"probability_of_event2 = \", len(event1) , \" / \" , len(sample_set), \" = \", probability_of_event2)\n\n\ntotal_probability = probability_of_event1_and_event2 / probability_of_event2\nprint(total_probability)","sub_path":"Probability/Probability_4.py","file_name":"Probability_4.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41802536","text":"import pickle\nimport struct\n\nint_size = struct.calcsize('>I')\n\ntypes = {\n 0 : 'NetworkQueryMessage',\n 'NetworkQueryMessage' : 0,\n 1 : 'NetworkReplyMessage',\n 'NetworkReplyMessage' : 1,\n 2 : 'RoutingMessage',\n 'RoutingMessage' : 2,\n 3 : 'IdRequest',\n 'IdRequest' : 3,\n 4 : 'IdReply',\n 'IdReply' : 4,\n 5 : 'IdentifyRequest',\n 'IdentifyRequest' : 5,\n 6 : 'IdentifyReply',\n 'IdentifyReply' : 6,\n 7 : 'NetworkMessage',\n 'NetworkMessage' : 7,\n 8 : 'IpRequest',\n 'IpRequest' : 8,\n 9 : 'IpRelease',\n 'IpRelease' : 9,\n 10 : 'IpResponse',\n 'IpResponse' : 10,\n 11 : 'SimulationMessage',\n 'SimulationMessage' : 11,\n 12 : 'PlatformStartedMessage',\n 'PlatformStartedMessage' : 12,\n 13 : 'PlatformStopMessage',\n 'PlatformStopMessage' : 13,\n 14 : 'StartNodes',\n 'StartNodes' : 14,\n 15 : 'VmStarted',\n 'VmStarted' : 15,\n 16 : 'ChangeNetworkMessage',\n 'ChangeNetworkMessage' : 16,\n}\n\nclass SerializedMessage:\n def __init__(self, message, serialize = True):\n if serialize:\n self.string = pickle.dumps(message, -1)\n else:\n self.string = message\n self.size = len(self.string)\n\n def __str__(self):\n return struct.pack('>I', self.size) + self.string\n\n def get_message(self):\n return pickle.loads(self.string)\n\nclass NetworkQueryMessage:\n def __init__(self, address, netmask):\n self.type = types['NetworkQueryMessage']\n self.address = address\n self.netmask = netmask\n\nclass NetworkReplyMessage:\n def __init__(self, address, netmask, switch):\n self.type = types['NetworkReplyMessage']\n self.address = address\n self.netmask = netmask\n self.switch = switch\n\nclass RoutingMessage:\n def __init__(self, source_id, dest_id, msg):\n self.type = types['RoutingMessage']\n self.source = source_id\n self.dest_id = dest_id\n self.message = msg\n \nclass IdRequest:\n def __init__(self):\n self.type = types['IdRequest']\n\nclass IdReply:\n def __init__(self, id):\n self.type = types['IdReply']\n self.id = id\n \nclass IdentifyRequest:\n def __init__(self, id):\n self.type = types['IdentifyRequest']\n self.id = id\n \nclass IdentifyReply:\n def __init__(self, id, address):\n self.type = 
types['IdentifyReply']\n self.id = id\n self.address = address\n \nclass NetworkMessage:\n def __init__(self, key, msg):\n self.type = types['NetworkMessage']\n self.key = key\n self.msg = msg\n\nclass IpRequest:\n def __init__(self):\n self.type = types['IpRequest']\n\nclass IpRelease:\n def __init__(self, ip):\n self.type = types['IpRelease']\n self.ip = ip\n\nclass IpResponse:\n def __init__(self, ip):\n self.type = types['IpResponse']\n self.ip = ip\n \nclass SimulationMessage:\n def __init__(self, msg):\n self.type = types['SimulationMessage']\n self.msg = msg\n\nclass PlatformStartedMessage:\n def __init__(self):\n self.type = types['PlatformStartedMessage']\n \nclass PlatformStopMessage:\n def __init__(self):\n self.type = types['PlatformStopMessage']\n \nclass StartVmsMessage:\n def __init__(self, vms):\n self.type = types['StartNodes']\n self.vms = vms\n \nclass VmStartedMessage:\n def __init__(self, vm, status):\n self.type = types['VmStarted']\n self.vm = vm\n self.status = status\n \nclass ChangeNetworkMessage:\n def __init__(self, vm, address, netmask):\n self.type = types['ChangeNetworkMessage']\n self.vm = vm\n self.address = address\n self.netmask = netmask","sub_path":"proiect/simulation_platform/message/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119729676","text":"import asyncio\nimport json\nimport threading\n\nfrom multicp import multicp_logger_nr\nfrom multicp.pool.ConnectionClass import ConnectionClass\n\n\n\nclass AsyncConnectionClass(ConnectionClass):\n def run(self):\n loop = asyncio.get_event_loop()\n loop.create_task(self._init_con())\n self._lock = asyncio.Lock()\n\n async def _init_con(self):\n while len(self._connection_dict) < self._init_num:\n if not await self._new_connection():\n raise RuntimeError(\"Init connection num over\")\n\n async def _new_connection(self):\n async with self._lock:\n if self._max_count == \"auto\" or self._max_count > len(self._connection_dict):\n con = self._con_class(**self._params)\n self._connection_dict[str(con.id)] = con\n await con.connect()\n self._ready_connection_queue.append(con)\n\n return True\n else:\n return False\n\n async def release(self, connection):\n async with self._lock:\n if str(connection.id) in self._connection_dict:\n self._ready_connection_queue.append(connection)\n else:\n raise Exception(\"Connection not find in running queue ,%s\" % str(self._name))\n # multicp_logger_nr.info(json.dumps(self.connection_info()))\n\n async def clean(self, clean_num=None):\n wait_close = []\n async with self._lock:\n if not clean_num:\n clean_num = len(self._ready_connection_queue)\n if not clean_num <= len(self._ready_connection_queue):\n raise RuntimeError(\n \"ready queue only have {} connection, but clean {}\".format(len(self._ready_connection_queue),\n clean_num))\n while clean_num>0:\n clean_num -= 1\n con = self._ready_connection_queue.pop()\n del self._connection_dict[str(con.id)]\n wait_close.append(con)\n\n multicp_logger_nr.info(json.dumps(self.connection_info()))\n for i in wait_close:\n try:\n await i.disconnect()\n except Exception as e:\n multicp_logger_nr.info(\"Connection can't disconnect, remove it , id= {}\".format(i.id))\n\n async def _pop_connection(self):\n async with self._lock:\n if len(self._ready_connection_queue) > 0:\n con = self._ready_connection_queue.popleft()\n return con\n else:\n return False\n\n async def get(self):\n while True:\n con = await 
self._pop_connection()\n            if con is False:\n                await self._new_connection()\n            else:\n                check_result = await con.check()\n                if check_result is not False:\n                    return con\n                else:\n                    await self.release(con)\n                    await self.clean(1)\n            await asyncio.sleep(0.01)\n\n    def with_connection(self):\n        return WithObj(self)\n\n\nclass WithObj:\n    def __init__(self, con_class):\n        self.con_class = con_class\n        self.con = None\n\n    async def __aenter__(self):\n        self.con = await self.con_class.get()\n        return self.con\n\n    async def __aexit__(self, exc_type, exc, tb):\n        await self.con_class.release(self.con)","sub_path":"multicp/pool/AsyncConnectionClass.py","file_name":"AsyncConnectionClass.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197691995","text":"#!/usr/bin/env python3\nimport click\n\nfrom .Solver import Solver\nfrom .functions import run\n\n\n@click.command()\n@click.argument(\n    'input_file',\n    type=click.File('r'),\n)\ndef cli(input_file):\n    \"\"\"\n    This program allows you to solve any sudoku puzzle,\n    if it's possible to solve.\n    \"\"\"\n    solution = run(Solver, input_file)\n    click.echo(solution)\n","sub_path":"sudoku/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"264274684","text":"import xml.etree.ElementTree as ET\r\nimport fileinput, errno, sys, re, types, pprint\r\n\r\n'''\r\n\r\n\tSystem calls class - contains various utility functions for\r\n\tmain loop. Analyses data, types, variables.\r\n\r\n'''\r\n\r\nclass sycalls:\r\n\t\r\n\t'''\r\n\t\tIf something went wrong, this function terminates\r\n\t\tsession with debug message (if flag set) and \r\n\t\tfitting return code.\r\n\t'''\r\n\t\r\n\tdef failed(self, flag, inst, code):\r\n\t\tif flag:\r\n\t\t\tprint(\"Error \"+inst)\r\n\t\tsys.exit(code)\r\n\r\n\t'''\r\n\t\tFind if frame exists.\r\n\t'''\r\n\t\t\r\n\tdef isframe(self, frame):\r\n\t\tif frame == 'TF':\r\n\t\t\tif 'tframe' in globals():\r\n\t\t\t\treturn True\r\n\t\telif frame == 'LF':\r\n\t\t\tif 'lframe' in globals() and len(lframe) > 0:\r\n\t\t\t\treturn True\r\n\t\telif frame == 'GF':\r\n\t\t\treturn True\r\n\t\treturn False\r\n\r\n\t'''\r\n\t\tReturn the corresponding frame from a string with\r\n\t\t@ delimiter.\r\n\t'''\r\n\t\r\n\tdef disassemble(self, frame):\r\n\t\tframe = frame.split(\"@\")[0]\r\n\t\tif frame == 'TF':\r\n\t\t\treturn tframe\r\n\t\telif frame == 'LF':\r\n\t\t\treturn lframe[0]\r\n\t\telif frame == 'GF':\r\n\t\t\treturn gframe\r\n\t\r\n\t'''\r\n\t\tFind if frame exists and if variable is\r\n\t\tin frame.\r\n\t'''\r\n\t\r\n\tdef complexvar(self, var, inst):\r\n\t\tdata = var.split(\"@\")\r\n\t\tif self.isframe(data[0]):\r\n\t\t\tif data[1] not in self.disassemble(var):\r\n\t\t\t\tself.failed(DEBUG, inst, 54)\r\n\t\telse:\r\n\t\t\tself.failed(DEBUG, inst, 55);\r\n\r\n\t'''\r\n\t\tUsed for triplet-based array indexing\r\n\t\tjumps.\r\n\t'''\r\n\t\r\n\tdef tetrajump(self, index):\r\n\t\treturn (index-1)*3\r\n\r\n\t'''\r\n\t\tFind if variable has correct type with\r\n\t\trespect to its value.\r\n\t'''\r\n\t\r\n\tdef goodtype(self, value, type, inst):\r\n\t\tself.complexvar(value,inst)\r\n\t\ttemp = sc.disassemble(value)[ut.splitter(value,1)]\r\n\t\tif not isinstance(temp,type):\r\n\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\telse:\r\n\t\t\treturn temp\r\n\r\n\t'''\r\n\t\tOperations (ADD,SUB,MUL,IDIV) simulation.\r\n\t'''\r\n\t\r\n\tdef operate(self, types, values, 
inst):\r\n\t\tsc.complexvar(values[0],inst)\r\n\t\tvals = []\r\n\t\tresult = None\r\n\t\tfor i in range(1,3):\r\n\t\t\tif types[i] == 'var':\r\n\t\t\t\tvals.append(sc.goodtype(values[i],int,inst))\r\n\t\t\telif types[i] == 'int':\r\n\t\t\t\tvals.append(values[i])\r\n\t\t\telse:\r\n\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\telse:\r\n\t\t\tif inst == 'ADD':\r\n\t\t\t\tresult = int(vals[0]) + int(vals[1])\r\n\t\t\telif inst == 'SUB':\r\n\t\t\t\tresult = int(vals[0]) - int(vals[1])\r\n\t\t\telif inst == 'MUL':\r\n\t\t\t\tresult = int(vals[0]) * int(vals[1])\r\n\t\t\telif inst == 'IDIV':\r\n\t\t\t\tif int(vals[1]) == 0:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 57)\r\n\t\t\t\tresult = int(vals[0]) // int(vals[1])\r\n\t\t\tself.disassemble(values[0])[ut.splitter(values[0],1)] = result\r\n\r\n\t'''\r\n\t\tDynamically assume type from variable\r\n\t\tvalue.\r\n\t'''\r\n\t\r\n\tdef fetchtype(self, value):\r\n\t\tif (value == False or value == True) and type(value) == type(True):\r\n\t\t\treturn 'bool'\r\n\t\telif isinstance(value, int):\r\n\t\t\treturn 'int'\r\n\t\telif isinstance(value, str):\r\n\t\t\treturn 'string'\r\n\t\telif value is None:\r\n\t\t\treturn 'none'\r\n\r\n\t'''\r\n\t\tLT, GT, EQ comparison functions simulation.\r\n\t'''\r\n\t\r\n\tdef compare(self, types, values, inst, ret):\r\n\t\tif not ret:\r\n\t\t\tsc.complexvar(values[0], inst)\r\n\t\tptype = None\r\n\t\tresult = None\r\n\t\tvals = []\r\n\t\tfor i in range(1,3):\r\n\t\t\tif types[i] == 'var':\r\n\t\t\t\tsc.complexvar(values[i], inst)\r\n\t\t\t\tcurrtype = sc.fetchtype(self.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\tif ptype is not None:\r\n\t\t\t\t\tif currtype != ptype:\r\n\t\t\t\t\t\tself.failed(DEBUG, inst, 53)\r\n\t\t\t\tptype = currtype\r\n\t\t\t\tvals.append(self.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\telse:\r\n\t\t\t\tcurrtype = types[i]\r\n\t\t\t\tif ptype is not None:\r\n\t\t\t\t\tif currtype != ptype:\r\n\t\t\t\t\t\tself.failed(DEBUG, inst, 53)\r\n\t\t\t\tptype = currtype\r\n\t\t\t\tvals.append(ut.justify_value(types[i],values[i]))\r\n\t\telse:\r\n\t\t\tframe = self.disassemble(values[0])\r\n\t\t\tif inst == 'LT':\r\n\t\t\t\tif ret:\r\n\t\t\t\t\treturn vals[0] < vals[1]\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = vals[0] < vals[1]\r\n\t\t\telif inst == 'GT':\r\n\t\t\t\tif ret:\r\n\t\t\t\t\treturn vals[0] > vals[1]\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = vals[0] > vals[1]\r\n\t\t\telif inst == 'EQ':\r\n\t\t\t\tif ret:\r\n\t\t\t\t\treturn vals[0] == vals[1]\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = vals[0] == vals[1]\r\n\r\n\t'''\r\n\t\tAND, OR, NOT logic functions simulation.\r\n\t'''\r\n\t\r\n\tdef logic(self, types, values, inst):\r\n\t\tsc.complexvar(values[0], inst)\r\n\t\tvals = []\r\n\t\tfor i in range(1,3):\r\n\t\t\tif types[i] == 'var':\r\n\t\t\t\tcurrtype = sc.fetchtype(self.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\tif currtype != 'bool':\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telse:\r\n\t\t\t\t\tvals.append(self.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\telif types[i] == 'bool':\r\n\t\t\t\tvals.append(ut.justify_value(types[i],values[i]))\r\n\t\t\telse:\r\n\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\tif inst == 'NOT':\r\n\t\t\t\tbreak;\r\n\t\tframe = self.disassemble(values[0])\r\n\t\tif inst == 'AND':\r\n\t\t\tframe[ut.splitter(values[0],1)] = (vals[0] == True and vals[1] == True)\r\n\t\telif inst == 'OR':\r\n\t\t\tframe[ut.splitter(values[0],1)] = (vals[0] == True or vals[1] == True)\r\n\t\telif inst == 
'NOT':\r\n\t\t\tframe[ut.splitter(values[0],1)] = (not vals[0])\r\n\t\r\n\t'''\r\n\t\tFind if label exists.\r\n\t'''\r\n\t\r\n\tdef emptylabel(self, name, inst):\r\n\t\tif name not in labels:\r\n\t\t\tself.failed(DEBUG, inst, 52)\r\n\r\n'''\r\n\tUtility functions with general purpose. They are\r\n\tnot program-type related and can be reused.\r\n'''\r\n\t\t\t\r\nclass utils: \r\n\t\r\n\t'''\r\n\t\tJustify value with respect to its type.\r\n\t\tFor example: when type is string, change\r\n\t\tvalue of variable to actual python string.\r\n\t'''\r\n\t\r\n\tdef justify_value(self, type, value):\r\n\t\tif type == 'string':\r\n\t\t\tif value is None:\r\n\t\t\t\tvalue = ''\r\n\t\t\treturn str(value)\r\n\t\telif type == 'int':\r\n\t\t\treturn int(value)\r\n\t\telif type == 'bool':\r\n\t\t\tif value == 'true':\r\n\t\t\t return True\r\n\t\t\telif value == 'false':\r\n\t\t\t\treturn False;\r\n\t\t\telse: return None\r\n\r\n\t\r\n\t'''\r\n\t\tEscape sequences evaluator.\r\n\t'''\r\n\t\r\n\tdef escaper(self, value):\r\n\t\tmatch = re.findall(\"\\\\\\\\\\d{3}\",str(value))\r\n\t\tfor m in match:\r\n\t\t\tvalue = value.replace(m[0:],chr(int(m[1:])))\r\n\t\telse:\r\n\t\t\treturn value\r\n\t\r\n\t'''\r\n\t\tCheck if escape is valid (good range).\r\n\t'''\r\n\t\r\n\tdef isvalidescape(self, value):\r\n\t\tfld = re.findall(\"(\\\\\\\\[^\\d\\\\\\\\]+\\\\\\\\\\d{1}[^\\d]|\\\\\\\\\\d{1,2}[^\\d]|\\\\\\\\\\D|\\\\\\\\\\d{1,2}$)\",str(value))\r\n\t\tif len(fld) == 0:\r\n\t\t\treturn True;\r\n\t\telse:\r\n\t\t\treturn False;\r\n\r\n\t'''\r\n\t\tSplit array by delimiter and return given index.\r\n\t'''\r\n\t\r\n\tdef splitter(self, source, index):\r\n\t\treturn source.split(\"@\")[index]\r\n\r\n\t'''\r\n\t\tReturn value after equality symbol.\r\n\t'''\r\n\t\r\n\tdef syarghandle(self, arg):\r\n\t\tfor val in sys.argv:\r\n\t\t\tif val.split(\"=\")[0] == arg:\r\n\t\t\t\treturn val.split(\"=\")[1]\r\n\r\n'''\r\n\tSystem variables and main data carriers, frame simulators.\r\n'''\r\n\r\ntree = ''\r\nDEBUG = False\r\n\r\ndataset = []\r\nargs = []\r\nargvals = []\r\ngframe = {}\r\nlabels = {}\r\nvalstack = []\r\nretjumps = []\r\n\r\n'''\r\n\tInstantiate core classes.\r\n'''\r\n\r\nsc = sycalls();\r\nut = utils();\r\n\r\n'''\r\n\tXML tree loading attempt.\r\n'''\r\n\r\nfile = ut.syarghandle(\"--source\")\r\n\r\ntry:\r\n\twith open(sys.argv[1].split(\"=\")[1]) as myfile:\r\n\t\ttree = myfile.read()\r\nexcept FileNotFoundError:\r\n\tsc.failed(DEBUG, \"FOPEN\", 31)\r\n\r\nif tree == '' : sc.failed(DEBUG, 'XMLERR', 31)\r\n\r\ntry:\r\n\txmltree = ET.fromstring(tree)\r\nexcept ET.ParseError:\r\n\tsc.failed(DEBUG, \"XMLERR\", 31)\r\n\r\n'''\r\n\tOperations collection.\r\n'''\r\n\r\noperations = ['MOVE',\r\n              'CREATEFRAME',\r\n              'PUSHFRAME',\r\n              'POPFRAME',\r\n              'DEFVAR',\r\n              'CALL',\r\n              'RETURN',\r\n              'PUSHS',\r\n              'POPS',\r\n              'ADD',\r\n              'SUB',\r\n              'MUL',\r\n              'IDIV',\r\n              'LT',\r\n              'GT',\r\n              'EQ',\r\n              'AND',\r\n              'OR',\r\n              'NOT',\r\n              'INT2CHAR',\r\n              'STRI2INT',\r\n              'READ',\r\n              'WRITE',\r\n              'CONCAT',\r\n              'STRLEN',\r\n              'GETCHAR',\r\n              'SETCHAR',\r\n              'TYPE',\r\n              'LABEL',\r\n              'JUMP',\r\n              'JUMPIFEQ',\r\n              'JUMPIFNEQ',\r\n              'DPRINT',\r\n              'BREAK'\r\n              ];\r\n\r\n'''\r\n\tParse xml tree.\r\n'''\r\n\r\nstarted = False\r\nfor child in xmltree:\r\n\tif started:\r\n\t\tdataset.append(args)\r\n\t\tdataset.append(argvals)\r\n\t\targs = []\r\n\t\targvals = []\r\n\tdataset.append(child.attrib[\"opcode\"])\r\n\tstarted = True\r\n\r\n\tfor subchild in child:\r\n\t\tif child.attrib[\"opcode\"] == 'LABEL':\r\n\t\t\tif subchild.text not in 
labels:\r\n\t\t\t\tlabels[subchild.text] = int(child.attrib[\"order\"])\r\n\t\t\telse:\r\n\t\t\t\tsc.failed(DEBUG, child.attrib[\"opcode\"], 52)\r\n\r\n\t\targs.append(subchild.attrib[\"type\"])\r\n\t\targvals.append(subchild.text)\r\n\r\n\t'''\r\n\t\tMain loop with commands processing and evaluating.\r\n\t'''\r\n\r\nelse:\r\n\tdataset.append(args)\r\n\tdataset.append(argvals)\r\n\tinc = -1\r\n\twhile inc < len(dataset)-1:\r\n\t\tinc+=1\r\n\t\tif inc%3 == 0:\r\n\t\t\tinst = dataset[inc]\r\n\t\t\ttypes = dataset[inc+1]\r\n\t\t\tvalues = dataset[inc+2]\r\n\r\n # MOVE\r\n\r\n\t\t\tif inst == 'MOVE':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tacceptor = sc.disassemble(values[0])\r\n\r\n\t\t\t\tif types[1] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[1],inst)\r\n\t\t\t\t\tdonor = sc.disassemble(values[1])\r\n\t\t\t\t\tacceptor[ut.splitter(values[0],1)] = donor[ut.splitter(values[1],1)]\r\n\t\t\t\telse:\r\n\t\t\t\t\tacceptor[ut.splitter(values[0],1)] = ut.justify_value(types[1],values[1])\r\n\r\n # CREATEFRAME @void\r\n\r\n\t\t\telif inst == 'CREATEFRAME':\r\n\t\t\t\ttframe = {}\r\n \r\n # PUSHFRAME @void\r\n\r\n\t\t\telif inst == 'PUSHFRAME':\r\n\t\t\t\tif 'lframe' not in globals():\r\n\t\t\t\t\tlframe = []\r\n\t\t\t\tif sc.isframe('TF'):\r\n\t\t\t\t\tlframe.insert(0, tframe)\r\n\t\t\t\t\tdel tframe;\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 55)\r\n \r\n # POPFRAME @void\r\n\r\n\t\t\telif inst == 'POPFRAME':\r\n\t\t\t\tif sc.isframe('LF'):\r\n\t\t\t\t\ttframe = lframe[0]\r\n\t\t\t\t\tlframe.pop(0)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 55)\r\n \r\n # DEFVAR @var\r\n\r\n\t\t\telif inst == 'DEFVAR':\r\n\t\t\t\tif sc.isframe(ut.splitter(values[0],0)):\r\n\t\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = None\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 55)\r\n \r\n # CALL @label\r\n\t\t\t\r\n\t\t\telif inst == 'CALL':\r\n\t\t\t\tretjumps.insert(0, (inc//3)+1)\r\n\t\t\t\tsc.emptylabel(values[0], inst)\r\n\t\t\t\tinc = sc.tetrajump(labels[values[0]])\r\n\r\n # RETURN @void\r\n\r\n\t\t\telif inst == 'RETURN':\r\n\t\t\t\tif len(retjumps) > 0:\r\n\t\t\t\t\tinc = sc.tetrajump(retjumps[0])\r\n\t\t\t\t\tretjumps.pop(0)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst,56)\r\n\r\n # PUSHS @sym\r\n\r\n\t\t\telif inst == 'PUSHS':\r\n\t\t\t\tif types[0] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\t\tvalstack.insert(0,sc.disassemble(values[0])[ut.splitter(values[0],1)])\r\n\t\t\t\telse:\r\n\t\t\t\t\tvalstack.insert(0,ut.justify_value(types[0],ut.splitter(values[0],0)))\r\n \r\n # POPS @var\r\n\t\t\t\r\n\t\t\telif inst == 'POPS':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tif len(valstack) == 0:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 56)\r\n\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = valstack[0]\r\n\t\t\t\tvalstack.pop(0)\r\n\r\n\t\t # ADD/SUB/MUL/IDIV/LT/GT/EQ/AND/OR/NOT @var @sym @sym\r\n\r\n\t\t\telif inst == 'ADD':\r\n\t\t\t\tsc.operate(types,values,inst)\r\n\t\t\telif inst == 'SUB':\r\n\t\t\t\tsc.operate(types,values,inst)\r\n\t\t\telif inst == 'MUL':\r\n\t\t\t\tsc.operate(types,values,inst)\r\n\t\t\telif inst == 'IDIV':\r\n\t\t\t\tsc.operate(types,values,inst)\r\n\t\t\telif inst == 'LT':\r\n\t\t\t\tsc.compare(types,values,inst,False)\r\n\t\t\telif inst == 'GT':\r\n\t\t\t\tsc.compare(types,values,inst,False)\r\n\t\t\telif inst == 'EQ':\r\n\t\t\t\tsc.compare(types,values, inst,False)\r\n\t\t\telif inst == 'AND':\r\n\t\t\t\tsc.logic(types, values, 
inst)\r\n\t\t\telif inst == 'OR':\r\n\t\t\t\tsc.logic(types, values, inst)\r\n\t\t\telif inst == 'NOT':\r\n\t\t\t\tsc.logic(types, values, inst)\r\n\r\n            # INT2CHAR @var @sym\r\n\r\n\t\t\telif inst == 'INT2CHAR':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\tif types[1] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[1],inst)\r\n\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[1])[ut.splitter(values[1],1)])\r\n\t\t\t\t\tif currtype == 'int':\r\n\t\t\t\t\t\tval = sc.disassemble(values[1])[ut.splitter(values[1],1)]\r\n\t\t\t\t\t\tif val < 1114112:\r\n\t\t\t\t\t\t\tframe[ut.splitter(values[0],1)] = chr(val)\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 58)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telif types[1] == 'int':\r\n\t\t\t\t\tval = ut.justify_value(types[1],values[1])\r\n\t\t\t\t\tif val < 1114112:\r\n\t\t\t\t\t\tframe[ut.splitter(values[0],1)] = chr(val)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 58)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\r\n            # STRI2INT @var @sym @sym\r\n\t\t\t\r\n\t\t\telif inst == 'STRI2INT':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tdata = []\r\n\t\t\t\tfor i in range(1,3):\r\n\t\t\t\t\tif types[i] == 'var':\r\n\t\t\t\t\t\tsc.complexvar(values[i],inst)\r\n\t\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\tif currtype == 'string' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telif currtype == 'int' and i == 2:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif types[i] == 'string' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telif types[i] == 'int' and i == 2:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telse:\r\n\t\t\t\t\tif data[1] > len(data[0])-1 or data[1] < 0:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 58)\r\n\t\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = ord(data[0][data[1]])\r\n\r\n            # READ @var @type\r\n\r\n\t\t\telif inst == 'READ':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\ttype = values[1]\r\n\t\t\t\tnewval = None\r\n\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\ttry:\r\n\t\t\t\t\tnewval = ut.justify_value(type,input())\r\n\t\t\t\t\tif newval is None or ut.isvalidescape(newval) is not True:\r\n\t\t\t\t\t\traise ValueError('')\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tif type == 'int':\r\n\t\t\t\t\t\tnewval = 0\r\n\t\t\t\t\telif type == 'string':\r\n\t\t\t\t\t\tnewval = ''\r\n\t\t\t\t\telif type == 'bool':\r\n\t\t\t\t\t\tnewval = False\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = newval\r\n\r\n            # WRITE @sym\r\n\r\n\t\t\telif inst == 'WRITE':\r\n\t\t\t\tif types[0] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\t\ttoprint = sc.disassemble(values[0])[ut.splitter(values[0],1)]\r\n\t\t\t\t\tif toprint is not None:\r\n\t\t\t\t\t\tprint(ut.escaper(toprint),end=\"\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 56)\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(ut.escaper(ut.splitter(values[0],0)),end=\"\")\r\n\r\n            # CONCAT @var @sym @sym\r\n\r\n\t\t\telif inst == 'CONCAT':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tvals = []\r\n\t\t\t\tfor i in 
range(1,3):\r\n\t\t\t\t\tif types[i] == 'var':\r\n\t\t\t\t\t\tsc.complexvar(values[i],inst)\r\n\t\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\tif currtype == 'string':\r\n\t\t\t\t\t\t\tvals.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\t\telif types[i] == 'string':\r\n\t\t\t\t\t\tvals.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telse:\r\n\t\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = vals[0] + vals[1]\r\n\r\n # STRLEN @var @sym\r\n\r\n\t\t\telif inst == 'STRLEN':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tlength = 0\r\n\t\t\t\tif types[1] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[1],inst)\r\n\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[1])[ut.splitter(values[1],1)])\r\n\t\t\t\t\tif currtype == 'string':\r\n\t\t\t\t\t\tlength = len(sc.disassemble(values[1])[ut.splitter(values[1],1)])\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telif types[1] == 'string':\r\n\t\t\t\t\tlength = len(values[1])\r\n\t\t\t\telse:\r\n\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\tframe[ut.splitter(values[0],1)] = length\r\n\r\n\r\n # GETCHAR @var @sym @sym\r\n\r\n\t\t\telif inst == 'GETCHAR':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tdata = []\r\n\t\t\t\tfor i in range(1,3):\r\n\t\t\t\t\tif types[i] == 'var':\r\n\t\t\t\t\t\tsc.complexvar(values[i],inst)\r\n\t\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\tif currtype == 'string' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telif currtype == 'int' and i == 2:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif types[i] == 'string' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telif types[i] == 'int' and i == 2:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telse:\r\n\t\t\t\t\tif data[1] > len(data[0])-1 or data[1] < 0:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 58)\r\n\t\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = data[0][data[1]]\r\n\r\n # SETCHAR @var @sym @sym\r\n\r\n\t\t\telif inst == 'SETCHAR':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tdata = []\r\n\t\t\t\tfor i in range(1,3):\r\n\t\t\t\t\tif types[i] == 'var':\r\n\t\t\t\t\t\tsc.complexvar(values[i],inst)\r\n\t\t\t\t\t\tcurrtype = sc.fetchtype(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\tif currtype == 'int' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telif currtype == 'string' and i == 2:\r\n\t\t\t\t\t\t\tdata.append(sc.disassemble(values[i])[ut.splitter(values[i],1)])\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tif types[i] == 'int' and i == 1:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telif types[i] == 'string' and i == 
2:\r\n\t\t\t\t\t\t\tdata.append(ut.justify_value(types[i],values[i]))\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tsc.failed(DEBUG, inst, 53)\r\n\t\t\t\telse:\r\n\t\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\t\tif data[0] > len(frame[ut.splitter(values[0],1)])-1 or data[0] < 0:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 58)\r\n\t\t\t\t\ttemp = list(frame[ut.splitter(values[0],1)])\r\n\t\t\t\t\ttemp[data[0]] = data[1]\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = ''.join(temp)\r\n\r\n # TYPE @var @sym\r\n\r\n\t\t\telif inst == 'TYPE':\r\n\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\tframe = sc.disassemble(values[0])\r\n\t\t\t\tif types[1] == 'var':\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = sc.fetchtype(sc.disassemble(values[1])[ut.splitter(values[1],1)])\r\n\t\t\t\telse:\r\n\t\t\t\t\tframe[ut.splitter(values[0],1)] = types[1]\r\n\r\n # LABEL @label\r\n\r\n\t\t\telif inst == 'LABEL':\r\n\t\t\t\tpass\r\n\r\n # JUMP @label\r\n\r\n\t\t\telif inst == 'JUMP':\r\n\t\t\t\tsc.emptylabel(values[0], inst)\r\n\t\t\t\tinc = sc.tetrajump(labels[values[0]])\r\n\r\n # JUMPIFEQ @label\r\n\r\n\t\t\telif inst == 'JUMPIFEQ':\r\n\t\t\t\tequals = sc.compare(types,values,'EQ',True)\r\n\t\t\t\tif equals:\r\n\t\t\t\t\tsc.emptylabel(values[0], inst)\r\n\t\t\t\t\tinc = sc.tetrajump(labels[values[0]])\r\n\r\n # JUMPIFNEQ @label\r\n\r\n\t\t\telif inst == 'JUMPIFNEQ':\r\n\t\t\t\tequals = sc.compare(types,values,'EQ',True)\r\n\t\t\t\tif not equals:\r\n\t\t\t\t\tsc.emptylabel(values[0], inst)\r\n\t\t\t\t\tinc = sc.tetrajump(labels[values[0]])\r\n\r\n # DPRINT @sym\r\n\r\n\t\t\telif inst == 'DPRINT':\r\n\t\t\t\tif types[0] == 'var':\r\n\t\t\t\t\tsc.complexvar(values[0],inst)\r\n\t\t\t\t\ttoprint = sc.disassemble(values[0])[ut.splitter(values[0],1)]\r\n\t\t\t\t\tif toprint is not None:\r\n\t\t\t\t\t\tsys.stderr.write(ut.escaper(toprint))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsc.failed(DEBUG, inst, 56)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsys.stderr.write(ut.escaper(ut.splitter(values[0],0)))\r\n\r\n # BREAK @void\r\n\r\n\t\t\telif inst == 'BREAK':\r\n\t\t\t\tpass\r\n\r\nsc.failed(DEBUG, \"OK\", 0)\r\n","sub_path":"interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":19115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201587014","text":"import re\nimport pprint\n\ndef searchByLemma(q, cwn, exact=True, printExamples=True, printSense=True, printLemma=True):\n \"\"\"\n Search by lemma, can use regex in `q`.\n Prints out the hierarchy from lemma to senses to examples.\n `exact=True`: whether to perform exact (i.e., not regex) search. \n Set `exact=False` to search with regex.\n `print*=True`: whether to print examples, sense, or lemma.\n \"\"\"\n if exact: \n q = f\"^{q}$\"\n \n lemmas = cwn.find_lemma(q)\n for i, lemma in enumerate(lemmas):\n print()\n if printLemma:\n print(f\"{i+1}.\", lemma.lemma, lemma.id, end=\"\\n\")\n for j, sense in enumerate(lemma.senses):\n if printSense:\n print(\"\\t\" * printLemma, end=\"\")\n print(f\"{j+1:>3}) {sense.id:<10} pos: {sense.pos:<8} def: {sense.definition}\")\n #print(f\"{j+1:>3}) {lemma.ljust(4, ' ')} id: {sense.id:<10} pos: {sense.pos:<8} def: {sense.definition}\")\n for s, sent in enumerate(sense.examples):\n if printExamples:\n print(\"\\t\" * sum([printSense, printLemma]), end=\"\")\n print(f\"{s+1}) {sent}\")\n print()\n\n\ndef compareLemma(l1, l2, cwn):\n \"\"\"\n Compare all senses of two lemmas by showing their senses side-by-side.\n The lemma with more senses is printed on the left.\n `l1`: str. 
lemma 1\n    `l2`: str. lemma 2\n    \"\"\"\n    l1 = f\"^{l1}$\"\n    l2 = f\"^{l2}$\"\n    \n    l1_lst = [str(li).replace('', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l1)]\n    l2_lst = [str(li).replace('', '').replace('(', '').replace(')', '') for li in cwn.find_senses(lemma=l2)]\n    \n    if len(l1_lst) < len(l2_lst):\n        zip_lst = list(zip(l2_lst, l1_lst))\n        long = l2_lst\n        short = l1_lst\n    else:\n        zip_lst = list(zip(l1_lst, l2_lst))\n        long = l1_lst\n        short = l2_lst\n    \n    # Print paired list\n    for i, (a, b) in enumerate(zip_lst):\n        #print(f\"{a:50}{b:>50}\")\n        print(f\"{i+1:>3} {a.ljust(45, '　')}{b}\")  # pad with full-width spaces\n    # Print remaining list\n    if len(long) > len(short):\n        for item in long[len(zip_lst):]:\n            i += 1\n            print(f\"{i+1:>3} {item}\")\n\n    \ndef exploreSense(q, cwn, type=\"definition\", printExamples=False):\n    \"\"\"\n    Exploratory search over definitions or examples.\n    Can use regex in `q`.\n    The matched string in the definition or examples will be wrapped in【】.\n    `type`: where to perform the search. Must be \"definition\" or \"examples\".\n    \"\"\"\n    if type == \"definition\":\n        senses = cwn.find_senses(definition=q)\n    else:\n        senses = cwn.find_senses(examples=q)\n    \n    pat = re.compile(f\"({q})\")\n    for j, sense in enumerate(senses):\n        lemma = re.search(\"\\((\\w+)\\)\", str(sense)).group(1)\n        # Label searched term in definition\n        sense.definition = pat.sub(\"【\\\\1】\", sense.definition) if type == \"definition\" else sense.definition\n        print(f\"{j+1:>3}) {lemma.ljust(4, ' ')} id: {sense.id:<10} pos: {sense.pos:<8} def: {sense.definition}\")\n        for s, sent in enumerate(sense.examples):\n            if printExamples:\n                print(\"\\t\", end=\"\")\n                # Label searched term in definition\n                sent = pat.sub(\"【\\\\1】\", sent) if type == \"examples\" else sent\n                print(f\"{s+1}) {sent}\")\n    print()\n\n\ndef sense_id(id, cwn):\n    \"\"\"\n    Show all info about a cwn sense.\n    \"\"\"\n    sense = cwn.V[id]\n    sense['id'] = id\n    pprint.pprint(sense) \n","sub_path":"interactive_searcher.py","file_name":"interactive_searcher.py","file_ext":"py","file_size_in_byte":3624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"366919856","text":"# coding=utf-8\n# date: 2018/11/26, 14:44\n# name: smz\n\nimport tensorflow as tf\n\n\ndef demo_one():\n    indices = [0, 1, 2, -1]  # -1 yields an all-zero row, i.e., no position is the label\n    depth = 4\n    on_value = 1\n    off_value = 0\n    tensor = tf.one_hot(indices=indices, depth=depth, on_value=on_value, off_value=off_value, axis=1)\n\n    with tf.Session() as sess:\n        tensor_value = sess.run(tensor)\n        print(\"one_hot tensor:\\n\", tensor_value)\n\n\n\nif __name__ == \"__main__\":\n    demo_one()\n","sub_path":"LinearRegression/tf_summary/tf_one_hot.py","file_name":"tf_one_hot.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"103871874","text":"# -*- coding: utf-8 -*-\n#rid 8 STP\nimport xlsxwriter\nfrom io import BytesIO\nimport datetime\nfrom .. 
import stp_config\n\ndef form_url(params):\n\tbase_url = str(stp_config.CONST.API_URL_PREFIX) + 'stp_tree_planting/'\n\tbase_url += str(params[\"year\"])\n\tbase_url += '/' + str(params[\"item_num\"])\n\treturn base_url\n\n#Tree Planting Details\ndef render(res, params):\n\n\trid = params[\"rid\"]\n\tyear = params[\"year\"]\n\tcon_num = params[\"con_num\"]\n\tassign_num = params[\"assign_num\"]\n\n\toutput = BytesIO()\n\tworkbook = xlsxwriter.Workbook(output, {'in_memory': True})\n\tworksheets = []\n\ttitle = 'Tree Planting Detail'\n\t\n\tdata = res\n\n\titem_fields = [\"Description of Tree Planting Locations\", \"Mark Type\", \"Mark Location\", \"Offset From Mark (m)\", \n\t\t\t\t\t\t\"Spacing\", \"Item\", \"Quantity\", \"Hydro\", \"Comments\"]\n\titem_fields2 = [\"Species Summary, This Location\", \"Total Items\"]\n\n\t#MAIN DATA FORMATING\n\tformat_text = workbook.add_format(stp_config.CONST.FORMAT_TEXT)\n\tformat_text_right = workbook.add_format(stp_config.CONST.FORMAT_TEXT_RIGHT)\n\tformat_text_left = workbook.add_format(stp_config.CONST.FORMAT_TEXT_LEFT)\n\tformat_num = workbook.add_format(stp_config.CONST.FORMAT_NUM)\n\titem_header_format = workbook.add_format(stp_config.CONST.ITEM_HEADER_FORMAT)\n\t##Hunter's additional formatting\n\titem_format = workbook.add_format(stp_config.CONST.ITEM_FORMAT)\n\ttitle_format = workbook.add_format(stp_config.CONST.TITLE_FORMAT)\n\titem_format_money = workbook.add_format(stp_config.CONST.ITEM_FORMAT_MONEY)\n\tsubtitle_format = workbook.add_format(stp_config.CONST.SUBTITLE_FORMAT)\n\tsubtotal_format = workbook.add_format(stp_config.CONST.SUBTOTAL_FORMAT)\n\tsubtotal_format_text = workbook.add_format(stp_config.CONST.SUBTOTAL_FORMAT_TEXT)\n\tsubtotal_format_money = workbook.add_format(stp_config.CONST.SUBTOTAL_FORMAT_MONEY)\n\n\t#HEADER\n\t#write general header and format\n\trightmost_idx = 'I'\n\t\n\t#MAIN DATA\n\tcons_main = {}\n\n\tfor idx, val in enumerate(data[\"items\"]):\n\t\tif (data[\"items\"][idx][\"type_id\"] in [1,2,3,5,6] and (str(assign_num) == '-1' and not \"assignment_num\" in data[\"items\"][idx]) or \n\t\t\t((\"assignment_num\" in data[\"items\"][idx]) and (str(data[\"items\"][idx][\"assignment_num\"]) == str(assign_num)))):\n\t\t\tif not data[\"items\"][idx][\"detail_num\"] in cons_main:\n\t\t\t\tcons_main.update({data[\"items\"][idx][\"detail_num\"] : {\n\t\t\t\t\t\"Municipality\" : data[\"items\"][idx][\"municipality\"] if \"municipality\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"Regional Road\" : data[\"items\"][idx][\"regional_road\"] if \"regional_road\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"Between Road 1\" : data[\"items\"][idx][\"between_road_1\"] if \"between_road_1\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"Between Road 2\" : data[\"items\"][idx][\"between_road_2\"] if \"between_road_2\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"RINs\" : data[\"items\"][idx][\"rins\"] if \"rins\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"Contract Item No.\" : data[\"items\"][idx][\"contract_item_num\"] if \"contract_item_num\" in data[\"items\"][idx] else \"none\",\n\t\t\t\t\t\"Tree Planting Detail No.\" : data[\"items\"][idx][\"detail_num\"] if \"detail_num\" in data[\"items\"][idx] else \"none\"\n\t\t\t\t}}) \n\n\tfor idx, val in enumerate(cons_main):\n\t\tworksheets.append(workbook.add_worksheet(val))\n\t\tworksheets[idx].set_column('A:A', 40)\n\t\tworksheets[idx].set_column('B:I', 
18)\n\t\tworksheets[idx].set_row(0,36)\n\t\tworksheets[idx].set_row(1,36)\n\n\t\tstp_config.const.write_gen_title(title, workbook, worksheets[idx], rightmost_idx, year, con_num)\n\n\t\t#additional header image\n\t\tworksheets[idx].insert_image('G1', stp_config.CONST.ENV_LOGO,{'x_offset':150,'y_offset':18, 'x_scale':0.5,'y_scale':0.5, 'positioning':2})\n\n\t\tcr = 7\n\t\tfor idx2, val2 in enumerate(cons_main[val]):\n\t\t\tworksheets[idx].write('A{}'.format(cr), val2, format_text_right)\n\t\t\tworksheets[idx].write('B{}'.format(cr), cons_main[val][val2], format_text_left)\n\t\t\tcr += 1\n\n\t\tlocs = {#location : [[]]}\n\t\t}\n\n\t\tsummary = {}\n\n\t\tfor i, v in enumerate(data[\"items\"]):\n\t\t\tif (data[\"items\"][i][\"type_id\"] in [1,2,3,5,6] and ((str(assign_num) == '-1' and not \"assignment_num\" in data[\"items\"][i]) or \n\t\t\t((\"assignment_num\" in data[\"items\"][i]) and (str(data[\"items\"][i][\"assignment_num\"]) == str(assign_num))))):\n\t\t\t\tif data[\"items\"][i][\"detail_num\"] == val:\n\t\t\t\t\tloc = data[\"items\"][i][\"regional_road\"] + ', ' + data[\"items\"][i][\"between_road_1\"] + ' to ' + data[\"items\"][i][\"between_road_2\"]\n\t\t\t\t\ttItem = ((data[\"items\"][i].get(\"stock_type\", \"--\") + ' - ' +\n\t\t\t\t\t\tdata[\"items\"][i].get(\"plant_type\", \"--\") + ' - ' +\n\t\t\t\t\t\tdata[\"items\"][i].get(\"species\", \"--\")) if data[\"items\"][i][\"type_id\"] == 1\n\t\t\t\t\t\telse data[\"items\"][i][\"stump_size\"] if data[\"items\"][i][\"type_id\"] == 2\n\t\t\t\t\t\telse data[\"items\"][i][\"transp_dis\"] if data[\"items\"][i][\"type_id\"] == 3\n\t\t\t\t\t\telse 'Supplemental Tree Maintenance' if data[\"items\"][i][\"type_id\"] == 5\n\t\t\t\t\t\telse 'Extra Work' if data[\"items\"][i][\"type_id\"] == 6\n\t\t\t\t\t\telse ' ')\n\n\t\t\t\t\tsummary[tItem] = summary.get(tItem, 0) + data[\"items\"][i].get(\"quantity\", 0)\n\n\t\t\t\t\tif not loc in locs:\n\t\t\t\t\t\tlocs.update({loc : [[ \n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"roadside\", \" \"),\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"mark_type\", \" \"),\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"marking_location\", \" \"),\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"offset_from_mark\", \" \"),\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"spacing_on_centre\", \" \"),\n\t\t\t\t\t\t\ttItem,\n\t\t\t\t\t\t\tdata[\"items\"][i][\"quantity\"] if \"quantity\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i][\"hydro\"] if \"hydro\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"comments\", ' ')\n\t\t\t\t\t\t\t]]})\n\t\t\t\t\telse:\n\t\t\t\t\t\tlocs[loc].append([\n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"roadside\", \" \"), \n\t\t\t\t\t\t\tdata[\"items\"][i][\"mark_type\"] if \"mark_type\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i][\"marking_location\"] if \"marking_location\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i][\"offset_from_mark\"] if \"offset_from_mark\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i][\"spacing_on_centre\"] if \"spacing_on_centre\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\ttItem,\n\t\t\t\t\t\t\tdata[\"items\"][i][\"quantity\"] if \"quantity\" in data[\"items\"][i] else ' ',\n\t\t\t\t\t\t\tdata[\"items\"][i][\"hydro\"] if \"hydro\" in data[\"items\"][i] else ' ', \n\t\t\t\t\t\t\tdata[\"items\"][i].get(\"comments\", ' ')\n\t\t\t\t\t\t\t])\n\n\n\t\tfor side in ['North', 'South', 'East', 'West', 'Centre Median']:\n\n\t\t\tfor lid, loc in enumerate(locs):\n\t\t\t\ttLoc = []\n\t\t\t\tfor l in 
locs[loc]:\n\t\t\t\t\tif l[0] == side:\n\t\t\t\t\t\ttLoc.append(l[1:])\n\t\t\t\t\t\tprint(tLoc)\n\t\t\t\tif tLoc:\n\t\t\t\t\tcr += 1\n\t\t\t\t\tworksheets[idx].write('A' + str(cr), 'RoadSide: ' + side, subtitle_format)\n\t\t\t\t\tworksheets[idx].write_row('A' + str(cr+1), item_fields, item_header_format)\n\t\t\t\t\tcr += 2\n\n\t\t\t\t\tif not len(tLoc) == 1:\n\t\t\t\t\t\tworksheets[idx].merge_range('A' + str(cr) + ':A' + str(cr + len(tLoc) - 1), loc, format_text)\n\t\t\t\t\telse:\n\t\t\t\t\t\tworksheets[idx].write('A' + str(cr), loc, format_text)\n\n\t\t\t\t\tfor item in tLoc:\n\t\t\t\t\t\tworksheets[idx].write_row('B' + str(cr), item[0:2], format_text)\n\t\t\t\t\t\tworksheets[idx].write('D{}'.format(cr), item[2], format_num)\n\t\t\t\t\t\tworksheets[idx].write('E{}'.format(cr), item[3], format_num)\n\t\t\t\t\t\tworksheets[idx].write('F{}'.format(cr), item[4], format_text)\n\t\t\t\t\t\tworksheets[idx].write('G{}'.format(cr), item[5], format_num)\n\t\t\t\t\t\tworksheets[idx].write_row('H{}'.format(cr), item[6:], format_text)\n\t\t\t\t\t\tcr += 1\n\n\t\tcr += 1\n\t\tworksheets[idx].write_row('A' + str(cr), item_fields2, item_header_format)\n\t\tcr += 1\n\n\t\ttStart = cr\n\t\tfor sid, item in enumerate(summary):\n\t\t\tworksheets[idx].write('A{}'.format(cr), item, format_text)\n\t\t\tworksheets[idx].write('B{}'.format(cr), summary[item], format_num)\n\t\t\tcr += 1\n\t\tworksheets[idx].write('A'+str(cr), \"Total: \", subtotal_format_text)\n\t\tworksheets[idx].write_formula('B' + str(cr), '=SUM(B' + str(tStart) + ':B' + str(cr-1) + ')', subtotal_format)\n\n\n\tworkbook.close()\n\n\txlsx_data = output.getvalue()\n\treturn xlsx_data","sub_path":"stp/report_classes/stp_tpd.py","file_name":"stp_tpd.py","file_ext":"py","file_size_in_byte":7735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269400214","text":"import datetime\nimport logging\nimport os\n\nimport dill\nimport numpy as np\nfrom xgboost.sklearn import XGBClassifier\nfrom sklearn.metrics import log_loss, roc_auc_score\n\nDATA_DIR = '../data/final/vectors_improved/'\nLOG_FILE = '../log/breast_cancer_vectors_monthly_optimized_xgb.log'\nRESULTS_FILE = '../log/breast_cancer_vectors_monthly_optimized_results.dill'\nN_JOBS = 30\n\ndata_files = [\n 'vectors_patient2vec_pvdbow_hs_win-30_emb-100.dill',\n 'vectors_patient2vec_pvdbow_hs_win-30_emb-50.dill',\n 'vectors_patient2vec_pvdbow_hs_win-50_emb-100.dill',\n 'vectors_patient2vec_pvdbow_hs_win-5_emb-100.dill',\n]\n\nrand_search = dill.load(open('../log/breast_cancer_vectors_parameter_monthly_optim_xgb.dill', 'rb'))\n\n# Logging setup\nlogging.basicConfig(filename=LOG_FILE, level=logging.INFO,\n format='%(message)s')\n\n\nresults = {}\n\n\nfor data_file in data_files:\n print('Training on {}.'.format(data_file))\n\n # Loading data\n data = dill.load(open(os.path.join(DATA_DIR, data_file), 'rb'))\n results[data_file] = {}\n for months_before in sorted(list(data.keys())):\n print('\\tMonth {}'.format(months_before))\n\n train_x = data[months_before][\"TRAIN\"][\"X\"]\n train_y = data[months_before][\"TRAIN\"][\"y\"]\n test_x = data[months_before][\"TEST\"][\"X\"]\n test_y = data[months_before][\"TEST\"][\"y\"]\n\n # Getting best params\n best_params = rand_search[data_file][months_before].best_params_\n best_params['random_state'] = 1\n best_params['n_jobs'] = N_JOBS\n\n # Creating and training model\n clf = XGBClassifier(**best_params)\n clf.fit(train_x, train_y, verbose=True)\n\n # Scoring\n pred_y = clf.predict_proba(test_x)\n\n auc_score 
= roc_auc_score(test_y, pred_y[:,1])\n log_score = log_loss(test_y, pred_y)\n\n results[data_file][months_before] = {}\n results[data_file][months_before]['true_y'] = test_y\n results[data_file][months_before]['pred_y'] = pred_y\n\n logging.info('{}, {}, {}, {}'.format(data_file, months_before, auc_score, log_score))\n\ndill.dump(results, open(RESULTS_FILE, 'wb'))\n","sub_path":"breast_cancer/classification_vectors/8_monthly_optimized_xgb.py","file_name":"8_monthly_optimized_xgb.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80176369","text":"from ButtonHandlers import ButtonHandlers\nfrom DataAccessObject.Book import Book as BookDAO\nfrom tkinter import Tk\nfrom WidgetContainer import WidgetContainer\nfrom WidgetCreator import WidgetCreator\n\n\nclass Main:\n def __init__(self, book_dao):\n self.window = Tk()\n self.widget_creator = WidgetCreator(self.window)\n self.widget_container = WidgetContainer(self.window)\n self.button_handlers = ButtonHandlers(book_dao, self.widget_container, self.widget_creator)\n\n def main_loop(self):\n self.button_handlers.show_books()\n self.window.mainloop()\n\n\nwith BookDAO() as bookDao:\n main = Main(bookDao)\n main.main_loop()\n\n","sub_path":"PMC/Section16/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"573531247","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nimport configparser\nimport os\nimport json\nimport re\nimport mimetypes\nfrom wsgiref.util import FileWrapper\n\nfrom django.http.response import StreamingHttpResponse\n\nrootpath = ''\n\ndef getRootPath():\n global rootpath\n if(not rootpath):\n configDir = os.path.dirname(os.path.abspath(\"views.py\"))\n conf = configparser.ConfigParser()\n conf.read(configDir + '/config.ini')\n rootpath = conf.get('global', 'rootpath')\n return rootpath\n\ndef path(request):\n global rootpath\n rootpath = getRootPath()\n requestPathStr = request.GET.get('path', '/')\n if (requestPathStr[0] != '/'):\n requestPathStr = '/' + requestPathStr\n\n requestPath = rootpath + requestPathStr\n\n childFileListName = []\n if(os.path.exists(requestPath) or os.path.isdir(requestPath)):\n print(os.listdir(requestPath))\n childFileListName = list(filter(lambda x: x[0] != '.', os.listdir(requestPath)))\n print(childFileListName)\n childFileList = []\n for item in childFileListName:\n childFileList.append({\"name\": item, \"parentDir\": requestPathStr, \"isVedio\": os.path.isfile(requestPath + \"/\" + item)})\n\n return HttpResponse(json.dumps(childFileList), content_type='application/json')\n\nclass RangeFileWrapper(object):\n def __init__(self, filelike, blksize=8192, offset=0, length=None):\n self.filelike = filelike\n self.filelike.seek(offset, os.SEEK_SET)\n self.remaining = length\n self.blksize = blksize\n\n def close(self):\n if hasattr(self.filelike, 'close'):\n self.filelike.close()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.remaining is None:\n # If remaining is None, we're reading the entire file.\n data = self.filelike.read(self.blksize)\n if data:\n return data\n raise StopIteration()\n else:\n if self.remaining <= 0:\n raise StopIteration()\n data = self.filelike.read(min(self.remaining, self.blksize))\n if not data:\n raise StopIteration()\n self.remaining -= len(data)\n return data\n\ndef 
video(request):\n    global rootpath\n    rootpath = getRootPath()\n    requestPathStr = request.GET.get('path', '/')\n    if (requestPathStr[0] != '/'):\n        requestPathStr = '/' + requestPathStr\n    path = rootpath + requestPathStr\n    print(path)\n\n    range_header = request.META.get('HTTP_RANGE', '').strip()\n    range_re = re.compile(r'bytes\\s*=\\s*(\\d+)\\s*-\\s*(\\d*)', re.I)\n    range_match = range_re.match(range_header)\n    size = os.path.getsize(path)\n\n    print(request.META)\n    \n    f = open(path,\"rb\")\n    response = HttpResponse()\n    response['Content-Type'] ='video/mp4'\n    response['Content-Length'] = os.path.getsize(path)\n    response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(f.name) \n    response['Accept-Ranges'] = 'bytes'\n    response['Access-Control-Allow-Origin'] = '*'\n    # response['Connection'] = 'keep-alive'\n    response.write(f.read())\n    return response\n\n#     content_type, encoding = mimetypes.guess_type(path)\n#     content_type = \"video/mpeg\"\n#     if range_match:\n#         first_byte, last_byte = range_match.groups()\n#         first_byte = int(first_byte) if first_byte else 0\n#         last_byte = int(last_byte) if last_byte else size - 1\n#         if last_byte >= size:\n#             last_byte = size - 1\n#         length = last_byte - first_byte + 1\n#         resp = StreamingHttpResponse(RangeFileWrapper(open(path, 'rb'), offset=first_byte, length=length), status=206, content_type=content_type)\n#         resp['Content-Length'] = str(length)\n#         resp['Content-Range'] = 'bytes %s-%s/%s' % (first_byte, last_byte, size)\n#     else:\n#         # When the request is not a ranged video-stream fetch, return the whole file via a generator to save memory\n#         # resp = StreamingHttpResponse(FileWrapper(open(path, 'rb')), content_type=content_type)\n#         # resp['Content-Length'] = str(size)\n#         print('abcsadfasdf')\n#         f = open(path,\"rb\") \n#         response = HttpResponse()\n#         response.write(f.read())\n#         response['Content-Type'] ='video/mp4'\n#         response['Content-Length'] =os.path.getsize(path)\n#         response['Content-Disposition'] = 'attachment;filename=\"{}\"'.format(f.name) \n#         return response\n#     resp['Accept-Ranges'] = 'bytes'\n","sub_path":"project/CartoonServer/CartoonFileManager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"459654007","text":"\"\"\"\nThis class handles every request on the /board route.\nThe method provided on this route is:\n- POST\n\nThis route is used to:\nmanipulate board (IoT device) data,\nsuch as the active and on-process status.\n\"\"\"\n\nfrom flask_restful import Resource, reqparse\nfrom firebase_admin import db\n\n__ref__= 'database/board'\n__id__ = 'id'\n__is_active__ = 'active'\n__on_proccess__ = 'OnProcess'\n\n__format__ = {\n    __id__: '',\n    __is_active__: '',\n    __on_proccess__: ''\n}\n\n\nclass Board(Resource):\n    _ref = db.reference(__ref__)\n    _parser = reqparse.RequestParser()\n\n    def __init__(self):\n        self._parser.add_argument(__id__)\n        self._parser.add_argument(__is_active__)\n        self._parser.add_argument(__on_proccess__)\n\n    def post(self):\n        \"\"\"\n        This function is the blueprint for handling\n        POST requests.\n\n        The following operations happen in this method:\n        - Fetch the data from the Firebase Realtime Database with\n          the reference /database/board.\n\n        - Search whether the received id parameter\n          already exists or not.\n\n        - If the search finds it in the database,\n          update the required data.\n\n        - If the search does not find it,\n          push new data into the database with a\n          unique data id.\n\n        :return: JSON format; if no operation fails,\n        the response should be success.\n        \"\"\"\n        data = self._parser.parse_args()\n        exists = False\n\n        snapshot = self._ref.get()\n        for id in snapshot:\n            if snapshot[id][__id__] == data[__id__]:\n                exists = True\n\n\n        if exists:\n            self._ref.child(data[__id__]).update({\n                __is_active__: data[__is_active__],\n                __on_proccess__: 'false'\n            })\n\n        else:\n            format = __format__\n            format[__id__] = data[__id__]\n            format[__is_active__] = 'false'\n            format[__on_proccess__] = 'false'\n\n            self._ref.push(format)\n\n        return {'response': 'OK'}, 200\n","sub_path":"resource/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"146387435","text":"#!/usr/bin/env python3\nimport argparse\nimport subprocess\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib_venn import venn2, venn2_circles\n\n\ndef parser():\n    \"\"\"Parse command-line arguments.\"\"\"\n    parser = argparse.ArgumentParser(description=\"Plot overlap of peaks with known regions\")\n    parser.add_argument(\"peaks\", help=\"BED file\")\n    parser.add_argument(\"known\", help=\"BED file\")\n    args = parser.parse_args()\n    return args\n\ndef overlap(peaks_file, regions_file):\n    \n    annotations = (\"browser\", \"track\")\n    num_peaks = sum(1 for line in open(peaks_file))  # Number of peaks\n    num_regions = sum(1 for line in open(regions_file))  # Number of known regions\n    cmd = [\"bedtools\", \"intersect\", \"-a\", peaks_file, \"-b\", regions_file, \"-u\"]\n    try:\n        out_bytes = subprocess.check_output(cmd)\n    except subprocess.CalledProcessError as e:\n        out_bytes = e.output  # Output generated before error\n        code = e.returncode  # Return code\n    \n    out_text = out_bytes.decode('utf-8')\n    out_lines = out_text.splitlines()\n    num_lines = len(out_lines)\n    return (num_peaks - num_lines, num_regions - num_lines, num_lines)\n\n\ndef venn(sizes, labels):\n    \"\"\"Plot venn diagram of overlap with known regions.\"\"\"\n    v = venn2(subsets=sizes, set_labels=labels)\n\n    # Subset labels (venn2 subsets are ordered 10, 01, 11)\n    label10 = \"{:,}\".format(sizes[0])\n    label01 = \"{:,}\".format(sizes[1])\n    label11 = \"{:,}\".format(sizes[2])\n    v.get_label_by_id('10').set_text(label10)\n    v.get_label_by_id('11').set_text(label11)\n    v.get_label_by_id('01').set_text(label01)\n    \n    # Subset colours\n    v.get_patch_by_id(\"10\").set_color(\"#FFFFFF\")\n    v.get_patch_by_id(\"11\").set_color(\"#FF0000\")\n    v.get_patch_by_id(\"01\").set_color(\"#FFFFFF\")\n    \n    # Subset alphas\n    v.get_patch_by_id(\"10\").set_alpha(0.5)\n    v.get_patch_by_id(\"11\").set_alpha(0.5)\n    v.get_patch_by_id(\"01\").set_alpha(0.5)\n    \n    # Border styles\n    c = venn2_circles(subsets=sizes, linestyle='solid')\n    plt.title(\"Overlap with known regions\")\n    plt.savefig(\"overlap.pdf\")\n\n\ndef main():\n    args = parser()  # parse command-line arguments\n    sizes = overlap(args.peaks, args.known)  # calculate overlap sizes\n    labels = (args.peaks, args.known)\n    venn(sizes, labels)  # plot venn diagram\n\nif __name__ == '__main__':\n    main()\n","sub_path":"overlap_with_known_regions.py","file_name":"overlap_with_known_regions.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"250230709","text":"import numpy as np \nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', '--input', required=True, type=str, help=\"REAX connectivity 
file\")\nparser.add_argument('-tr', '--traj', required=True, type=str, help=\"REAX connectivity file\")\nparser.add_argument('-cn','--CN', required=True,type= float, help=\"Coordination number\")\nparser.add_argument('-t','--type', required=True,type= float, help=\"Coordination number\")\nargs = parser.parse_args()\ndatafile = args.input\ntraj = args.traj\ncn = args.CN\natom_type = args.type\n\nID_z = []\nID = []\nwith open(traj) as f:\n\tfor line in f:\n\t\tparts = line.split()\n\t\tif parts[0].isdigit() and len(parts) == 5:\n\t\t\tif float(parts[0]) == atom_type:\n\t\t\t\tID_z.append([parts[1],parts[4]])\n\t\t\t\tID.append(parts[1])\n\t\t\t\t#print(parts[1], parts[4])\n\nwith open(datafile) as f:\n\tcount = 0\n\tfor line in f:\n\t\tparts = line.split()\n\t\tif parts[0].isdigit() and float(parts[1]) == atom_type and float(parts[2]) == cn:\n\t\t\tcount += 1\n\t\t\tiAtom = ID.index(parts[1])\n\t\t\tprint(ID_z[iAtom][1])\n#print(count)\n\t\t\t\n\n","sub_path":"struc_conn/over_underCN.py","file_name":"over_underCN.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"577296500","text":"# -*- coding: utf 8 -*-\n\"\"\"\nDefine a suite a tests for the Component module.\n\"\"\"\nimport pytest\n\nfrom striplog.rock import Rock\nfrom striplog import Component\nfrom striplog.component import ComponentError\nfrom striplog import Lexicon\n\nr = {'colour': 'grey',\n 'grainsize': 'vf-f',\n 'lithology': 'sand'}\n\nr2 = {'grainsize': 'VF-F',\n 'colour': 'Grey',\n 'lithology': 'Sand'}\n\nr3 = {'grainsize': 'Coarse',\n 'colour': 'Grey',\n 'lithology': 'Sandstone'}\n\nr6 = {'grainsize': 'Coarse',\n 'colour': 'Grey',\n 'lithology': None}\n\n\ndef test_rock():\n rock = Rock(r)\n assert rock\n\n\ndef test_init():\n rock = Component(r)\n assert rock.colour == 'grey'\n\n\ndef test_identity():\n rock = Component(r)\n assert rock != 'non-Component'\n\n rock2 = Component(r2)\n assert rock == rock2\n\n rock3 = Component(r3)\n assert rock != rock3\n\n\ndef test_summary():\n rock = Component(r)\n s = rock.summary(fmt=\"My rock: {lithology} ({colour}, {GRAINSIZE})\")\n assert s == 'My rock: sand (grey, VF-F)'\n\n rock6 = Component(r6)\n s = rock6.summary(fmt=\"My rock: {lithology}\")\n assert s == 'My rock: '\n\n empty = Component({})\n d = \"String\"\n assert not empty # Should have False value\n assert empty.summary(default=d) == d\n\n with pytest.raises(ComponentError):\n rock.summary(fmt=\"My rock: {not there}\")\n\n\ndef test_from_text():\n rock3 = Component(r3)\n lexicon = Lexicon.default()\n s = 'Grey coarse sandstone.'\n rock4 = Component.from_text(s, lexicon)\n assert rock3 == rock4\n rock5 = Component.from_text(s, lexicon, required='not there')\n assert not rock5 # Should be None\n","sub_path":"tests/test_component.py","file_name":"test_component.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305733070","text":"from django.conf.urls import url\nfrom django.urls import path\n\nfrom Frontend.Views.reviews import *\nfrom Frontend.admin import event_admin_site\nfrom Frontend.Views.cart import *\nfrom Frontend.views import *\nfrom Frontend.Views.loginSignup import *\nfrom Frontend.Views.search import *\nurlpatterns = [\n path('', index, name=\"index\"),\n path('OneDay', OneDay, name=\"trips1\"),\n path('MultipleDays', MultipleDays, name=\"trips2\"),\n path('Trip/', trip, name=\"trip\"),\n path('Destinations/Trip/', 
trip, name=\"trip\"),\n path('cart/Trip/', trip, name=\"trip\"),\n path('Blogs', blogs, name=\"blogs\"),\n path('Blog', blog, name=\"blog\"),\n\n path('NewsFeed/', News_single, name=\"News_single\"),\n path('Destinations/', place, name=\"place\"),\n path('Places', Places, name=\"places\"),\n path('Operator/', operator, name=\"operator\"),\n path('Operator/Trip/', trip, name=\"trip\"),\n path('Operator', touroperator, name=\"touroperator\"),\n #path('Login', login, name=\"login\"),\n #path('Register', register, name=\"register\"),\n path('search_results', search_results, name=\"search_results\"),\n url(r'^ajax/search/$', search, name='search'),\n path('logout', logout, name=\"logout\"),\n url(r'^ajax/Subscribe_NewsLetter/$', Subscribe_NewsLetter, name='Subscribe_NewsLetter'),\n path('myoperator/', event_admin_site.urls),\n path('tripscrap/', tripScrap, name=\"Scrap\"),\n path('newsscrap/', newsScrap, name=\"News\"),\n #path('NewsFeed', News, name=\"News\"),\n path('cart/', cart, name=\"cart\"),\n path('compare/-vs-', compare, name=\"compare\"),\n url(r'^ajax/Add_in_Cart/$', Add_in_Cart, name='Add_in_Cart'),\n url(r'^ajax/range/$', rangess, name='range'),\n url(r'^api/get_places/', get_places, name='get_places'),\n url(r'^ajax/Login/$', login, name='search'),\n url(r'^ajax/signup/$', register, name='search'),\n path('orders/', orderConfirm, name='orderConfirm'),\n path('OrderDetails/', OrderStatus,name=\"OrderStatus\"),\n path('OrderMain/', OrderMain, name=\"OrderMain\"),\n #path('api/',UserList.as_view()),\n url('apis//', apis, name=\"apis\"),\n url(r'^ajax/Add_Trip_Review/$', Add_Trip_Review, name='Add_Review'),\n url(r'^ajax/Add_TourOp_Review/$', Add_TourOp_Review, name='Add_Review'),\n url(r'^ajax/Add_Dest_Review/$', Add_Dest_Review, name='Add_Review'),\n path('sort/', sorting, name=\"sort\"),\n path('sort1/', sorting1, name=\"sort\"),\n]","sub_path":"Frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"411750325","text":"# Config.py\n#\n# This program reads the configuration file in the HOME directory. 
\n# The file contains numerous configurations that are each labeled\n# with a name in brackets as in [name]\n# The intent is that a user might have more than one profile, such as\n# dev, test, stage, and prod\n#\n#\n\nimport os\nimport sys\nimport re\nimport configparser\n\nclass Config:\n\n\t_instance = None\n\tdef shared():\n\t\tif Config._instance == None:\n\t\t\tConfig._instance = Config()\n\t\treturn Config._instance\n\n\n\tdef __init__(self):\n\t\tself.home = os.environ.get('HOME') # unix\n\t\tif self.home == None:\n\t\t\tself.home = os.environ.get('HOMEPATH') # windows\n\t\tif self.home == None:\n\t\t\tself.home = os.getcwd()\n\n\t\tconfigFile = os.path.join(self.home, \"dbp-etl.cfg\")\n\t\tif not os.path.exists(configFile):\n\t\t\tprint(\"ERROR: Config file '%s' does not exist.\" % (configFile))\n\t\t\tsys.exit()\n\n\t\tprofile = os.environ.get('PROFILE', \"\")\n\t\tif len(profile) == 0:\n\t\t\tif len(sys.argv) < 2:\n\t\t\t\tprint(\"ERROR: config profile, such as 'dev,test,prod' is first required parameter.\")\n\t\t\t\tsys.exit()\n\t\t\tprofile = sys.argv[1]\n\n\t\tconfig = configparser.ConfigParser(interpolation = None)\n\t\tconfig.read(configFile)\n\t\tsections = config.sections()\n\t\tif profile not in sections:\n\t\t\tprint(\"ERROR: config profile %s is not in %s\" % (profile, configFile))\n\t\t\tsys.exit()\n\t\tself.profile = profile\n\t\tself.hashMap = config[profile]\n\n\t\t#for key, value in self.hashMap.items():\n\t\t#\tprint(key, value)\n\n\t\tif len(self.hashMap) == 0:\n\t\t\tprint(\"ERROR: Config profile %s does not exist in '%s'.\" % (profile, configFile))\n\t\t\tsys.exit()\n\n\t\tsplitPattern = re.compile(\"\\\\\\\\|/\") # I have no idea why \\\\\\\\ escapes to one \\\n\t\tprogramRunning = splitPattern.split(sys.argv[0])[-1]\n\n\t\tself.database_names = {}\n\t\tself.current_database_name = None\n\t\tself.setConfigParametersFromProfile(profile, programRunning)\n\n\t\tprint(\"Config '%s' is loaded.\" % (profile))\n\n\tdef _get(self, name):\n\t\tvalue = self.hashMap.get(name)\n\t\tif value == None:\n\t\t\tprint(\"ERROR: Config entry '%s' is missing.\" % (name))\n\t\t\tsys.exit()\n\t\treturn value\n\n\tdef _getPath(self, name):\n\t\tvalue = self._get(name)\n\t\tpath = value.replace(\"/\", os.path.sep)\n\t\tif path.startswith(\"~\"):\n\t\t\tpath = path.replace(\"~\", self.home)\n\t\tif not os.path.exists(path):\n\t\t\tprint(\"ERROR: path %s for %s does not exist\" % (path, name))\n\t\t\tsys.exit()\n\t\treturn path\n\n\tdef _getInt(self, name):\n\t\treturn int(self._get(name))\n\n\tdef _getFloat(self, name):\n\t\treturn float(self._get(name))\n\n\tdef _getOptional(self, name):\n\t\treturn self.hashMap.get(name)\n\n\tdef getOptional(self, name):\n\t\treturn self.hashMap.get(name)\n\n\tdef getDatabaseNames(self):\n\t\treturn self.database_names\n\n\tdef setConfigParametersFromProfile(self, profile, programRunning):\n\t\tself.s3_artifacts_bucket = self._get(\"s3.artifacts_bucket\")\n\t\tself.s3_aws_profile = self._getOptional(\"s3.aws_profile\") \n\t\tself.s3_aws_role_arn = self._getOptional(\"s3.aws_role_arn\") \n\t\tself.s3_aws_role_profile = self._getOptional(\"s3.aws_role_profile\") # this is temporary\n\t\tself.filename_lpts_xml = self._getOptional(\"filename.lpts_xml\")\n\t\tself.filename_metadata_xml = self._getOptional(\"filename.metadata_xml\")\n\t\tself.s3_bucket = self._get(\"s3.bucket\")\n\t\tself.s3_vid_bucket = self._get(\"s3.vid_bucket\")\n\t\tself.directory_accepted = self._getOptional(\"directory.accepted\")\n\t\tself.directory_errors = 
self._getOptional(\"directory.errors\")\n\t\tself.directory_upload_aws = self._getPath(\"directory.upload_aws\")\n\t\tself.directory_quarantine = self._getPath(\"directory.quarantine\")\n\t\tself.directory_duplicate = self._getPath(\"directory.duplicate\")\n\t\tself.directory_bucket_list = self._getPath(\"directory.bucket_list\")\n\t\tself.node_exe = self._getPath(\"node.exe\")\n\t\tself.sofria_client_js = self._getPath(\"sofria_client.js\")\n\t\tself.filename_accept_errors = self._getPath(\"filename.accept.errors\")\n\t\tself.filename_datetime = self._get(\"filename.datetime\")\n\t\tself.mysql_exe = self._getOptional(\"mysql.exe\")\n\t\tself.data_missing_verses_allowed = self._getOptional(\"data.missing_verses_allowed\")\n\n\t\t# TODO these dependencies need to be sorted out\n\t\tif programRunning in {\"DBPLoadController.py\"}:\n\t\t\tself.audio_transcoder_url = self._get(\"audio.transcoder.url\")\n\t\t\tself.audio_transcoder_key = self._get(\"audio.transcoder.key\")\n\t\t\tself.audio_transcoder_sleep_sec = self._getInt(\"audio.transcoder.sleep.sec\")\n\t\t\tself.audio_transcoder_input = self._get(\"audio.transcoder.input\")\n\t\t\tself.lambda_zip_function = self._get(\"lambda.zip.function\")\n\t\t\tself.lambda_zip_region = self._get(\"lambda.zip.region\")\n\t\t\tself.lambda_zip_timeout = self._getInt(\"lambda.zip.timeout\")\n\n\t\t\tself.video_transcoder_region = self._get(\"video.transcoder.region\")\n\t\t\tself.video_transcoder_pipeline = self._get(\"video.transcoder.pipeline\")\n\t\t\tself.video_preset_hls_1080p = self._get(\"video.preset.hls.1080p\")\n\t\t\tself.video_preset_hls_720p = self._get(\"video.preset.hls.720p\")\n\t\t\tself.video_preset_hls_480p = self._get(\"video.preset.hls.480p\")\n\t\t\tself.video_preset_hls_360p = self._get(\"video.preset.hls.360p\")\n\t\t\tself.video_preset_web = self._get(\"video.preset.web\")\n\t\t\tself.filename_lpts_xml = self._getPath(\"filename.lpts_xml\")\n\t\t\tself.database_names['dbp'] = self.hashMap.get(\"database.db_name\")\n\t\t\tself.database_names['user_dbp'] = self.hashMap.get(\"database.user_db_name\")\n\t\t\tself.database_host = self._get(\"database.host\")\n\t\t\tself.database_user = self._get(\"database.user\")\n\t\t\tself.database_passwd = self._get(\"database.passwd\")\n\t\t\tself.database_db_name = self._get(\"database.db_name\")\n\t\t\tself.database_user_db_name = self._get(\"database.user_db_name\")\n\t\t\tself.database_port = self._getInt(\"database.port\")\n\t\t\tself.database_tunnel = self._getOptional(\"database.tunnel\")\n\n\t\telif programRunning in {\"AudioHLS.py\"}:\n\t\t\tself.directory_audio_hls = self._getPath(\"directory.audio_hls\") #\"%s/FCBH/files/tmp\" % (self.home)\n\t\t\tself.audio_hls_duration_limit = self._getInt(\"audio.hls.duration.limit\") #10 #### Must become command line param\n\n\t\tif profile == 'test':\n\t\t\tself.video_transcoder_pipeline = self._get(\"video.transcoder.pipeline\")\n\t\t\tself.video_transcoder_mock = self._get(\"video.transcoder.mock\")\n\t\t\tself.video_transcoder_region = self._get(\"video.transcoder.region\")\n\t\t\tself.video_preset_hls_1080p = self._get(\"video.preset.hls.1080p\")\n\t\t\tself.video_preset_hls_720p = self._get(\"video.preset.hls.720p\")\n\t\t\tself.video_preset_hls_480p = self._get(\"video.preset.hls.480p\")\n\t\t\tself.video_preset_hls_360p = self._get(\"video.preset.hls.360p\")\n\t\t\tself.video_preset_web = self._get(\"video.preset.web\")\n\n\t\tif profile in {'test', 'dev'}:\n\t\t\tself.database_names['dbp'] = 
self.hashMap.get(\"database.db_name\")\n\t\t\tself.database_names['user_dbp'] = self.hashMap.get(\"database.user_db_name\")\n\t\t\tself.database_host = self._get(\"database.host\")\n\t\t\tself.database_user = self._get(\"database.user\")\n\t\t\tself.database_passwd = self._get(\"database.passwd\")\n\t\t\tself.database_db_name = self._get(\"database.db_name\")\n\t\t\tself.database_user_db_name = self._get(\"database.user_db_name\")\n\t\t\tself.database_port = self._getInt(\"database.port\")\n\t\t\tself.database_tunnel = self._getOptional(\"database.tunnel\")\n\t\t\tself.setCurrentDatabaseDBName(self.hashMap.get(\"database.db_name\"))\n\t\telif profile == 'linguasource':\n\t\t\tself.database_host = self._get(\"database.host\")\n\t\t\tself.database_user = self._get(\"database.user\")\n\t\t\tself.database_passwd = self._get(\"database.passwd\")\n\t\t\tself.language_db_name = self._get(\"database.language_db_name\")\n\t\t\tself.biblebrain_db_name = self._get(\"database.biblebrain_db_name\")\n\t\t\tself.database_port = self._getInt(\"database.port\")\n\t\t\tself.database_tunnel = self._getOptional(\"database.tunnel\")\n\t\t\tself.database_names['language'] = self.hashMap.get(\"database.language_db_name\")\n\t\t\tself.database_names['biblebrain'] = self.hashMap.get(\"database.biblebrain_db_name\")\n\t\t\tself.setCurrentDatabaseDBName(self.hashMap.get(\"database.language_db_name\"))\n\t\telif profile == 'newdata':\n\t\t\tself.database_names['dbp'] = self.hashMap.get(\"database.db_name\")\n\t\t\tself.database_names['user_dbp'] = self.hashMap.get(\"database.user_db_name\")\n\t\t\tself.database_host = self._get(\"database.host\")\n\t\t\tself.database_user = self._get(\"database.user\")\n\t\t\tself.database_passwd = self._get(\"database.passwd\")\n\t\t\tself.database_db_name = self._get(\"database.db_name\")\n\t\t\tself.database_user_db_name = self._get(\"database.user_db_name\")\n\t\t\tself.database_port = self._getInt(\"database.port\")\n\t\t\tself.database_tunnel = self._getOptional(\"database.tunnel\")\n\t\t\tself.setCurrentDatabaseDBName(self.hashMap.get(\"database.db_name\"))\n\n\tdef setCurrentDatabaseDBName(self, name):\n\t\tself.current_database_name = name\n\n\tdef getCurrentDatabaseDBName(self):\n\t\treturn self.current_database_name\n\n# Unit Test\nif (__name__ == '__main__'):\n\tconfig = Config()\n\tprint(\"User\", config.database_user)\n\t# print(\"DB\", config.database_db_name)\n\tprint(\"Current DB name\", config.current_database_name)\n\tprint(\"DB names\", config.getDatabaseNames())\n","sub_path":"load/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":8835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281898882","text":"'''\n5.2 Write a program that repeatedly prompts a user for integer numbers until the user enters 'done'.\nOnce 'done' is entered, print out the largest and smallest of the numbers.\nIf the user enters anything other than a valid number catch it with a try/except and put out an appropriate message and\nignore the number. 
Enter 7, 2, bob, 10, and 4 and match the output below.\n'''\n\n\n\nflag = True\n\nwhile True:\n    var = input(\"Enter a number: \")\n    if var == \"done\":\n        break\n    try:\n        int_var = int(var)\n    except:\n        print(\"Invalid input\")\n        continue\n\n    if flag == True:\n        lar = int_var\n        smal = int_var\n        flag = False\n\n\n    if int_var > lar:\n        lar = int_var\n    if int_var < smal:\n        smal = int_var\n\nprint(\"Maximum is\", lar)\nprint(\"Minimum is\", smal)\n","sub_path":"Week 7/Assignment 5.2.py","file_name":"Assignment 5.2.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"389727437","text":"# -*- coding: UTF-8 -*-\r\n# Dump the medical-insurance unified item codes and prices to a txt file\r\nimport re\r\nimport os\r\nimport os.path\r\nimport datetime\r\nimport g # global variables module\r\n#NLS_LANG=AMERICAN_AMERICA.ZHS16GBK\r\ndef __runsql(sqltext,**sqlargs):\r\n    g.result=None\r\n    try :\r\n        a=open(os.path.join(g.sqldir,sqltext+'.sql'),'r')\r\n        b=a.readlines()\r\n        a.close()\r\n    except:\r\n        print('Matching SQL script file not found.')\r\n    try:\r\n        sql=''\r\n        for line in b:\r\n            sql=sql+line\r\n        g.result=g.cur.execute(sql,sqlargs).fetchall()\r\n    except :\r\n        print('Error while running the SQL statement')\r\n\r\n\r\n\r\ndef __everyday(startdate,enddate):\r\n    a=datetime.datetime.strptime(startdate,'%Y-%m-%d')\r\n    b=datetime.datetime.strptime(enddate,'%Y-%m-%d')\r\n    c=a\r\n    while c<=b:\r\n        yield (datetime.datetime.strftime(c,'%Y-%m-%d'),datetime.datetime.strftime(c+datetime.timedelta(days=1),'%Y-%m-%d'))\r\n        c=c+datetime.timedelta(days=1)\r\n    \r\ndef __printresult():\r\n    header=''\r\n    line=''\r\n    for i in g.cur.description:\r\n        #header=header+i[0].ljust(g.colwid,' ')+g.sep\r\n        header=header+i[0]+g.sep\r\n    #header=header[0:-1]+g.end\r\n    for i in g.result:\r\n        for j in i :\r\n            #line=line+str(j).ljust(g.colwid,' ')+g.sep\r\n            line=line+str(j)+g.sep\r\n        line=line[0:-1]+g.end\r\n    print(header)\r\n    print(line)\r\n\r\ndef __exportresult(filename):\r\n    header=''\r\n    line=''\r\n    for i in g.cur.description:\r\n        header=header+i[0]+g.sep\r\n    header=header[0:-1]+g.end\r\n    for i in g.result:\r\n        for j in i :\r\n            line=line+str(j)+g.sep\r\n        line=line[0:-1]+g.end\r\n    if os.path.exists(g.outputdir):\r\n        pass\r\n    else:\r\n        print('Creating export directory',g.outputdir)\r\n        os.mkdir(os.path.abspath(g.outputdir))\r\n    a=open(os.path.join(g.outputdir,filename),'w')\r\n    a.write(header)\r\n    a.write(line)\r\n    a.close()\r\n    print('Query result saved to:',os.path.join(g.outputdir,filename))\r\n\t\r\n\t\r\na=open('非药品项目价格列表.txt','r')\r\nb=a.readlines()\r\na.close()\r\nc={}\r\nfor item in b:\r\n    code,price=item[:-1].split('\\t')\r\n    try:\r\n        c[code]=float(price)\r\n    except:\r\n        c[code]='na'\r\n\t\r\n__runsql('中心项目库')\r\nfor item in g.result:\r\n    if item[0] in c:\r\n        if c[item[0]] != 'na':\r\n            try:\r\n                x=float(item[3])\r\n                if float(item[3])!=c[item[0]]:\r\n                    print(item,'!',c[item[0]])\r\n            except:\r\n                pass\r\n","sub_path":"workscript/检查非药物项目价格.py","file_name":"检查非药物项目价格.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"286631856","text":"import scrapy\nfrom realestate_3.items import REItem\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nimport datetime\n\n\nclass rentSpider(CrawlSpider):\n    name = \"gerberpropertiescomau\"\n    allowed_domains = ['gerberproperties.com.au']\n    start_urls = ['http://gerberproperties.com.au/Buy.php',\n                  'http://gerberproperties.com.au/Rent.php',\n                  'http://gerberproperties.com.au/Sold-Properties.php'\n                  ]\n    count = 0\n\n    
rules = (\n Rule(LinkExtractor(allow=allowed_domains, restrict_xpaths=('//*[@id=\"contentContainer\"]/div[1]/ul/li/a')),callback='parse3', follow=True),\n # Rule(LinkExtractor(allow=allowed_domains, restrict_xpaths=('//*[@id=\"pre_content\"]/div[3]/div[2]/a[@class=\"next_page\"]')), follow=True),\n )\n\n def parse3(self, response):\n # defaults\n item = REItem()\n for key in item.fields:\n item[key] = 0\n\n item['sold'] = 'False'\n item['source_url'] = response.url\n item['address_state'] = 'NSW'\n item['address_postcode'] = 0000\n\n dump_html = response.body\n dump_html = dump_html.decode('UTF-8','ignore').replace('\\'', '\\\"')\n item['dump_html'] = dump_html\n date = datetime.datetime.now()\n item['created'] = '%s-%s-%s %s:%s:%s' % (date.year, date.month, date.day, date.hour, date.minute, date.second)\n # no default\n\n # sold\n if 'Sold' in response.request.headers.get('Referer', None):\n item['sold'] = 'True'\n\n # agency_name\n item['agency_name'] = 'Gerber Properties'\n\n # agent_url\n try:\n item['agent_url'] = response.xpath('//*[@class=\"agentList\"]/li/h5/b/text()').extract()[0].strip() + ', '+ \\\n response.xpath('//*[@class=\"agentList\"]/li/text()div[@class=\"agent-info\"]/p[@class=\"mobile\"]/text()').extract()[0].encode('ascii','ignore').replace('\\n','').strip()\n except:\n pass\n\n try:\n item['agent_name'] = response.xpath('//*[@class=\"agentList\"]/li/h5/b/text()').extract()[0]\n # print item['agent_name']\n item['agent_mobile'] = response.xpath('//*[@class=\"agentList\"]/li/text()').extract()[5]\n item['agent_phone'] = response.xpath('//*[@class=\"agentList\"]/li/text()').extract()[3]\n # print item['agent_mobile']\n # print item['agent_phone']\n except Exception:\n pass\n\n\n # address\n item['address_street'] = response.xpath('//*[@class=\"slideshow_caption\"]/h3/text()').extract()[0].strip()\n item['address_suburb'] = response.xpath('//*[@class=\"slideshow_caption\"]/h3/b/text()').extract()[0].strip()\n\n # description\n desc = []\n for i in response.xpath('//*[@class=\"description\"]/text()').extract():\n i = i.encode('ascii', 'ignore').strip()\n if i == '':\n continue\n desc.append(i)\n desc = str(map(str, desc))\n desc = desc.replace('[', '').replace(']', '').replace('\\'', '\\\"')\n item['description'] = desc\n\n # title\n item['title'] = response.xpath('//*[@id=\"contentContainer\"]/div[1]/h2/text()').extract()[0].replace('\\'', '\\\"')\n\n # price\n item['price'] =response.xpath('//*[@id=\"contentContainer\"]/div[2]/h4[2]/span/text()').extract()[0].strip().replace('\\'', '\\\"')\n\n # bbc\n bbc = response.xpath('//*[@class=\"slideshow_icons\"]/li')\n for i in bbc:\n if 'Bed' in i.xpath('img/@alt').extract()[0]:\n item['bedrooms'] = i.xpath('text()').extract()[0]\n if 'Bath' in i.xpath('img/@alt').extract()[0]:\n item['bathrooms'] = i.xpath('text()').extract()[0]\n if 'Car' in i.xpath('img/@alt').extract()[0]:\n item['garage_spaces'] = i.xpath('text()').extract()[0]\n\n # sold control\n if 'SOLD' in item['price']:\n item['sold'] = 'True'\n\n # coords\n # 'google.maps.LatLng('\n # var a=new GLatLng\n try:\n coords = response.body.find('var a=new GLatLng')\n coords = response.body[coords:coords+50]\n coords = coords[18:coords.find(')')]\n coords = coords.split(',')\n # print coords\n item['latitude'] = float(coords[0].replace('\"','').strip())\n item['longitude'] = float(coords[1].replace('\"','').strip())\n except:\n pass\n\n return 
item\n\n","sub_path":"realestate_from_luke/realestate_3/realestate_3/spiders/gerberpropertiescomau.py","file_name":"gerberpropertiescomau.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599495753","text":"# -*- coding: utf-8 -*-\nimport os, sys\nimport time\nimport random\nimport numpy as np\nimport tensorflow as tf\nfrom collections import deque\nfrom collections import OrderedDict\nfrom utility.utils import Utils\nfrom Trainer.based_trainer import BasedTrainer\nfrom display_as_gif import display_frames_as_gif\nfrom ReplayBuffer.replay_memory import ReplayBuffer, PrioritizeReplayBuffer, Rollout\n\nif tf.config.experimental.list_physical_devices('GPU'):\n for cur_device in tf.config.experimental.list_physical_devices(\"GPU\"):\n tf.config.experimental.set_memory_growth(cur_device, enable=True)\n\nclass Trainer(BasedTrainer):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.state_deque = deque(maxlen=self.multi_step)\n self.reward_deque = deque(maxlen=self.multi_step)\n self.action_deque = deque(maxlen=self.multi_step)\n\n def step(self, episode):\n state = self.env.reset()\n total_reward = 0\n time_per_episode = 0\n for step in range(1, self.n_steps+1):\n start_time = time.time()\n if self.total_steps < self.n_warmup:\n action = self.env.action_space.sample()\n else:\n if self.render:\n self.env.render()\n action = self.agent.choose_action(state)\n state_, reward, done, _ = self.env.step(action)\n\n # Multi-step learning\n self.state_deque.append(state)\n self.reward_deque.append(reward)\n self.action_deque.append(action)\n\n if len(self.state_deque) == self.multi_step or done:\n t_reward, p_index = self.multi_step_reward(self.reward_deque, self.agent.discount)\n state = self.state_deque[0]\n action = self.action_deque[0]\n self.replay_buf.push(np.array(state), action, done, np.array(state_), t_reward, p_index+1)\n\n total_reward += reward\n\n # Update main network\n if len(self.replay_buf) > self.batch_size and len(self.replay_buf) > self.n_warmup and self.total_steps % self.agent.update_interval == 0:\n indexes, transitions, weights = self.replay_buf.sample(self.agent.batch_size, episode/self.n_episode)\n train_data = map(np.array, zip(*transitions))\n loss, td_error = self.agent.update_main_net(train_data, weights)\n self.learning_flag = True\n self.summary(transitions, self.total_steps, loss)\n if (indexes != None):\n for i, td_error in enumerate(np.array(td_error)):\n self.replay_buf.update(indexes[i], td_error)\n\n if done or step == self.n_steps:\n time_per_step = time_per_episode / step\n self.total_steps += 1\n # Training results\n metrics = OrderedDict({\n \"episode\": episode,\n \"total_steps\": self.total_steps,\n \"step\": step,\n \"total_reward\":total_reward,\n \"time/step\": time_per_step\n })\n self.step_end(metrics)\n break\n\n state = state_\n time_per_episode += time.time() - start_time\n self.total_steps += 1\n\n if (episode % self.test_interval == 0 or episode == self.n_episode) and self.learning_flag:\n self.test(episode)\n\n return\n\n def step_end(self, metrics, other=None):\n super().step_end(metrics, other=None)\n self.state_deque.clear()\n self.action_deque.clear()\n self.reward_deque.clear()\n return","sub_path":"Trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"285445357","text":"import 
cv2\nfrom facenet_pytorch import MTCNN\nfrom PIL import Image\nimport os\n\ndef createDatasetImage(folderName):\n    name = folderName\n    DATASET_PATH = os.path.join(\"datasets\", name)\n    if not os.path.isdir(DATASET_PATH):\n        os.mkdir(DATASET_PATH)\n\n    mtcnn = MTCNN(prewhiten=False, keep_all=True, thresholds=[0.6, 0.7, 0.9])\n\n    image_no = 0\n    capture = cv2.VideoCapture(0)\n    count = 0\n    while True:\n        count += 1\n        check, frame = capture.read()\n\n        faces, _ = mtcnn.detect(Image.fromarray(frame))\n        if faces is not None and count % 7 == 0:\n            image_no += 1\n            cv2.imwrite(os.path.join(DATASET_PATH, f\"{name}_{image_no}.jpg\"), frame)\n            if image_no == 100:\n                break\n\n        image_text = f\"Number of image taken {image_no} for {name}\"\n        cv2.putText(frame, image_text, (20, 20), cv2.FONT_HERSHEY_SIMPLEX, .5, (100, 0, 200), 1, cv2.LINE_AA)\n        if faces is not None:\n            for (x, y, w, h) in faces:\n                x, y, w, h = int(x), int(y), int(w), int(h)\n                cv2.rectangle(frame, (x, y), (w, h), (200, 100, 0), 2)\n        cv2.imshow('frame', frame)\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            break\n\n    capture.release()\n    cv2.destroyAllWindows()","sub_path":"create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"274801845","text":"\"\"\" \r\nnwcontext.py is for handling \"context\" - which is defined as past words implicitly used\r\nin current sentences. By my lights there is a very limited set of such things, excluding\r\nvarious personal pronouns that can be assumed absent in a sales-bot application, or can\r\nbe brought in later. For now we have these categories of concept:\r\n\r\nPositing and juxtapositing mechanisms bring ideas into mind. \r\nAnd the basic operations that apply\r\nto one or several posited ideas are:\r\ngroup (for comparable nouns)\r\nalternate (for distinct values of an adjective category like color)\r\nmerge (for adjective values from different categories that apply to the same nouns)\r\nsequence (for nouns in sequence)\r\n\r\nThe goal of nwcontext is to support these operations as CONTEXT_OP vars, as you will see.\r\nThe plan is: label parents with the above identifiers and look in the past context for\r\nwords that share the right sort of parent.\r\n \r\n\"\"\"\r\n\r\nfrom narwhal.nwtypes import *\r\nfrom narwhal.nwutils import *\r\nfrom narwhal.nwcontrol import *\r\nfrom narwhal.nwsegment import *\r\n\r\n\r\nclass SegmentBuffers:\r\n    def __init__(self, N):\r\n        self.N = N\r\n        self.buffer = []\r\n        for i in range(0,N):\r\n            self.buffer.append( [NULL_VAR] )\r\n        self.next = 0\r\n\r\n    def clear(self):\r\n        for i in range(0,self.N):\r\n            self.buffer[i] = [NULL_VAR] \r\n\r\n    def addSegment(self, segment):\r\n        self.buffer[ self.next ] = segment\r\n        self.next = (self.next+1)%self.N\r\n\r\n    def getAll( self ):\r\n        a = []\r\n        for i in range(0,self.N):\r\n            s = self.buffer[(self.next + i)%self.N ]\r\n            if s != [NULL_VAR]:\r\n                a.extend( s )\r\n        return a\r\n\r\n\"\"\" Some basic methods, followed by increasingly abstract entities\"\"\"\r\n\r\n# will forget context after this many vars have gone by.\r\nMAXCONTEXTMEM = 30\r\n\r\n\r\ndef isParent(node, var):\r\n    for child in node.children :\r\n        if child.equals(var):\r\n            return True\r\n    return False\r\n\r\n# find the node of tree with isParent(var) True\r\ndef getParent(tree, var):\r\n\r\n    if isParent(tree,var):\r\n        return tree\r\n    \r\n    for node in tree.children:\r\n        p = getParent(node,var)\r\n        if p == NULL_VAR:\r\n            continue\r\n        else:\r\n            return p\r\n\r\n    return 
NULL_VAR\r\n\r\n##############################\r\ndef get2Alternatives(tree, segment ):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    q = NULL_VAR\r\n    var0 = NULL_VAR\r\n    numvars = 0\r\n    for var in revseg:\r\n\r\n        numvars += 1\r\n        if numvars>MAXCONTEXTMEM:\r\n            continue\r\n\r\n        # find a parent that is \"ALTERNATIVE\"\r\n        p = getParent(tree,var)\r\n        if not p.contextType == ALTERNATIVE_CONTEXT:\r\n            continue\r\n\r\n        # if you already saw such a parent\r\n        if not q==NULL_VAR:\r\n            if q.equals(p): #and it is the same parent\r\n                return [ var, var0]\r\n            else:\r\n                return [NULL_VAR, NULL_VAR]\r\n        q = p.copy()\r\n        var0 = var\r\n    return [NULL_VAR, NULL_VAR]\r\n    \r\ndef getOneOfGroup(tree, segment):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    numvars = 0\r\n    for var in revseg:\r\n        numvars += 1\r\n        if numvars>MAXCONTEXTMEM:\r\n            continue\r\n\r\n        # find a parent that is \"GROUP\"\r\n        p = getParent(tree,var)\r\n        if not p.contextType == GROUP_CONTEXT:\r\n            continue\r\n        # returns first groupable var\r\n        return [var]\r\n    return [NULL_VAR]\r\n\r\ndef getManyOfGroup(tree, segment):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    h = []\r\n    numvars = 0\r\n    for var in revseg:\r\n        numvars += 1\r\n        if numvars>MAXCONTEXTMEM:\r\n            continue\r\n\r\n        # find a parent that is \"GROUP\"\r\n        p = getParent(tree,var)\r\n        if not p.contextType == GROUP_CONTEXT:\r\n            continue\r\n        # collect every groupable var\r\n        h.append(var)\r\n    return h\r\n\r\n    # look for the last two ints in the context\r\ndef get2Ints(tree, segment):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    h = []\r\n    numints = 0\r\n    for var in revseg:\r\n        if var.isA(\"int\"):\r\n            numints += 1\r\n            if numints<=2:\r\n                h.append( var )\r\n    h = h[::-1]\r\n    return h\r\n\r\ndef getAll(tree, segment ):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    h = []\r\n    for var in revseg:\r\n        if var != NULL_VAR:\r\n            h.append( var )\r\n    h = h[::-1]\r\n    return h\r\n\r\ndef getN(segment, N):\r\n    revseg = segment[::-1] # reverse the list\r\n    p = NULL_VAR\r\n    h = []\r\n    n = 0\r\n    for var in revseg:\r\n        if var != NULL_VAR:\r\n            if n 0 and step % args[\"print_every_step\"] == 0:\n                end = time.time()\n                diff = timedelta(seconds=round(end - start))\n                print_output = \"[epoch: {:>3} step: {:>4}] train loss: {:>4.6} time: {}\".format(\n                    epoch, step, loss.item(), diff)\n                print(print_output)\n            step += 1\n\n        # if specified, after every training epoch, do validation\n        if args[\"validate\"]:\n            model.eval()\n            output_list = []\n            truth_list = []\n            for batch in val_iter:\n                text, target = batch.text, batch.target\n                with torch.no_grad():\n                    logit = model(text)\n\n                output_list.append(logit.detach())\n                truth_list.append(target.detach())\n\n            y_trues, y_preds = [], []\n\n            if args[\"num_classes\"] == 2:\n                for y_true, logit in zip(truth_list, output_list):\n                    y_pred = (torch.sigmoid(logit) > args[\"pos_threshold\"]).long().cpu().numpy()\n                    y_true = y_true.cpu().numpy()\n                    y_trues.append(y_true)\n                    y_preds.append(y_pred)\n                y_trues = np.concatenate(y_trues, axis=0)\n                y_preds = np.concatenate(y_preds, axis=0)\n\n                precision = metrics.precision_score(y_trues, y_preds, pos_label=1)\n                recall = metrics.recall_score(y_trues, y_preds, pos_label=1)\n                f1 = metrics.f1_score(y_trues, y_preds)\n\n                print(\"Validation: precision: {:.4f}, recall: {:.4f}, f1: {:.4f}\".format(precision, recall, f1))\n                print(\"\")\n                if args[\"save_best_model\"] and f1 > best_f1:\n                    best_f1 = f1\n                    torch.save(model.state_dict(), 
args[\"model_path\"])\n else:\n pass\n\n return model\n\n\nclass Attention(nn.Module):\n def __init__(self, feature_dim, step_dim, bias=True, **kwargs):\n super(Attention, self).__init__(**kwargs)\n\n self.supports_masking = True\n\n self.bias = bias\n self.feature_dim = feature_dim\n self.step_dim = step_dim\n self.features_dim = 0\n\n weight = torch.zeros(feature_dim, 1)\n nn.init.xavier_uniform_(weight)\n self.weight = nn.Parameter(weight)\n\n if bias:\n self.b = nn.Parameter(torch.zeros(step_dim))\n\n def forward(self, x, mask=None):\n feature_dim = self.feature_dim\n step_dim = self.step_dim\n\n eij = torch.mm(\n x.contiguous().view(-1, feature_dim),\n self.weight\n ).view(-1, step_dim)\n\n if self.bias:\n eij = eij + self.b\n\n eij = torch.tanh(eij)\n a = torch.exp(eij)\n\n if mask is not None:\n a = a * mask\n\n a = a / torch.sum(a, 1, keepdim=True) + 1e-10\n\n weighted_input = x * torch.unsqueeze(a, -1)\n return torch.sum(weighted_input, 1)\n\n\nclass GRU(nn.Module):\n def __init__(self, args):\n super(GRU, self).__init__()\n\n hidden_size = 128\n maxlen = 100\n pretrained_embed = args[\"data_train\"].fields[\"text\"].vocab.vectors\n\n if pretrained_embed is None:\n self.embed = nn.Embedding(vocab_size, args[\"embed_dim\"])\n else:\n self.embed = nn.Embedding.from_pretrained(pretrained_embed, freeze=True)\n\n self.embed_dropout = nn.Dropout2d(0.1)\n self.lstm1 = nn.LSTM(args[\"embed_dim\"], hidden_size, bidirectional=True, batch_first=True)\n self.lstm2 = nn.LSTM(hidden_size * 2, hidden_size, bidirectional=True, batch_first=True)\n\n self.lstm_attention = Attention(hidden_size * 2, maxlen)\n self.gru_attention = Attention(hidden_size * 2, maxlen)\n\n self.linear = nn.Linear(hidden_size*2, 64)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.1)\n self.out = nn.Linear(64, 1)\n self.loss = nn.BCEWithLogitsLoss(pos_weight=args[\"pos_weight\"])\n\n def forward(self, x):\n h_embed = self.embed(x)\n h_embed = torch.squeeze(self.embed_dropout(torch.unsqueeze(h_embed, 0)))\n\n h_gru1, _ = self.lstm1(h_embed)\n # h_gru2, (h_n, _) = self.gru2(h_gru1)\n\n # h_lstm_atten = self.lstm_attention(h_lstm)\n # h_gru_atten = self.gru_attention(h_gru)\n\n # avg_pool = torch.mean(h_gru, 1)\n # max_pool, _ = torch.max(h_gru, 1)\n\n # conc = torch.cat((h_lstm_atten, h_gru_atten, avg_pool, max_pool), 1)\n\n shape = list(h_gru1.size())\n\n # use the last hidden state for classification\n h_n = torch.index_select(h_gru1, 1, torch.tensor(shape[1]).to(\"cuda:0\"))\n conc = self.relu(self.linear(h_n))\n conc = self.dropout(conc)\n out = self.out(conc)\n return out\n\n\ndef model_infer(model, args):\n \"\"\"Inference using model.\n \"\"\"\n print(\"Predicting...\")\n # model = TextCNN(args)\n load_model(model, args[\"model_path\"], use_cuda=args[\"cuda\"])\n\n device = \"cpu\"\n if args[\"cuda\"]:\n device = \"cuda:0\"\n model = model.to(device)\n\n model.eval()\n batch_output = []\n\n # define batch iterator\n data_iter = torchtext.data.Iterator(dataset=args[\"data_test\"], batch_size=args[\"batch_size\"], train=False,\n device=device, sort=False)\n for batch in data_iter:\n text = batch.text\n with torch.no_grad():\n logit = model(text)\n batch_output.append(logit.detach())\n\n y_preds = []\n for logit in batch_output:\n y_pred = (torch.sigmoid(logit) > args[\"pos_threshold\"]).long().cpu().numpy()\n y_preds.append(y_pred)\n y_preds = np.concatenate(y_preds, axis=0).flatten()\n\n # submit result\n test_df = pd.read_csv(args[\"test_path\"], index_col=False, header=0)\n data = {\"qid\": test_df[\"qid\"], 
\"prediction\": y_preds}\n subm_df = pd.DataFrame(data=data)\n subm_df.to_csv(args[\"submission_path\"], header=True, index=False)\n print(\"submission saved as {}.\".format(submission_path))\n print('')\n\n\ndef ensemble_predicate(models, test_data, batch_size, pos_threshold, test_path, sub_path):\n for model in models:\n model.eval()\n\n batch_output = []\n\n # define batch iterator\n data_iter = torchtext.data.Iterator(dataset=test_data, batch_size=batch_size, train=False,\n device=\"cuda:0\", sort=False)\n\n for batch in data_iter:\n text = batch.text\n logits = []\n with torch.no_grad():\n for model in models:\n logit = model(text).squeeze(-1)\n logits.append(logit)\n logit = sum([d for d in logits])/len(logits)\n batch_output.append(logit)\n\n y_preds = []\n for logit in batch_output:\n y_pred = (torch.sigmoid(logit) > pos_threshold).long().cpu().numpy()\n y_preds.append(y_pred)\n y_preds = np.concatenate(y_preds, axis=0).flatten()\n\n # submit result\n test_df = pd.read_csv(test_path, index_col=False, header=0)\n data = {\"qid\": test_df[\"qid\"], \"prediction\": y_preds}\n subm_df = pd.DataFrame(data=data)\n subm_df.to_csv(sub_path, header=True, index=False)\n print(\"submission saved as {}.\".format(submission_path))\n print('')\n\n\n# train_data_path = '../input/train.csv'\n# test_data_path = '../input/test.csv'\n# glove_embedding = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\n# param_embedding = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'\n# submission_path = './submission.csv'\n# model_path = './default_model.pkl'\n\ntrain_data_path = 'data/train.csv'\ntest_data_path = 'data/test.csv'\nglove_embedding = 'data/glove.840B.300d.txt'\ngoogle_embedding = 'data/GoogleNews-vectors-negative300.bin'\nparam_embedding = 'data/paragram_300_sl999.txt'\nwiki_embedding = 'data/wiki-news-300d-1M.vec'\nsubmission_path = 'data/submission.csv'\nmodel_path = 'data/default_model.pkl'\n\ntrain, val, test, vocab_size, padding_idx, text = preprocess(train_path=train_data_path, test_path=test_data_path)\ntrain_args = {\n # basic configurations\n \"embed_dim\": 300,\n \"epochs\": 10,\n \"batch_size\": 32,\n \"learning_rate\": 0.001,\n \"validate\": True,\n \"save_best_model\": True,\n \"model_path\": model_path,\n \"cuda\": True,\n \"print_every_step\": 1000,\n \"num_classes\": 2,\n \"data_train\": train,\n \"data_val\": val,\n \"data_test\": test,\n \"vocab_size\": vocab_size,\n \"padding_index\": padding_idx,\n \"test_path\": test_data_path,\n \"submission_path\": submission_path,\n \"drop_prob\": 0.1,\n\n # configurations for binary classification\n \"pos_weight\": torch.tensor([1.0]),\n \"pos_threshold\": 0.5,\n \"eval_metrics\": \"f1\"\n}\n\nensembles = []\n\ntext.vocab.load_vectors(torchtext.vocab.Vectors(glove_embedding))\n# cnn = TextCNN(args=train_args)\nlstm = LSTM(args=train_args)\nmodel_train(lstm, args=train_args)\n\n\n# gru_param = GRU(args=train_args)\n# gru_param = model_train(model=gru_param, args=train_args)\n# ensembles.append(gru_param)\n\n# text.vocab.load_vectors(torchtext.vocab.Vectors(glove_embedding))\n# gru_glove = GRU(args=train_args)\n# gru_glove = model_train(model=gru_glove, args=train_args)\n# ensembles.append(gru_glove)\n\n\n# text_cnn = TextCNN(args=train_args)\n# text_cnn = model_train(model=text_cnn, args=train_args)\n\n# ensembles.append(model_glove)\n# ensembles.append(text_cnn)\n\n# text.vocab.load_vectors(torchtext.vocab.Vectors(paragram_embedding))\n# model_paragram = model_train(train_args)\n# 
ensembles.append(model_paragram)\n\n# model_infer(model_glove, train_args)\n# ensemble_predicate(ensembles, test, batch_size=128, pos_threshold=0.33, test_path=test_data_path,\n# sub_path=submission_path)\n","sub_path":"textcnn_less_memory.py","file_name":"textcnn_less_memory.py","file_ext":"py","file_size_in_byte":13339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653815543","text":"from sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nimport numpy as np\nfrom pathlib import Path\nfrom sklearn.linear_model import LogisticRegression\n# Other ML methods; there are even more in the library.\n# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n# from sklearn.neighbors import KNeighborsClassifier\n# from sklearn.naive_bayes import GaussianNB\n# from sklearn.tree import DecisionTreeClassifier\n# from sklearn.svm import SVC\nimport csv\nimport os.path\n\ndef median_filter (x, k):\n \"\"\"Apply a length-k median filter to a 1D array x.\n Boundaries are extended by repeating endpoints.\n \"\"\"\n assert k % 2 == 1, \"Median filter length must be odd.\"\n assert x.ndim == 1, \"Input must be one-dimensional.\"\n k2 = (k - 1) // 2\n y = np.zeros ((len (x), k), dtype=x.dtype)\n y[:,k2] = x\n for i in range (k2):\n j = k2 - i\n y[j:,i] = x[:-j]\n y[:j,i] = x[0]\n y[:-j,-(i+1)] = x[j:]\n y[-j:,-(i+1)] = x[-1]\n return np.median (y, axis=1)\n\n\n# probe1_folder = Path(r\"C:\\Users\\Acer A315\\Desktop\\EDUCATION\\1. Diploma\\1. Sketches\\_Server_\\4. ArduinoPythonSrv-master - Modified\\1_probe\")\nvectors = []\n\nfor i in range(0, 119):\n with open(f'all_probes/{i}_probe/accelx_data.txt', 'r') as f:\n ax = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n with open(f'all_probes/{i}_probe/accely_data.txt', 'r') as f:\n ay = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n with open(f'all_probes/{i}_probe/accelz_data.txt', 'r') as f:\n az = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n\n with open(f'all_probes/{i}_probe/gyrox_data.txt', 'r') as f:\n gx = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n with open(f'all_probes/{i}_probe/gyrox_data.txt', 'r') as f:\n gy = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n with open(f'all_probes/{i}_probe/gyrox_data.txt', 'r') as f:\n gz = np.array([float(x) for x in f.read().split('\\n')[:-1]])\n # NOTE: gy and gz are also read from gyrox_data.txt -- this looks like a copy-paste slip\n\n # print(\"Usual ax\", ax)\n\n# for i in range(10, 15):\n# with open(f'all_probes/{i}_probe/{i}_probe.csv', 'r') as f:\n# csvreader = csv.reader(f, delimiter=',', quotechar='|')\n# j = 0\n# vector = []\n# for row in csvreader:\n# if j > 0:\n# # print(row)\n# vector += [float(x) for x in row]\n# j += 1\n# # print(vector)\n# vectors.append(vector)\n\n\n\n ax = median_filter(ax, 5)\n ay = median_filter(ay, 5)\n az = median_filter(az, 5)\n\n gx = median_filter(gx, 5)\n gy = median_filter(gy, 5)\n gz = median_filter(gz, 5)\n\n # print(\"Filtered ax\", ax)\n vector = []\n for xa, ya, za, xg, yg, zg in zip(ax, ay, az, gx, gy, gz):\n vector.append(xa)\n vector.append(ya)\n vector.append(za)\n vector.append(xg)\n vector.append(yg)\n vector.append(zg)\n\n #print(vector)\n\n\n #print(len(vector))\n vectors.append(vector)\n\n#print(vectors)\n\ncsvfile = open(f'all_probes/dataset2.csv', 'w', newline='\\n')\nspamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\nfor i in range(len(vectors)): # commented out, to split and save the training and test 
sets into two csv files\n# for i in range[0, 119](len(vectors)): # attempted this\n with open(f'all_probes/{i+1}_probe/Result', 'r') as f:\n success = int(f.read())\n spamwriter.writerow(vectors[i] + [success])\n\n\n# test_data = open(f'all_probes/test-data2.csv', 'w', newline='\\n')\n# spamwriter = csv.writer(test_data, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# for vectors:\n# spamwriter.writerow(vectors[120])\n\n\n# for i in range(1, 4):\n# csvfile = open(f'all_probes/{i}_probe/{i}_000probe.csv', 'w', newline='')\n# spamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n# spamwriter.writerow(['vector', 'true or false'])\n# # for xa, ya, za, xg, yg, zg in zip(ax, ay, az, gx, gy, gz):\n# for vector in vectors:\n# spamwriter.writerow(vector + [])\n# csvfile.close()","sub_path":".Archive/8. Archive of the entire project/14.04.2021/attempt_vectors.py","file_name":"attempt_vectors.py","file_ext":"py","file_size_in_byte":4260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133697060","text":"import random\nfrom random import choice\n\n\ndef get_range_number():\n \"\"\"\nread the start and end of the range for the guessed number\"\"\"\n global range_1, range_2\n while True:\n range_1 = input(\"choose start number range : \")\n try:\n range_1 = int(range_1)\n try:\n range_2 = input(\"choose end number range : \")\n range_2 = int(range_2)\n except:\n print(\"input only the numeric form!!!\")\n continue\n except:\n print(\"input only the numeric form!!!\")\n continue\n else:\n if range_1 < range_2 and (range_2-range_1) >= 9:\n break\n else:\n print(\n \"the starting number must be less than the end number and the \\\n minimum range width is 10 numbers.\")\n\n\ndef get_guess_numbers():\n numbers = {}\n \"\"\"take the guessed number and the numbers associated with it.\"\"\"\n use_number = []\n while True:\n number = random.randint(range_1, range_2)\n for i in range(2, range_2):\n if (number % i == 0 or i % number == 0) and (i != number):\n use_number.append(i)\n if use_number or number == 0:\n break\n break\n numbers['guess_number'] = number\n numbers['list_use_number'] = use_number\n return numbers\n\n\ndef zero_number(list_num):\n \"\"\"generate several statements related to zero.\"\"\"\n str_zero = [\n \"this number is neither a positive number nor a negative number\",\n \"multiplying by this number yields this number as well\",\n \"the result of division by this number is undefined\",\n \"is an even number\",\n ]\n if list_num:\n get_str = choice(list_num)\n str_hint = \"- \" + str_zero[get_str]\n list_num.remove(get_str)\n else:\n str_hint = \"- this number is not a prime number\"\n return str_hint\n\n\ndef get_primes(number):\n \"\"\"yields several statements regarding large prime numbers\"\"\"\n another_number_1 = choice(\n [x for x in range((number-random.randint(1, 10)), number)])\n another_number_2 = choice(\n [x for x in range(number, (number+random.randint(1, 10)))])\n str_hint = \"- this number satisfies the inequality \" + \\\n str(another_number_1) + \" <= ??? 
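A quick check of the median_filter helper defined near the top of attempt_vectors.py above: on a spiky signal a lone outlier is suppressed while the flat trend survives. Same logic, asserts dropped:

import numpy as np

def median_filter(x, k):
    k2 = (k - 1) // 2
    y = np.zeros((len(x), k), dtype=x.dtype)
    y[:, k2] = x
    for i in range(k2):
        j = k2 - i
        y[j:, i] = x[:-j]     # shifted copies of the signal...
        y[:j, i] = x[0]       # ...with endpoints repeated at the borders
        y[:-j, -(i + 1)] = x[j:]
        y[-j:, -(i + 1)] = x[-1]
    return np.median(y, axis=1)

x = np.array([1.0, 1.0, 9.0, 1.0, 1.0])
print(median_filter(x, 3))  # [1. 1. 1. 1. 1.] -- the 9.0 spike is removed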
<= \" + str(another_number_2)\n return str_hint\n\n\ndef clue_formula(number, use_number, temp, list_num_zero):\n \"\"\"formula for taking hints.\"\"\"\n if number == 0:\n str_hint = zero_number(list_num_zero)\n elif use_number:\n get_use_number = choice(use_number)\n if number % get_use_number == 0 and get_use_number not in temp:\n a = get_use_number\n b = int(number/get_use_number)\n str_hint = \"- this number satisfies the equation: \" + \\\n str(a) + \" x \" + str(b)\n use_number.remove(get_use_number)\n temp.append(b)\n elif get_use_number % number == 0:\n a = get_use_number\n b = int(get_use_number/number)\n str_hint = \"- this number satisfies the equationn: \" + \\\n str(a) + \" / \" + str(b)\n use_number.remove(get_use_number)\n temp.append(b)\n else:\n another_number = choice(\n [x for x in range(-(range_2), range_2) if x != number])\n if another_number > number:\n str_hint = \"- number < \" + str(another_number)\n else:\n str_hint = \"- number >\" + str(another_number)\n else:\n str_hint = get_primes(number)\n return use_number, str_hint, temp\n","sub_path":"guess number/feature.py","file_name":"feature.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574577376","text":"import torch\nimport torch.nn\nimport torch.nn.functional\nimport torch.optim\n\n\nclass DeepQNetwork(torch.nn.Module):\n\n def __init__(self, height, width, input_channels, outputs):\n super(DeepQNetwork, self).__init__()\n\n # First layer\n self.conv1 = torch.nn.Conv2d(input_channels, 16, kernel_size=8, stride=4)\n\n # Second layer\n self.conv2 = torch.nn.Conv2d(16, 32, kernel_size=4, stride=2)\n\n # Method that computes the number of units of a convolution output given an input\n # Equation taken from:\n # Dumoulin, V., & Visin, F.(2016).A guide to convolution arithmetic for deep learning. 1–31. 
Retrieved from\n # http://arxiv.org/abs/1603.07285\n def conv2d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size) // stride) + 1\n\n convw = conv2d_output_size(conv2d_output_size(width, kernel_size=8, stride=4), kernel_size=4, stride=2)\n convh = conv2d_output_size(conv2d_output_size(height, kernel_size=8, stride=4), kernel_size=4, stride=2)\n\n linear_output_size = 32 * convw * convh\n\n # Hidden layer\n self.hiden_linear_layer = torch.nn.Linear(linear_output_size, 256)\n\n # Output layer\n self.head = torch.nn.Linear(256, outputs)\n\n def forward(self, x):\n x = torch.nn.functional.relu(self.conv1(x))\n x = torch.nn.functional.relu(self.conv2(x))\n x = x.view(x.size(0), -1)\n x = torch.nn.functional.relu(self.hiden_linear_layer(x))\n return self.head(x)\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"452833389","text":"import copy\nimport json\nimport tempfile\nimport os\nimport pkg_resources\n\nimport tanker \nfrom tanker.CliTag import CliTagParser\n\nfrom flask import Flask, request, send_file\nfrom werkzeug import secure_filename\nfrom tanker.DescriptorFile import Factory as DescriptorFileFactory, DescriptorFile, DescriptorFileException\nfrom collections import OrderedDict\n\n@tanker.app.route('/api/v1/info/', methods=['GET'])\ndef get_tanker_version():\n return json.dumps({'version': str(pkg_resources.get_distribution('tanker'))}, indent=4), 200, [('content-type', 'application/json')]\n\n@tanker.app.route('/api/v1/tasks/', methods=['GET'])\ndef get_tasks():\n tasks = tanker.getStorage().get_all_task_names()\n return json.dumps(tasks, indent=4), 200, [('content-type', 'application/json')]\n\n@tanker.app.route('/api/v1/tasks/versions/latest/', methods=['GET'])\ndef get_latest_task_versions():\n tasks_with_versions = tanker.getStorage().get_task_versions()\n latest_versions = {}\n for (name, tasks) in tasks_with_versions.items():\n if len(tasks) > 0:\n latest_versions[name] = tasks[0]\n return json.dumps(latest_versions, indent=4), 200, [('content-type', 'application/json')]\n\n@tanker.app.route('/api/v1/tasks/versions/', methods=['GET'])\ndef get_tasks_with_versions():\n tasks_with_versions = tanker.getStorage().get_task_versions()\n return json.dumps(tasks_with_versions, indent=4), 200, [('content-type', 'application/json')]\n\n@tanker.app.route('/api/v1/task///', methods=['GET'])\ndef get_task_descriptor(name, version):\n if not tanker.getStorage().task_exists(name, version):\n return 'The task name/version \"{}/{}\" was not found.'.format(name, version), 404, [('content-type', 'text/plain')]\n else:\n version = tanker.getStorage().resolve_version(name, version)\n with tanker.getStorage().descriptor_file_contents(name, version) as contents:\n if 'application/json' in request.accept_mimetypes:\n return taskToJson(name, version, DescriptorFileFactory().createFromString(contents)), 200, [('content-type', 'application/json')]\n else:\n return contents, 200, [('content-type', 'text/plain')]\n\n@tanker.app.route('/api/v1/task//versions/', methods=['GET'])\ndef get_task_versions(name):\n task_versions = tanker.getStorage().get_task_versions(name)\n return json.dumps(task_versions, indent=4), 200, [('content-type', 'application/json')]\n\n@tanker.app.route('/api/v1/task///archive/', methods=['GET'])\ndef get_task_archive(name, version):\n task_zip_file = tanker.getStorage().get_task_archive(name, version)\n 
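For intuition, the conv2d_output_size rule used in DeepQNetwork above, ((input - kernel) // stride) + 1, traced on a hypothetical 84x84 input (the input size is an assumption for illustration, not a value from this file):

def conv2d_output_size(input_size, kernel_size, stride):
    return ((input_size - kernel_size) // stride) + 1

after_conv1 = conv2d_output_size(84, kernel_size=8, stride=4)           # (84-8)//4+1 = 20
after_conv2 = conv2d_output_size(after_conv1, kernel_size=4, stride=2)  # (20-4)//2+1 = 9
print(after_conv1, after_conv2, 32 * after_conv2 * after_conv2)         # 20 9 2592 linear inputs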
response = send_file(task_zip_file, as_attachment=True, attachment_filename='{}-{}.zip'.format(name, version))\n os.remove(task_zip_file)\n return response\n\n@tanker.app.route('/api/v1/task//', methods=['PUT'])\ndef upload_task(name):\n temp_dir = tempfile.mkdtemp()\n if 'archive_file' not in request.files:\n return \"No 'archive_file' specified in the request\\n\", 400, [('content-type', 'text/plain')]\n f = request.files['archive_file']\n zip_file_path = os.path.join(temp_dir, secure_filename(f.filename))\n f.save(zip_file_path)\n storage = tanker.getStorage()\n try:\n version = storage.add_task(name, zip_file_path)\n return version, 200, [('content-type', 'text/plain')]\n except Exception as error:\n return str(error), 400, [('content-type', 'text/plain')]\n\ndef taskToJson(name, version, descriptorFile):\n outputDict = OrderedDict()\n outputDict[\"task\"] = copy.deepcopy(descriptorFile.items(\"task\"))\n outputDict[\"task\"][\"name\"] = name \n outputDict[\"task\"][\"version\"] = version\n outputDict[\"runtime\"] = copy.deepcopy(descriptorFile.items(\"runtime\"))\n command = descriptorFile.get('task', 'command')\n if command is not None:\n outputDict[\"task\"][\"command\"] = CliTagParser().simplify(command)\n outputDict[\"parameters\"] = OrderedDict()\n for tag in descriptorFile.getTags():\n outputDict[\"parameters\"][tag.name] = OrderedDict([\n ('param_type', tag.type),\n ('quantifier', tag.quantifier),\n ('prefix', tag.prefix),\n ('default', tag.default),\n ('description', tag.description)\n ])\n else:\n outputDict[\"task\"][\"command\"] = None\n return json.dumps(outputDict, indent=2)\n","sub_path":"tanker/Api.py","file_name":"Api.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269430792","text":"n = int(input().strip())\n\ndef gcd(a,b):\n while b != 0:\n t = b\n b = a % b\n a = t\n return a\n\ndef lcm(a,b):\n return int ((a * b) / gcd(a, b))\n\n\nfor i in range(n):\n num = int(input().strip())\n ans = 1\n for j in range(1,num + 1):\n if ans % j == 0:\n continue\n factor = gcd(ans,j)\n toBeMultiplied = j // factor\n ans *= toBeMultiplied\n print(ans)\n","sub_path":"Project Euler/ProjectEuler_5.py","file_name":"ProjectEuler_5.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"483799212","text":"from __future__ import print_function, division\nimport math\n \nclass AB:\n def createString(self, N, K):\n s = [1] * (N//2) + [0] * ((N+1)//2)\n while K:\n for i in range(N-1):\n if s[i] == 1 and s[i+1] == 0:\n s[i] = 0\n s[i+1] = 1\n break\n else:\n return \"\"\n K -= 1\n return \"\".join(\"A\" if i == 0 else \"B\" for i in s)\n","sub_path":"AB.py","file_name":"AB.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"43148766","text":"import logging\n\nfrom citrus import SourceResource\n\nlogger = logging.getLogger(__name__)\nlogger.addHandler(logging.NullHandler())\nlogger.debug(f'Loaded {__name__} map')\n\n\ndef ssdn_dc_bepress_map(rec):\n sr = SourceResource()\n\n # contributor\n if rec.contributor:\n sr.contributor = [{'name': contributor} for contributor in\n rec.contributor]\n\n # creator\n if rec.creator:\n sr.creator = [{'name': creator} for creator in\n rec.creator]\n\n # date\n try:\n sr.date = {'begin': rec.date,\n 'end': rec.date,\n 'displayDate': rec.date}\n except TypeError:\n 
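The smallest-multiple loop in the Project Euler solution above works because, for each j, it multiplies in only the factor j // gcd(ans, j) that ans is still missing, so ans stays the running lcm. A compact, self-contained trace:

def gcd(a, b):
    while b != 0:
        a, b = b, a % b
    return a

ans = 1
for j in range(1, 11):
    ans *= j // gcd(ans, j)   # contribute only the missing factor of j
print(ans)  # 2520, the smallest number divisible by every integer 1..10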
logger.info(f\"No date - {rec.harvest_id}\")\n\n # description\n sr.description = rec.description\n\n # format\n sr.format = rec.format\n\n # identifier\n for identifier in rec.identifier:\n if identifier.startswith('http'):\n sr.identifier = identifier\n\n # language\n try:\n sr.language = [{'name': lang} for lang in rec.language]\n except TypeError:\n logger.info(f\"No language - {rec.harvest_id}\")\n\n # place\n if rec.place:\n sr.spatial = [{'name': place} for place in rec.place]\n\n # publisher\n sr.publisher = rec.publisher\n\n # rights\n if len(rec.rights) > 1:\n for r in rec.rights:\n if r.startswith('http'):\n sr.rights = [{'@id': r}]\n else:\n if rec.rights[0].startswith('http'):\n sr.rights = [{'@id': rec.rights[0]}]\n else:\n logger.warning(f\"No rights URI - {rec.harvest_id}\")\n sr.rights = [{'text': rec.rights[0]}]\n\n # subject\n if rec.subject:\n sr.subject = [{'name': subject} for subject in rec.subject]\n\n # title\n sr.title = rec.title\n\n # type\n sr.type = rec.type\n\n # thumbnail\n tn = None\n\n yield sr, tn\n","sub_path":"ssdn_dc_bepress_map.py","file_name":"ssdn_dc_bepress_map.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"20841357","text":"import csv\nimport logging\nimport os\nimport discord\nfrom discord.ext import commands, tasks\nfrom discord.utils import get\n\n# logging config\nlogging.basicConfig(\n filename=\".log/reg.log\",\n format=\"%(asctime)s - %(message)s\",\n level=logging.INFO,\n datefmt=\"%d-%b-%y %H:%M:%S\",\n)\n\n# set up channel ids and enviroment variables\nreg_channel_id = int(os.environ[\"REG_CHANNEL_ID\"])\n\ntry:\n log_channel_id = int(os.environ[\"LOG_CHANNEL_ID\"])\nexcept:\n log_channel_id = None\n\ntry:\n only_respond_reg = int(os.environ[\"ONLY_RESPOND_REG\"])\nexcept:\n only_respond_reg = False\n\n# TODO: seperate customization in conf file\nevent_name = \"EuroPython\"\n\ninstruction = f\"Welcome to {event_name}! Please use `!register , ` to register.\\nE.g. 
`!register James Brown, 99999`\nNOTE: please ONLY register for YOURSELF.\"\n\n\ndef welcome_msg(mention, roles):\n if len(roles) == 2:\n return f\"Welcome {mention}, you now have the {roles[0]} and {roles[1]} roles.\"\n elif len(roles) == 1:\n return f\"Welcome {mention}, you now have the {roles[0]} role.\"\n else:\n text = \", \".join(roles[1:-1])\n return f\"Welcome {mention}, you now have the {roles[0]}, {text} and {roles[-1]} roles.\"\n\n\nbot = commands.Bot(\n command_prefix=\"!\",\n description=f\"Registration Desk for {event_name}\",\n help_command=None,\n)\n\n\ndef roles_given(name, ticket_no):\n # check the roles that need to be given to the user\n # return list of roles that need to be given\n with open(os.environ[\"DATA_PATH\"], newline=\"\") as csvfile:\n datareader = csv.reader(csvfile, delimiter=\",\")\n for row in datareader:\n try: # skip if it's header\n if int(row[4]) == int(ticket_no):\n if row[0] == name:\n if row[3] == \"sprint\":\n return [\"sprinter\"]\n if row[2] == \"yes\":\n return [\"speaker\", \"attendee\"]\n else:\n return [\"attendee\"]\n except:\n continue\n\n\n@bot.event\nasync def on_ready():\n await bot.change_presence(\n status=discord.Status.online,\n activity=discord.Activity(type=discord.ActivityType.listening, name=\"!help\"),\n )\n await bot.get_channel(reg_channel_id).send(instruction)\n print(\"Bot is ready\")\n logging.info(\"Bot logged in\")\n\n\n@bot.command()\nasync def register(ctx, *, info):\n if not only_respond_reg or ctx.channel.id == reg_channel_id:\n info = info.split(\",\")\n roles = roles_given(info[0], info[1])\n if roles is None:\n logging.info(\n f\"FAIL: Cannot find request from user {ctx.author} with name={info[0]}, ticket_no={info[1]}\"\n )\n await ctx.send(\n f\"{ctx.author.mention} Sorry cannot find the ticket #{info[1]} with name: {info[0]}.\\nPlease check and make sure you put down your full name same as the one you used in registering your ticket then try again.\\nIf you want a team member to help you, please reply to this message with '@registration'\"\n )\n else:\n log_msg = f\"SUCCESS: Register user {ctx.author} name={info[0]}, ticket_no={info[1]} with roles={roles}\"\n logging.info(log_msg)\n if log_channel_id is not None:\n await bot.get_channel(log_channel_id).send(log_msg)\n\n await ctx.message.add_reaction(\"🎟️\")\n await ctx.message.add_reaction(\"🤖\")\n await ctx.author.edit(nick=info[0])\n attendee_role = get(ctx.author.guild.roles, name=\"attendee\")\n await ctx.author.add_roles(attendee_role)\n\n for role in roles:\n role_id = get(ctx.author.guild.roles, name=role)\n await ctx.author.add_roles(role_id)\n\n await ctx.author.send(welcome_msg(ctx.author.mention, roles))\n\n\n@bot.command()\nasync def help(ctx):\n if not only_respond_reg or ctx.channel.id == reg_channel_id:\n await ctx.send(instruction)\n\n\nbot.run(os.environ[\"REG_BOT_SECRET\"])\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299195919","text":"import numpy as np\nimport cv2\nimport os\n\nclass_dictionary = {0: 'background', 1: 'aeroplane', 2: 'bicycle', 3: 'bird', 4: 'boat',\n 5: 'bottle', 6: 'bus', 7: 'car', 8: 'cat', 9: 'chair',\n 10: 'cow', 11: 'dining_table', 12: 'dog', 13: 'horse', 14: 'motorbike',\n 15: 'person', 16: 'potted_plant', 17: 'sheep', 18: 'sofa', 19: 'train',\n 20: 'TV_monitor', 21: 'edge'}\n\n\n# VOC2007-2012 dataset overview:\n\n# 
Two folders: JPEGImages holds 17125 RGB images and SegmentationClass holds 12031 semantic-segmentation images; equal id numbers mean the two files annotate the same image.\n# Reading the png images in SegmentationClass with \"from PIL import Image\" yields the annotation information.\n# Image gives an (h,w) single-channel matrix; the pixel values cover 22 classes in total, encoded by 22 numbers: 0, 1, 2, ..., 20, and 255.\n\n# 0 stands for the background\n# 1-20 stand for the object classes present in the image\n# 255 stands for object contour information, which we ignore during processing\n\ndef read_path():\n\n data_x = []\n data_y = []\n\n filename = os.listdir('SegmentationClass')\n filename.sort()\n for name in filename:\n\n serial_number = name.split('.')[0]\n img_path = 'JPEGImages/' + serial_number + '.jpg'\n seg_path = 'SegmentationClass/' + serial_number + '.png'\n\n data_x.append(img_path)\n data_y.append(seg_path)\n\n return data_x, data_y\n\n\ndef make_data():\n\n data_x, data_y = read_path()\n print('all image quantity : ', len(data_y)) # 12031\n\n train_x = data_x[:11000]\n train_y = data_y[:11000]\n val_x = data_x[11000:]\n val_y = data_y[11000:]\n test_x = data_x[11000:]\n test_y = data_y[11000:]\n\n return train_x, train_y, val_x, val_y, test_x, test_y\n\n","sub_path":"read_data_path.py","file_name":"read_data_path.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46285272","text":"from django import forms\n\nfrom models import Class, Experiment\n\n\nclass ResultForm(forms.Form):\n experiment = forms.ModelChoiceField(queryset=Experiment.objects.all(), label='Experiment')\n student_class = forms.ModelChoiceField(queryset=Class.objects.all(), label='Class')\n\n def __init__(self, *args, **kwargs):\n qs = kwargs.pop('queryset', None)\n super(ResultForm, self).__init__(*args, **kwargs)\n for field_name in ['experiment', 'student_class']:\n self.fields[field_name].widget.attrs['class'] = 'form-control'\n if qs:\n self.fields['student_class'].queryset = qs\n\n\nclass SelectExperimentForm(forms.Form):\n experiment = forms.ModelChoiceField(queryset=Experiment.objects.all(), label='Experiment')\n\n def __init__(self, *args, **kwargs):\n super(SelectExperimentForm, self).__init__(*args, **kwargs)\n self.fields['experiment'].widget.attrs['class'] = 'form-control'\n\n\nclass SelectClassForm(forms.Form):\n student_class = forms.ModelChoiceField(queryset=Class.objects.all(), label='Class')\n\n def __init__(self, *args, **kwargs):\n if 'queryset' in kwargs:\n qs = kwargs.pop('queryset')\n else:\n qs = None\n super(SelectClassForm, self).__init__(*args, **kwargs)\n self.fields['student_class'].widget.attrs['class'] = 'form-control'\n if qs:\n self.fields['student_class'].queryset = qs\n","sub_path":"analytics/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396740921","text":"from math import sin\n\ndef is_power_of_two(n):\n if n == 2:\n return True\n if n % 2 != 0:\n return False\n return is_power_of_two(n / 2)\n\n\ndef fib(n):\n if n == 0:\n return 1\n if n == 1:\n return 1\n return fib(n - 1) + fib(n - 2)\n\n\ndef find_root_sqrt2(epsilon, a, b):\n f_a = (a ** 2) - 2\n f_b = (b ** 2) - 2\n c = (a + b) / 2\n f_c = (c ** 2) - 2\n if abs(f_c) < epsilon:\n return c\n else:\n if f_c < 0:\n return find_root_sqrt2(epsilon, c, b)\n else:\n return find_root_sqrt2(epsilon, a, c)\n\n\ndef find_root(f, epsilon, a, b):\n c = (a + b) / 2\n if abs(f(c)) < epsilon:\n return c\n else:\n if f(c) < 0:\n return find_root(f, epsilon, c, b)\n else:\n return find_root(f, epsilon, a, c)\n\n\ndef root2(x):\n return (x ** 2) - 2\n\n\ndef sinPoint5(x):\n return sin(x) - 
0.5\n\n\nt0 = {\"key\":\"node0\",\n \"val\":27,\n \"children\":[]}\n\nt1 = {\"key\":\"node0\",\n \"val\":1,\n \"children\":[{\"key\":\"node0\",\n \"val\":2,\n \"children\":[{\"key\":\"node0\",\n \"val\":3,\n \"children\":[]}]},\n {\"key\":\"node0\",\n \"val\":4,\n \"children\":[]},\n {\"key\":\"node0\",\n \"val\":5,\n \"children\":[]}]}\n\n\ndef count_leaves(t):\n '''\n Count the number of leaves in the tree rooted at t\n \n Inputs: (dictionary) a tree\n \n Returns: (integer) number of leaves in t\n '''\n assert t is not None\n\n if not t[\"children\"]:\n return 1\n\n num_leaves = 0\n for kid in t[\"children\"]:\n num_leaves += count_leaves(kid)\n\n return num_leaves\n\n\ndef add_values(t):\n assert t is not None\n\n values = t['val']\n\n if not t['children']:\n return values\n \n for child in t['children']:\n values += add_values(child)\n\n return values","sub_path":"labs/lab8/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18477990","text":"import pkg_resources\nimport platform\nimport requests\nimport sys\nfrom .response_factory import SlackAPIDictResponse, SlackAPIObjectResponse\nfrom .rtm_client import SlackRTMClient\nimport json\nfrom ..schema import endpoints\nfrom ..api import SlackAPI\n\n\nclass SlackClient(object):\n \"\"\"The web client\n \n \"\"\"\n def __init__(self,\n token,\n base_url=\"https://slack.com/api\",\n user_agent=None,\n response_factory=SlackAPIDictResponse,\n include_api=True,\n ):\n \"\"\"\n \n :param token: \n :param base_url: \n :param user_agent: \n :param response_factory: \n \"\"\"\n\n self.token = token\n self.base_url = base_url\n self._user_agent = user_agent\n self.response_factory = response_factory\n if include_api:\n self.api = SlackAPI(bind=self)\n else:\n self.api = None\n\n\n def api_call(self, endpoint, options, **kwargs):\n if options.get('include_token', False):\n kwargs['token'] = self.token\n\n headers = {\n 'user-agent': self.user_agent,\n }\n\n for key in kwargs:\n if not isinstance(kwargs[key], str):\n kwargs[key] = json.dumps(kwargs[key])\n\n response = requests.post(self.url(endpoint), data=kwargs, headers=headers)\n\n if endpoint in {'rtm.start', 'rtm.connect'}:\n return SlackRTMClient.from_response(endpoint=endpoint, token=self.token, response=response.json())\n\n return self.response_factory(endpoint, response)\n\n def url(self, endpoint):\n return \"{}/{}\".format(self.base_url, endpoint)\n\n @property\n def user_agent(self):\n if self._user_agent is None:\n dist = pkg_resources.get_distribution('slackly')\n\n self._user_agent = {\n \"client\": \"{0}/{1}\".format(dist.project_name, dist.version),\n \"python\": \"Python/{v.major}.{v.minor}.{v.micro}\".format(v=sys.version_info),\n \"system\": \"{0}/{1}\".format(platform.system(), platform.release())\n }\n\n return ''.join(self._user_agent.values())\n\n def __repr__(self):\n token = '...' 
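A worked check of the two tree walks above on t1: the leaves are the nodes holding 3, 4 and 5, and the value total is 1+2+3+4+5 = 15. A condensed, self-contained restatement:

t1 = {"key": "node0", "val": 1, "children": [
    {"key": "node0", "val": 2, "children": [
        {"key": "node0", "val": 3, "children": []}]},
    {"key": "node0", "val": 4, "children": []},
    {"key": "node0", "val": 5, "children": []}]}

def count_leaves(t):
    return 1 if not t["children"] else sum(count_leaves(k) for k in t["children"])

def add_values(t):
    return t["val"] + sum(add_values(k) for k in t["children"])

print(count_leaves(t1), add_values(t1))  # 3 15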
+ self.token[-5:]\n return \"{0.__class__.__name__}(base_url='{0.base_url}', token='{1}', response_factory={0.response_factory})\".format(self, token)","sub_path":"src/slackly/client/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"21428607","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport time\r\n\r\n#Load data\r\n#Target moments to achieve (targetmean and targetcov)\r\n#Image input (inputh)\r\ntargetmean = np.load(\"targetmean.npy\")\r\ntargetcov = np.load(\"targetcov.npy\")\r\ninputh = np.load(\"inputh.npy\")\r\n\r\n#Initialize hyperparameters to random values\r\n#You can load your own hyperparameters here (e.g. p = np.load('some_p_you_got_before.npy'))\r\n#p -- hyperparameters for the weight matrix\r\n#q -- hyperparameters for transformation of inputh\r\n#r -- hyperparameters for noise covariance\r\np = [0.01 * (1.0 + np.random.rand()), 0.01 * (1.0 + np.random.rand()), 0.01 * (1.0 + np.random.rand()), 0.01 * (1.0 + np.random.rand()), 0.8, 0.8, 0.8, 0.8]\r\nq = np.sqrt([0.5 + np.random.rand(), 2.1 * np.random.rand(), 1.0])\r\nr = [2.0, 2.0, 0.3, 0.8]\r\n\r\n#Simulation constants\r\ntau_eta = 0.02 #Time constant for noise\r\ndt = 0.0002 #Time step\r\nN = 100\t#Number of neurons\t\t\t\t\t\r\ntrialsperbatch = 50 #Number of trials to take\r\n\r\n#Precomputed numerator matrix of exponential term in the weight matrix\r\ni = np.arange(0, 100, 1) * 2 * np.pi / 50\r\nj = np.arange(0, 100, 1) * 2 * np.pi / 50\r\ntheta_i, theta_j = np.meshgrid(i,j)\r\nnumerator = np.cos(theta_i - theta_j) - np.ones([100,100])\r\n\r\n#Precomputed T_inv\r\ntau_e = 0.020\r\ntau_i = 0.010\r\ntime_constants = np.zeros(100)\r\nfor i in range(50):\r\n\ttime_constants[i] = tau_e\r\n\r\nfor i in range(50,100):\r\n\ttime_constants[i] = tau_i\r\n\r\nT = np.diag(time_constants)\r\nT_inv = np.linalg.inv(T)\r\n\r\n#Numpy for on-the-fly noise generation coefficients\r\neps_1 = (1.0-dt/tau_eta)\r\neps_2 = np.sqrt(2.0*dt/tau_eta)\r\n\r\n#Throw matrices into tensorflow wrapper\r\ntfneuroninput = tf.placeholder(shape=(5, 100, 1), dtype=tf.float32, name=\"tfneuroninput\")\r\ntftmean = tf.placeholder(shape=(5, 50), dtype=tf.float32, name=\"tftmean\")\r\ntftcov = tf.placeholder(shape=(5, 50, 50), dtype=tf.float32, name=\"tftcov\")\r\ntfstart = tf.placeholder(tf.int32, shape=())\r\ntfend = tf.placeholder(tf.int32, shape=())\r\ntfnumerator = tf.constant(numerator, dtype=tf.float32, name=\"tfnumerator\")\r\ntfTinv = tf.constant(T_inv, dtype=tf.float32, name=\"tfTinv\")\r\ntfTinv = tf.stack([tfTinv] *5)\r\ntfdt = tf.constant(dt, dtype=tf.float32, name=\"tfTinv\")\r\ntfeps1 = tf.constant(eps_1, dtype=tf.float32, name=\"tfeps1\")\r\ntfeps2 = tf.constant(eps_2, dtype=tf.float32, name=\"tfeps2\")\r\n\r\n# Introduce p, q and r as Tensorflow variables to be optimized\r\na_ee = tf.Variable(p[0], dtype=tf.float32, name=\"a_ee\")\r\na_ei = tf.Variable(p[1], dtype=tf.float32, name=\"a_ei\")\r\na_ie = tf.Variable(p[2], dtype=tf.float32, name=\"a_ie\")\r\na_ii = tf.Variable(p[3], dtype=tf.float32, name=\"a_ii\")\r\nw_ee = tf.Variable(p[4], dtype=tf.float32, name=\"w_ee\")\r\nw_ei = tf.Variable(p[5], dtype=tf.float32, name=\"w_ei\")\r\nw_ie = tf.Variable(p[6], dtype=tf.float32, name=\"w_ie\")\r\nw_ii = tf.Variable(p[7], dtype=tf.float32, name=\"w_ii\")\r\n\r\nh_scale = tf.Variable(q[0], dtype=tf.float32, name=\"h_scale\")\r\nh_thres = tf.Variable(q[1], 
dtype=tf.float32, name=\"h_thres\")\r\nh_pow = tf.Variable(q[2], dtype=tf.float32, name=\"h_pow\")\r\n\r\neta_e = tf.Variable(r[0], dtype=tf.float32, name=\"eta_e\")\r\neta_i = tf.Variable(r[1], dtype=tf.float32, name=\"eta_i\")\r\neta_mix = tf.Variable(r[2], dtype=tf.float32, name=\"eta_mix\")\r\neta_w = tf.Variable(r[3], dtype=tf.float32, name=\"eta_w\")\r\n\r\n#Function to construct weight matrix from parameters\r\ndef construct_W(a_ee,a_ei,a_ie,a_ii,w_ee,w_ei,w_ie,w_ii):\r\n\t#Construct the width matrix\r\n\ttop = tf.concat([tf.ones([50,50], dtype=tf.float32)*(tf.square(w_ee)), tf.ones([50,50], dtype=tf.float32)*(tf.square(w_ei))], axis=1)\r\n\tbottom = tf.concat([tf.ones([50,50], dtype=tf.float32)*(tf.square(w_ie)), tf.ones([50,50], dtype=tf.float32)*(tf.square(w_ii))], axis=1)\r\n\tWwidth = tf.concat([top,bottom], axis=0, name=\"Wwidth\")\r\n\r\n\t#Construct the height matrix\r\n\ttop = tf.concat([tf.ones([50,50], dtype=tf.float32)*(tf.square(a_ee)+0.01), tf.ones([50,50], dtype=tf.float32)*(-tf.square(a_ei)-0.01)], axis=1)\r\n\tbottom = tf.concat([tf.ones([50,50], dtype=tf.float32)*(tf.square(a_ie)+0.01), tf.ones([50,50], dtype=tf.float32)*(-tf.square(a_ii)-0.01)], axis=1)\r\n\tWheight = tf.concat([top,bottom], axis=0, name=\"Wheight\")\r\n\r\n\t#Return W\r\n\t#If you want to set the diagonals to zero, this would be the place to do it\r\n\t#Note that you may have to set diagonals in each quartile to zero rather than the diagonal of this concatenated matrix\r\n\treturn Wheight * tf.exp(tfnumerator/Wwidth) #- (subtract diagonal entries here)\r\n\r\n#Function to construct actual neuron input from inputh\r\ndef construct_h(tfneuroninput, h_scale, h_thres, h_pow):\r\n\treturn tf.square(h_scale)*tf.exp(tf.square(h_pow) * tf.log(tfneuroninput+tf.square(h_thres)))\r\n\r\n#Function to construct noise covariance matrix\r\ndef construct_eta(eta_e, eta_i, eta_mix, eta_w):\r\n\t#Construct the width matrix\r\n\tEtawidth = tf.ones([100,100], dtype=tf.float32)*tf.square(eta_w)\r\n\r\n\t#Compute the E-I/I-E heights\r\n\trho = eta_i * eta_e * (0.5 + 0.5 * tf.tanh(eta_mix))\r\n\r\n\t#Construct the height matrix\r\n\ttop = tf.concat([tf.ones([50,50], dtype=tf.float32)*tf.square(eta_e), tf.ones([50,50], dtype=tf.float32)*rho], axis=1)\r\n\tbottom = tf.concat([tf.ones([50,50], dtype=tf.float32)*rho, tf.ones([50,50], dtype=tf.float32)*tf.square(eta_i)], axis=1)\r\n\tEtaheight = tf.concat([top,bottom], axis=0, name=\"Etaheight\")\r\n\r\n\tactual_eta = Etaheight * tf.exp(tfnumerator/Etawidth)\r\n\r\n\t#Return Eta\r\n\treturn actual_eta + tf.eye(100) * 0.01\r\n\r\n#Functions for creating loops\r\n#The loop has a condition (innercond) and a function (innerbody) to execute while the condition is true\r\ndef innercond(u, eta, t, W, h, L, tftmean, tftcov, umean, ucov, cost):\r\n\t#tfend is the number of time steps before ending the loop\r\n\t#the value of tfend is fed right before training in the last part of the code below\r\n\treturn tf.less(t,tfend)\r\n\r\ndef innerbody(u, eta, t, W, h, L, tftmean, tftcov, umean, ucov, cost):\r\n\t#Feel free to use u, the membrane potentials, anyway you like here\r\n\t#This would represent the membrane potential u at time t, if time is relevant to your cost function\r\n\t#In this case, we calculate the moments and then accumulate the cost \r\n\tmean = tf.reduce_mean(u[:,:50], axis=2)\r\n\tdeviation = u[:,:50] - tf.expand_dims(mean,2)\r\n\tcov = deviation@tf.transpose(deviation,[0,2,1])/(trialsperbatch)\r\n\tcost += (4e-05 * tf.reduce_sum(tf.square(mean - tftmean)) + 
8e-05*tf.reduce_sum(tf.linalg.diag_part(tf.square(cov - tftcov))) + 8e-07 * tf.reduce_sum(tf.square(cov - tftcov)))\r\n\t\r\n\t#Update u for the next time step\r\n\tdu = tfTinv@(-u + h + W@(0.3 * tf.square(tf.nn.relu(u))) + eta)*tfdt\r\n\tu = tf.clip_by_value(u + du,-100,100)\r\n\r\n\t#Update noise for the next time step\r\n\ttemp = L@tf.random_normal([100,trialsperbatch], 0.0, 1.0, dtype=tf.float32)\r\n\teta = tfeps1 * eta + tfeps2 * temp\r\n\r\n\treturn (u, eta, t+1, W, h, L, tftmean, tftcov, umean, ucov, cost)\r\n\r\n#This is the function that outputs the cost\r\ndef run_simulation(W, h, L, tftmean, tftcov):\r\n\t#Define some constants\r\n\t#Note how the membrane potentials are defined here: (number of images, number of neurons, number of trials)\r\n\tcost = tf.constant(0, dtype=tf.float32)\r\n\tu = tf.random_normal([5,100,trialsperbatch], 0.0, 2.0, dtype=tf.float32)\r\n\tdu = tf.zeros([5,100,trialsperbatch], dtype=tf.float32)\r\n\tumean = tf.zeros([5,50], dtype=tf.float32)\r\n\tucov = tf.zeros([5,50,50], dtype=tf.float32)\r\n\teta = L@tf.random_normal([100,trialsperbatch], 0.0, 1.0, dtype=tf.float32)\r\n\tt = tf.constant(0, dtype=tf.int32)\r\n\t#Start the loop\r\n\tu, eta, t, W, h, L, tftmean, tftcov, umean, ucov, cost = tf.while_loop(innercond, innerbody, (u, eta, t, W, h, L, tftmean, tftcov, umean, ucov, cost))\r\n\treturn cost\r\n\r\n#Tensorflow stuff to minimize cost\r\n#Build the model, characterized by tfW, the weight matrix, tfh, the neuron input, and tfeta, the noise covariance matrix\r\ntfW = construct_W(a_ee,a_ei,a_ie,a_ii,w_ee,w_ei,w_ie,w_ii)\r\ntfW = tf.stack([tfW]*5)\r\ntfh = construct_h(tfneuroninput, h_scale, h_thres, h_pow)\r\ntfeta = construct_eta(eta_e, eta_i, eta_mix, eta_w)\r\ntfL = tf.cast(tf.linalg.cholesky(tfeta), dtype = tf.float32)\r\n#Calculate the cost function of this model\r\ncost = run_simulation(tfW, tfh, tfL, tftmean, tftcov)\r\n#Optimize for it\r\noptimizer = tf.train.AdamOptimizer(learning_rate=0.05)\r\nminimize = optimizer.minimize(cost)\r\n\r\n#These variables below are the ones to be fed into the network right before start of simulation\r\nneuroninput = inputh.reshape(5,-1,1)\r\ntmean = targetmean\r\ntcov = targetcov\r\nsimend = int(0.5/dt)\r\niterations = 100 #Number of iterations (It is placed here for convenience since the code for data collection is nearby below)\r\n\r\n#Start Tensorflow session (Code actually starts running here)\r\nwith tf.Session() as sess:\r\n\tsess.run(tf.global_variables_initializer())\r\n\tfor i in range(iterations):\r\n\t\t#This trains the network for one iteration\r\n\t\tsess.run(minimize,feed_dict={tfend: simend, tfneuroninput: neuroninput, tftmean: tmean , tftcov: tcov})\r\n\t\t#There might be data you want to collect at every iteration, such as the cost\r\n\t\t#You can make the model evaluate anything here, as long as it has been defined. 
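Stripped of the TensorFlow graph plumbing, the while-loop body above is plain Euler integration of du = T_inv(-u + h + W f(u) + eta) dt with f(u) = 0.3 * relu(u)^2. A toy NumPy rendition for two neurons, noise omitted and every constant below made up for illustration:

import numpy as np

dt = 0.0002
T_inv = np.diag([1 / 0.020, 1 / 0.010])    # 1/tau_e, 1/tau_i
W = np.array([[0.5, -0.6], [0.4, -0.3]])   # excitatory columns positive, inhibitory negative
h = np.array([1.0, 1.0])
u = np.zeros(2)
for _ in range(int(0.5 / dt)):             # 0.5 s of simulated time
    f_u = 0.3 * np.maximum(u, 0.0) ** 2    # the supralinear rate function
    u = np.clip(u + T_inv @ (-u + h + W @ f_u) * dt, -100, 100)
print(u)  # settles near the network's fixed point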
\r\n\t\t#For example, you can make it calculate tfW, tfL, cost or even w_ee (one of the hyperparameters)\r\n\t\tprint(sess.run([cost,w_ee],feed_dict={tfend: simend, tfneuroninput: neuroninput, tftmean: tmean , tftcov: tcov}))\r\n\r\n\r\n","sub_path":"optimizer_stochastic.py","file_name":"optimizer_stochastic.py","file_ext":"py","file_size_in_byte":9284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"293228033","text":"##add annotations for each variant outputted by `correlation.py` script\n##annotations from https://useast.ensembl.org/info/genome/variation/prediction/predicted_data.html\n\nimport argparse\nimport gzip\nimport pysam\n\nargparser = argparse.ArgumentParser(description = 'Adds annotations from VCF/BCF.')\nargparser.add_argument('-i', '--in', metavar = 'file', dest = 'in_file', required = True, help = 'Input file with variants.')\nargparser.add_argument('-c', '--consequences', metavar = 'file', dest = 'in_CSQ_VCF', required = True, help = 'Input VCF/BCF file with CSQ INFO field.')\nargparser.add_argument('-o', '--out', metavar = 'file', dest = 'out_file', required = True, help = 'Output file.')\n\nso_terms = [\n 'transcript_ablation',\n 'splice_acceptor_variant',\n 'splice_donor_variant',\n 'stop_gained',\n 'frameshift_variant',\n 'stop_lost',\n 'start_lost',\n 'transcript_amplification',\n 'inframe_insertion',\n 'inframe_deletion',\n 'missense_variant',\n 'protein_altering_variant',\n 'splice_region_variant',\n 'incomplete_terminal_codon_variant',\n 'start_retained_variant',\n 'stop_retained_variant',\n 'synonymous_variant',\n 'coding_sequence_variant',\n 'mature_miRNA_variant',\n '5_prime_UTR_variant',\n '3_prime_UTR_variant',\n 'non_coding_transcript_exon_variant',\n 'intron_variant',\n 'NMD_transcript_variant',\n 'non_coding_transcript_variant',\n 'upstream_gene_variant',\n 'downstream_gene_variant',\n 'TFBS_ablation',\n 'TFBS_amplification',\n 'TF_binding_site_variant',\n 'regulatory_region_ablation',\n 'regulatory_region_amplification',\n 'feature_elongation',\n 'regulatory_region_variant',\n 'feature_truncation',\n 'intergenic_variant',\n 'LoF'\n]\n\nif __name__ == '__main__':\n args = argparser.parse_args()\n\n with gzip.open(args.in_file, 'rt') as ifile, pysam.VariantFile(args.in_CSQ_VCF) as ivcf, gzip.open(args.out_file, 'wt') as ofile:\n header = ifile.readline().rstrip().split()\n if any(x not in ['CHROM', 'POS', 'REF', 'ALT', 'N_GT', 'IMP_AF', 'GT_AF', 'DOSE_AF', 'IMP_R2', 'GT_vs_GT', 'GT_vs_DS'] for x in header):\n raise Exception('Wrong header!')\n\n\n vcf_meta = ivcf.header.info.get('CSQ', None)\n if vcf_meta is None:\n raise Exception('Missing CSQ INFO field!')\n\n csq_header = vcf_meta.description.split(':', 1)[-1].strip().split('|')\n has_chr_prefix = list(ivcf.header.contigs)[0].startswith('chr')\n\n ofile.write('{}\\t{}\\n'.format('\\t'.join(header), '\\t'.join(so_terms)))\n\n for line in ifile:\n fields = dict(zip(header, line.rstrip().split()))\n if has_chr_prefix and not fields['CHROM'].startswith('chr'):\n chrom = 'chr' + fields['CHROM']\n variant_name = \"chr{}_{}_{}_{}\".format(fields['CHROM'], fields['POS'], fields['REF'], fields['ALT'])\n elif not has_chr_prefix and fields['CHROM'].startswith('chr'):\n chrom = fields['CHROM'][3:]\n variant_name = \"{}_{}_{}_{}\".format(fields['CHROM'][3:], fields['POS'], fields['REF'], fields['ALT'])\n else:\n chrom = fields['CHROM']\n variant_name = \"{}_{}_{}_{}\".format(fields['CHROM'], fields['POS'], fields['REF'], fields['ALT'])\n\n\n pos = 
int(fields['POS'])\n\n consequences = set()\n for record in ivcf.fetch(chrom, max(0, pos - 1), pos + 1):\n if record.pos == pos and record.ref == fields['REF'] and record.alts[0] == fields['ALT']: # assuming the annotation VCF has only bi-allelic entries, one per line\n for transcript_consequence in record.info['CSQ']:\n transcript_consequence = dict(zip(csq_header, transcript_consequence.split('|')))\n if transcript_consequence['BIOTYPE'] != 'protein_coding':\n continue\n for x in transcript_consequence['Consequence'].split(','):\n consequences.update(x.split('&'))\n if transcript_consequence['LoF'] == 'HC':\n consequences.add('LoF')\n break\n\n ofile.write('{}\\t{}\\n'.format( '\\t'.join(fields[h] for h in header), '\\t'.join(str(int(t in consequences)) for t in so_terms)))\n","sub_path":"AnnotateVariants/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"123924628","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\ndf = pd.read_csv('pie4.csv')\n\ncountries = df['country']\nmedals = df['gold_medal']\ncolors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#8c564b\"]\nexplode = (0.1, 0, 0, 0, 0)\n\nplt.pie(medals, labels=countries, colors=colors, explode=explode, autopct='%1.2f%%', counterclock=False, startangle=180)\nplt.axis('equal')\nplt.title('Gold medal achievements of five most successful\\ncountries in 2016 Summer Olympics')\n\nplt.show()\n","sub_path":"MathPlotLibProjects/pie4.py","file_name":"pie4.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"24616880","text":"import os\nimport pandas as pd\nimport time\n\njob_template = '''cat <= 0 and xx=0 and yy str:\n stack = []\n primitives = {}\n i = 0\n # find the primitives\n for c in S:\n tmp = primitives.get(i, [])\n tmp.append(c)\n primitives[i] = tmp\n if c == '(':\n stack.append(c)\n else:\n stack.pop()\n # if the stack is empty now, we have just found one primitive\n # if the string is not fully scanned yet, keep looking for the next primitive\n if len(stack) == 0:\n i += 1\n\n res = []\n # peel each primitive\n for k, primitive in primitives.items():\n len_2 = len(primitive) // 2\n if len_2 == 1:\n res.append('')\n continue\n i = 0\n while primitive[i + 1] != ')':\n if primitive[i] == '(' and primitive[2 * len_2 - i - 1] == ')':\n i += 1\n else:\n break\n res.extend(primitive[i:2 * len_2 - i])\n return ''.join(res)\n\n def removeOuterParentheses2(self, S: str) -> str:\n stack = []\n primitives = {}\n i = 0\n # find the primitives\n for c in S:\n tmp = primitives.get(i, [])\n tmp.append(c)\n primitives[i] = tmp\n if c == '(':\n stack.append(c)\n else:\n stack.pop()\n # if the stack is empty now, we have just found one primitive\n # if the string is not fully scanned yet, keep looking for the next primitive\n if len(stack) == 0:\n i += 1\n\n res = []\n # peel each primitive: remove only one layer\n for k, primitive in primitives.items():\n if primitive[0] == '(' and primitive[-1] == ')':\n res.extend(primitive[1:len(primitive) - 1])\n return ''.join(res)\n\n def removeOuterParentheses3(self, S: str) -> str:\n stack = []\n primitives = {}\n i = 0\n res = []\n # find the primitives\n for c in S:\n tmp = primitives.get(i, [])\n tmp.append(c)\n primitives[i] = tmp\n if c == '(':\n stack.append(c)\n else:\n stack.pop()\n # if the stack is empty now, we have just found one primitive\n # if the string is not fully scanned yet, keep looking for the next primitive\n if len(stack) == 0:\n # peel each primitive: remove only one layer\n if primitives[i][0] == '(' and primitives[i][-1] == ')':\n res.extend(primitives[i][1:len(primitives[i]) - 1])\n i += 1\n\n return ''.join(res)\n\n def 
removeOuterParentheses(self, S: str) -> str:\n stack, res, tmp = [], '', ''\n # find the primitives\n for c in S:\n tmp += c\n if c == '(':\n stack.append(c)\n else:\n stack.pop()\n # if the stack is empty now, we have just found one primitive\n # if the string is not fully scanned yet, keep looking for the next primitive\n if len(stack) == 0:\n # peel each primitive: remove only one layer\n res += tmp[1:-1]\n tmp = ''\n\n return res\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\nres = Solution().removeOuterParentheses('()()')\nprint(res)\n","sub_path":"Week_02/[1021]删除最外层的括号.py","file_name":"[1021]删除最外层的括号.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321692847","text":"import scrapy, logging, os\nimport html2text, datetime, requests, json\nfrom scrapy.crawler import CrawlerProcess\n\nclass QuotesSpider(scrapy.Spider):\n name = \"simon\"\n \n def __init__(self):\n self.default_month_format = {\n \"January\": 1,\n \"February\": 2,\n \"March\": 3,\n \"April\": 4,\n \"May\": 5,\n \"June\": 6,\n \"July\": 7,\n \"August\": 8,\n \"September\": 9,\n \"October\": 10,\n \"November\": 11,\n \"December\": 12\n }\n logging.getLogger('protego').setLevel(logging.WARNING)\n logging.getLogger('scrapy.statscollectors').setLevel(logging.WARNING)\n\n def start_requests(self):\n urls = [\n 'https://ielts-simon.com/'\n ]\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n \n def parse(self, response):\n beta_inner = response.xpath('//*[@id=\"beta\"]')\n general_info = self.extract_general_info(beta_inner)\n list_of_newer_date = []\n \n for i in general_info:\n date_converted = self.date_converter(i['date'])\n if self.a_newer_date(date_converted) == True:\n\n list_of_newer_date.append(date_converted)\n modified_data = self.modified_data(i)\n self.slack_send(modified_data)\n\n self.note_the_newest_day(list_of_newer_date)\n\n def extract_general_info(self, data):\n final_result = []\n date_header = data.xpath(\"//h2[@class='date-header']\")\n for a_date in date_header:\n a_date_text = a_date.xpath(\"text()\").extract()[0]\n entry_parent = a_date.xpath('following-sibling::div')[0]\n entry_content = entry_parent.xpath(\".//div[@class='entry-body']\")\n entry_content_text = html2text.html2text(entry_content[0].extract())\n entry_header = entry_parent.xpath(\".//h3[@class='entry-header']\")\n entry_header_href = entry_header.xpath(\"./a//@href\").getall()[0]\n entry_header_text = entry_header.xpath(\"./a/text()\").getall()[0]\n \n final_result.append(\n {\n 'date': f\"{a_date_text}\",\n 'content': f\"{entry_content_text}\",\n 'link': f\"{entry_header_href}\",\n 'header': f\"{entry_header_text}\"\n }\n )\n return final_result\n\n def a_newer_date(self, data):\n saved_date = open('the_day', 'r').read().strip().split(\"-\")\n #saved_date = os.environ['SIMON_LATEST_DATE'].split(\"-\")\n '''\n data written in the 'the_day' file is like '2019-11-16'\n '''\n the_day = datetime.datetime(int(saved_date[0]), int(saved_date[1]), int(saved_date[2]))\n if the_day < data: \n return True\n \n def note_the_newest_day(self, data):\n if data != []:\n the_day = data[0]\n for i in data:\n if the_day < i:\n the_day = i\n \n with open(\"the_day\", \"w\") as f:\n f.write(datetime.datetime.strftime(the_day, \"%Y-%m-%d\"))\n #os.environ['SIMON_LATEST_DATE'] = datetime.datetime.strftime(the_day, \"%Y-%m-%d\")\n else:\n pass\n\n def slack_send(self, data):\n slack_url = os.environ['SLACK_WEBHOOK_URL']\n headers = {'Content-type': 'application/json'}\n try:\n act = requests.post(
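A short trace of the final removeOuterParentheses above (same algorithm, condensed): for "(()())(())" the primitives are "(()())" and "(())", and peeling one layer from each yields "()()()".

def removeOuterParentheses(S):
    stack, res, tmp = [], '', ''
    for c in S:
        tmp += c
        if c == '(':
            stack.append(c)
        else:
            stack.pop()
        if not stack:          # a full primitive was just closed
            res += tmp[1:-1]   # drop its outermost '(' and ')'
            tmp = ''
    return res

print(removeOuterParentheses("(()())(())"))  # ()()()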
slack_url,\n data=json.dumps(data),\n headers=headers)\n except requests.exceptions.RequestException as e: # the error classes live on requests, not on the response object\n print(e)\n\n def modified_data(self, data):\n output = {\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": \"<{link}|{header}> - *{date}*\\n```{content}```\".format(\n link=data['link'],\n header=data['header'],\n date=data['date'],\n content=data['content'])\n }\n }\n ]\n }\n return output\n \n def date_converter(self, data_in_string):\n result = data_in_string.split(\",\")\n year = int(result[2])\n month_name = result[1].split(\" \")[1]\n day_number = int(result[1].split(\" \")[2])\n month_number = int(self.default_month_format[month_name])\n \n return datetime.datetime(year, month_number, day_number)\n\n\nif __name__ == \"__main__\":\n process = CrawlerProcess({\n 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'\n })\n\n process.crawl(QuotesSpider)\n process.start() # the script will block here until the crawling is finished","sub_path":"projects/simon/simon/spiders/simon.py","file_name":"simon.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645102729","text":"'''\nCustom section extension for Python-Markdown\n============================================\n\nWraps the document in logical sections, as implied by h1-h6 headings.\n\nDowngrades h1-h5 headings to h2-h6, and adds a span inside the heading\nelements containing the heading text, for styling purposes.\n\nThe section logic is based on:\nhttps://github.com/jessedhillon/mdx_sections\n'''\n\nimport re\nfrom markdown.util import etree\nfrom markdown import Extension\nfrom markdown.treeprocessors import Treeprocessor\n\n\nclass CustomSectionProcessor(Treeprocessor):\n\tdef process_headings(self, node):\n\t\tpattern = re.compile('^h(\\d)')\n\n\t\tfor child in list(node):\n\t\t\tmatch = pattern.match(child.tag.lower())\n\n\t\t\tif match:\n\t\t\t\tnew_level = min(6, int(match.group(1)) + 1)\n\t\t\t\tchild.tag = 'h' + str(new_level)\n\n\t\t\t\tspan = etree.SubElement(child, 'span')\n\t\t\t\tspan.text = child.text\n\t\t\t\tchild.text = ''\n\n\t\t\telse:\n\t\t\t\tself.process_headings(child)\n\n\tdef process_sections(self, node):\n\t\ts = []\n\t\tpattern = re.compile('^h(\\d)')\n\n\t\tfor child in list(node):\n\t\t\tmatch = pattern.match(child.tag.lower())\n\n\t\t\tif match:\n\t\t\t\tsection = etree.SubElement(node, 'section')\n\n\t\t\t\tsection.append(child)\n\t\t\t\tnode.remove(child)\n\n\t\t\t\tfor key, value in list(child.attrib.items()):\n\t\t\t\t\tsection.set(key, value)\n\t\t\t\t\tdel child.attrib[key]\n\n\t\t\t\tdepth = int(match.group(1))\n\t\t\t\tcontained = False\n\n\t\t\t\twhile s:\n\t\t\t\t\tcontainer, container_depth = s[-1]\n\t\t\t\t\tif depth <= container_depth:\n\t\t\t\t\t\ts.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontained = True\n\t\t\t\t\t\tbreak\n\n\t\t\t\tif contained:\n\t\t\t\t\tcontainer.append(section)\n\t\t\t\t\tnode.remove(section)\n\n\t\t\t\ts.append((section, depth))\n\n\t\t\telse:\n\t\t\t\tif s:\n\t\t\t\t\tcontainer, container_depth = s[-1]\n\t\t\t\t\tcontainer.append(child)\n\t\t\t\t\tnode.remove(child)\n\t\t\t\telse:\n\t\t\t\t\tself.process_sections(child)\n\n\t\tif len(node) > 1 or len(node) == 1 and len(node[0]) > 0:\n\t\t\textra_section = None\n\n\t\t\tfor child in list(node):\n\t\t\t\tif child.tag.lower() == 'p':\n\t\t\t\t\tif extra_section is None:\n\t\t\t\t\t\textra_section = etree.Element('section')\n\t\t\t\t\t\tnode.insert(0, 
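What date_converter above expects: a Typepad-style header such as "Saturday, November 16, 2019" (this sample string is hypothetical), split on commas into weekday, "Month day" and year. A self-contained restatement:

import datetime

month_index = {"January": 1, "February": 2, "March": 3, "April": 4,
               "May": 5, "June": 6, "July": 7, "August": 8,
               "September": 9, "October": 10, "November": 11, "December": 12}

def date_converter(s):
    parts = s.split(",")                  # ['Saturday', ' November 16', ' 2019']
    month_name = parts[1].split(" ")[1]   # index 0 is the empty string before the month
    day = int(parts[1].split(" ")[2])
    return datetime.datetime(int(parts[2]), month_index[month_name], day)

print(date_converter("Saturday, November 16, 2019"))  # 2019-11-16 00:00:00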
extra_section)\n\n\t\t\t\t\tnode.remove(child)\n\t\t\t\t\textra_section.append(child)\n\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\n\tdef run(self, root):\n\t\tself.process_headings(root)\n\t\tself.process_sections(root)\n\t\treturn root\n\n\nclass CustomSectionExtension(Extension):\n\tdef extendMarkdown(self, md, md_globals):\n\t\text = CustomSectionProcessor(md)\n\t\tmd.treeprocessors.add('customsection', ext, '_end')\n\n\ndef makeExtension(*args, **kwargs):\n\treturn CustomSectionExtension(*args, **kwargs)\n","sub_path":"plugins/markdown_customsection/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"495580229","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 4 22:49:53 2021\n\n@author: Austin Hsu\n\"\"\"\n\nimport io\nimport os\nimport random\nimport torch\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom argparse import Namespace\nfrom PIL import Image\nfrom torchvision.transforms import ToTensor\nfrom fbprophet import Prophet\nfrom .dataset import StockDataset\nfrom .profiler import SimpleProfiler #\nfrom .logger import TensorboardLogger #\n\nclass ProphetStockTrainer:\n \n def __init__(self, hparams: Namespace) -> None:\n self.hparams = hparams\n self.recovery = lambda x, min, max: x*(max-min)+min\n \n # --- random seed ---\n self._setup_seed(seed=self.hparams.seed)\n \n # --- mkdir ---\n os.makedirs(os.path.join(self.hparams.log_path, self.hparams.exp_name), exist_ok=True)\n \n # --- profiler ---\n self.profiler = SimpleProfiler(output_filename=os.path.join(self.hparams.log_path, self.hparams.exp_name, 'profile.txt'))\n \n # --- logger ---\n self.logger = TensorboardLogger(save_dir=os.path.join(self.hparams.log_path, self.hparams.exp_name))\n \n def _setup_seed(self, seed: int) -> None:\n random.seed(seed)\n np.random.seed(seed)\n \n def _plot_predict(self, *figures) -> io._io.BytesIO:\n plt.figure()\n for (fig, label) in figures:\n plt.plot(fig, label=label)\n plt.legend(loc='upper left')\n buf = io.BytesIO()\n plt.savefig(buf, format='jpeg')\n buf.seek(0)\n return buf\n \n def _log_figure(self, fig_buffer: io._io.BytesIO, task_name: str, ma: int=1) -> None:\n image = Image.open(fig_buffer)\n image = ToTensor()(image)\n self.logger.log_image(f'Predicted Close MA({ma}): {task_name}', image, current_step=0)\n \n def fit(self) -> None:\n \n # --- training module ---\n Prophet_Regressor = Prophet()\n \n # --- training setup ---\n with self.profiler.profile('Prepare Train Batch'):\n train_dataset = StockDataset(\n mode='Train',\n skip_preprocess=os.path.exists(os.path.join(self.hparams.stat_dir, f\"{self.hparams.data_name}_stats.ckpt\")),\n save_type='prophet',\n time_step=self.hparams.time_step,\n moving_average=self.hparams.moving_average,\n train_percent=1.,\n data_dir=self.hparams.data_dir,\n data_name=self.hparams.data_name,\n stat_dir=self.hparams.stat_dir,\n )\n valid_dataset = StockDataset(\n mode='Valid',\n skip_preprocess=True,\n save_type='prophet',\n time_step=self.hparams.time_step,\n moving_average=self.hparams.moving_average,\n train_percent=1.,\n data_dir=self.hparams.data_dir,\n data_name=self.hparams.data_name,\n stat_dir=self.hparams.stat_dir,\n )\n train_data = np.concatenate((train_dataset.data, valid_dataset.data))\n train_label = np.concatenate((train_dataset.label, valid_dataset.label))\n train_index = np.concatenate((train_dataset.data_index, valid_dataset.data_index))\n train_noise = 
train_label-train_data # pred_data = train_data (close of last day) + trained_noise\n train_df = pd.DataFrame({\n 'ds': train_index,\n 'y': train_noise,\n })\n \n # --- trainer fit ---\n with self.profiler.profile('Fit Model'):\n Prophet_Regressor.fit(train_df)\n \n # --- test ---\n with self.profiler.profile('Test Loop'):\n avg_test_loss = self.test(Prophet_Regressor)\n self.test_loss = avg_test_loss\n \n # --- checkpoint ---\n with self.profiler.profile('Save Model'):\n print(\"Checkpointing model...\")\n checkpoint_name = f\"model_{self.hparams.method}_{self.hparams.data_name}.ckpt\"\n checkpoint = {\n 'model': Prophet_Regressor,\n 'method': self.hparams.method,\n 'firm': self.hparams.data_name,\n 'moving_average': self.hparams.moving_average,\n 'train_loss': 0,\n 'valid_loss': 0,\n 'test_loss': avg_test_loss,\n 'epoch': 0,\n }\n torch.save(checkpoint, os.path.join(self.hparams.checkpoint_path, checkpoint_name))\n \n # --- checkpoint done ---\n print(\"Checkpoint completed.\")\n\n # --- log hparams ---\n model_hparams = {'method': self.hparams.method, 'firm': self.hparams.data_name, 'moving_average': self.hparams.moving_average}\n self.logger.log_hparams(hparam_dict=model_hparams, metric_dict={i:checkpoint[i] for i in checkpoint.keys() if 'loss' in i})\n \n # --- close logger ---\n self.logger.close()\n \n # --- Profiler Summarization ---\n self.profiler.describe()\n \n return\n \n def test(self, regressor=None):\n \n # --- testing module ---\n if regressor is None:\n # --- call from checkpoint ---\n checkpoint_name = f\"model_{self.hparams.method}_{self.hparams.data_name}.ckpt\"\n checkpoint_dir = os.path.join(self.hparams.checkpoint_path, checkpoint_name)\n print(f\"Using checkpointed model from {checkpoint_dir}\")\n checkpoint = torch.load(checkpoint_dir)\n regressor = checkpoint['model']\n \n # --- testing setup ---\n test_dataset = StockDataset(\n mode='Test',\n skip_preprocess=os.path.exists(os.path.join(self.hparams.stat_dir, f\"{self.hparams.data_name}_stats.ckpt\")),\n save_type='prophet',\n time_step=self.hparams.time_step,\n moving_average=self.hparams.moving_average,\n data_dir=self.hparams.data_dir,\n data_name=self.hparams.data_name,\n stat_dir=self.hparams.stat_dir,\n )\n test_data = test_dataset.data\n test_label = test_dataset.label\n test_index = test_dataset.data_index\n test_len = test_label.shape[0]\n test_noise = test_label-test_data # pred_data = train_data (close of last day) + trained_noise\n \n # --- test ---\n future = pd.DataFrame({'ds': np.concatenate((regressor.history_dates, test_index))})\n pred_noise = regressor.predict(future)[-test_len:].yhat.values\n \n # --- get loss ---\n avg_test_loss = np.mean((test_noise-pred_noise)**2)\n \n return avg_test_loss\n \n def predict(self, regressor=None, task_name=\"default\") -> None:\n # --- testing module ---\n if regressor is None:\n # --- call from checkpoint ---\n checkpoint_name = f\"model_{self.hparams.method}_{self.hparams.data_name}.ckpt\"\n checkpoint_dir = os.path.join(self.hparams.checkpoint_path, checkpoint_name)\n print(f\"Using checkpointed model from {checkpoint_dir}\")\n checkpoint = torch.load(checkpoint_dir)\n regressor = checkpoint['model']\n \n # --- testing setup ---\n test_dataset = StockDataset(\n mode='Test',\n skip_preprocess=os.path.exists(os.path.join(self.hparams.stat_dir, f\"{self.hparams.data_name}_stats.ckpt\")),\n save_type='prophet',\n time_step=self.hparams.time_step,\n moving_average=self.hparams.moving_average,\n data_dir=self.hparams.data_dir,\n 
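# test-mode datasets reuse the normalization statistics saved during training\n # (the skip_preprocess flag above just checks that the stats file already exists)\n 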
data_name=self.hparams.data_name,\n stat_dir=self.hparams.stat_dir,\n )\n label_stats = test_dataset.label_stat\n test_data = test_dataset.data\n test_label = test_dataset.label\n test_index = test_dataset.data_index\n test_len = test_label.shape[0]\n test_noise = test_label-test_data # pred_data = train_data (close of last day) + trained_noise\n \n # --- test ---\n future = pd.DataFrame({'ds': np.concatenate((regressor.history_dates, test_index))})\n pred_noise = regressor.predict(future)[-test_len:].yhat.values\n\n # --- get loss ---\n avg_test_loss = np.mean((test_noise-pred_noise)**2)\n \n # --- noise to label ---\n pred_label = pred_noise + test_data\n \n # --- recovery ---\n pred_label = self.recovery(pred_label, **label_stats)\n test_label = self.recovery(test_label, **label_stats)\n \n # --- result ---\n tqdm_info = f\"loss={avg_test_loss:8.6f}\"\n print(f\"Test result: {tqdm_info}\")\n \n # --- plot prediction ---\n target_list = (test_label, 'real')\n pred_list = (pred_label, 'predict')\n image_buffer = self._plot_predict(target_list, pred_list)\n self._log_figure(fig_buffer=image_buffer, task_name=task_name+f\" (loss={avg_test_loss})\", ma=self.hparams.moving_average)\n \n # --- close logger ---\n self.logger.close()\n \n return","sub_path":"src/prophet_trainer.py","file_name":"prophet_trainer.py","file_ext":"py","file_size_in_byte":9127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"127875037","text":"#!/usr/bin/python3\n'''This script creates an hdf5 file from three text files\nthat are interpreted to be the train, validation, and test\nsplits. A data entry is considered to be a whole line (sentence)\n\nTrain is mandatory (first positional argument),\nthe other two (-v -t) are optional.\n'''\n\ndef parse_arguments():\n '''Parse arguments\n '''\n import argparse\n parser = argparse.ArgumentParser(\n description = 'Turn a text-based dataset to an hdf5 file')\n\n parser.add_argument('train',\n help = 'Train file')\n\n parser.add_argument('-v', '--validation',\n default = None,\n help = 'Validation file')\n\n parser.add_argument('-t', '--test',\n default = None,\n help = 'Test file')\n\n parser.add_argument('--force',\n action = 'store_true')\n\n parser.add_argument('-o',\n default = None,\n type = str,\n help = 'Output path (default = train.h5')\n\n parser.add_argument('-l',\n default = -1,\n type = int,\n help = 'Sequence length (default = mean + std + sqrt(std))')\n\n return parser.parse_args()\n\ndef get_len_stats(trainfile):\n '''Returns mu + std + sqrt(std)\n from the character sequence length of the lines of a file\n '''\n import numpy as np\n l = []\n with open(trainfile, 'r') as f:\n for line in f:\n l.append(len(line))\n mu = np.mean(l)\n dev = np.std(l)\n return int(mu + dev + np.sqrt(dev))\n\ndef refine_opts(opts):\n '''Given user options, refine them\n '''\n if opts.o is None:\n opts.o = '%s.h5' % opts.train\n\n if opts.l < 0:\n opts.l = get_len_stats(opts.train)\n\ndef add_alphabet(alphabet, filename):\n '''Adds the symbols from the file in filename\n to the alphabet, it also gives an index to new,\n unseen symbols\n '''\n for line in open(filename, 'r'):\n for char in line:\n if not char in alphabet:\n # Who needs index trackers, heh\n alphabet[char] = len(alphabet)\n\ndef create_dataset(train, validation, test, dest, max_len):\n '''Given a train and (optionally) a validation and a test file,\n creates and stores an h5 dataset on the specified destination.\n Sequences are truncated at max_len characters'''\n 
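# each sentence becomes a (max_len, alphabet_size) one-hot matrix; its target\n # is the same matrix shifted up one row (see np.roll below), i.e. next-character\n # prediction, with the last target row zeroed out\n 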
splits = {\n 'train' : train,\n 'validation' : validation,\n 'test' : test\n }\n alphabet = {}\n # Create the alphabet with just the train split.\n # This will simulate more realistic situations where strange\n # unknown symbols may appear on live inputs\n add_alphabet(alphabet, train)\n # Add fictional tokens for begin, unknown, and end\n\n # Begin\n assert '<' not in alphabet.keys(), 'Begin token is taken'\n alphabet['<'] = len(alphabet)\n\n # End\n # Not actually needed in our datasets, as the final period (.)\n # is a \"natural\" end token!\n assert '>' not in alphabet.keys(), 'End token is taken'\n alphabet['>'] = len(alphabet)\n\n # Unknown\n assert ',' not in alphabet.keys(), 'Unknown token is taken'\n alphabet[','] = len(alphabet)\n\n s = len(alphabet)\n\n # Save the reverse alphabet, too\n\n reverse_alphabet = {}\n for (k, v) in alphabet.items():\n reverse_alphabet[v] = k\n\n # Create the h5py file\n import h5py\n f = h5py.File(dest, 'w')\n # Save the alphabet map\n import json\n import numpy as np\n f['alphabet'] = json.dumps(alphabet)\n f['reverse_alphabet'] = json.dumps(reverse_alphabet)\n # Create a dataset per split\n datasets = f.create_group('datasets')\n for (name, filename) in splits.items():\n print(name, filename)\n current_dataset = datasets.create_group(name)\n # Avoid non-existing datasets\n if filename is None: continue\n # Create both the example and the answer\n examples, answers = [], []\n # for each dataset\n with open(filename, 'r') as r:\n # for each sentence\n for example_sentence in r:\n example_sentence = '<' + example_sentence\n matrix = np.zeros((max_len, s), dtype = np.uint8)\n for i in range(min(len(example_sentence), max_len)):\n char = example_sentence[i]\n matrix[i, alphabet[char] if char in alphabet.keys() else alphabet[','] ] = 1\n examples.append(matrix)\n answers.append(np.roll(matrix, -1, 0))\n answers[-1][-1][:] = 0\n current_dataset['examples'] = np.array(examples)\n current_dataset['answers'] = np.array(answers)\n f.close()\n\n\ndef main():\n '''Create an hdf5 dataset from text\n '''\n opts = parse_arguments()\n refine_opts(opts)\n import os\n if opts.force or not os.path.exists(opts.o):\n create_dataset(opts.train, opts.validation, opts.test, opts.o, opts.l)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"create_hdf5_dataset.py","file_name":"create_hdf5_dataset.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"567625958","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 24 12:38:03 2018\n\n@author: mbergemann@unimelb.edu.au\n\"\"\"\n\n\nimport matplotlib\nfrom tint import Cell_tracks, animate\nimport os, pandas as pd\nfrom itertools import groupby\nimport numpy as np\nfrom netCDF4 import Dataset as nc, num2date, date2num\nfrom datetime import datetime, timedelta\nfrom tint.helpers import get_times, get_grids\nfrom tint.visualization import embed_mp4_as_gif, plot_traj\ndataF = os.path.join(os.path.abspath('.'),'data','CPOL_radar.nc') #NetCDF data file\ntrackdir = os.path.join(os.path.abspath('.'),'tracks') #Output directory\noverwrite = True #Overwrite existing old files\nfirst = '2006-11-16 03:00' #Start-date\nlast = '2006-11-17 15:00' #End-date\n\nf = nc('/home/unimelb.edu.au/mbergemann/Data/Darwin/netcdf/Cmorph_1998-2010.nc')\nlats = f.variables['lat'][:]\nlons = f.variables['lon'][:]\nslices = get_times(f.variables['time'], first, last, None)\nx = lons[int(len(lons)/2)]\ny = lats[int(len(lats)/2)]\ns = slices[0]\ngr = (i for i in 
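# two independent generators over the same time slice: this one feeds the\n # tracker, 'anim' below feeds the animation\n 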
get_grids(f, s, lons, lats, varname='precip'))\nanim = (i for i in get_grids(f, s, lons, lats, varname='precip'))\nstart = num2date(f.variables['time'][s[0]],\n f.variables['time'].units)\nend = num2date(f.variables['time'][s[-1]],\n f.variables['time'].units)\nsuffix = '%s-%s'%(start.strftime('%Y_%m_%d_%H'), end.strftime('%Y_%m_%d_%H'))\ntracks_obj = Cell_tracks()\ntracks_obj.params['MIN_SIZE'] = 4\ntracks_obj.params['FIELD_THRESH'] = 1\ntracks_obj.params['ISO_THRESH'] = 2\ntracks_obj.params['ISO_SMOOTH'] = 2\ntracks_obj.params['SEARCH_MARGIN'] = 750\ntracks_obj.params['FLOW_MARGIN'] = 1550\ntracks_obj.params['MAX_DISPARITY'] = 999\ntracks_obj.params['MAX_FLOW_MAG'] = 50\ntracks_obj.params['MAX_SHIFT_DISP'] = 15\ntracks_obj.params['GS_ALT'] = 1500\ntrack_file = os.path.join(trackdir,'cpol_tracks_%s.pkl'%suffix)\nncells = tracks_obj.get_tracks(gr, (x,y))\nanimate(tracks_obj, anim, os.path.join(trackdir,'ani', 'cmporph_tracks_%s.mp4'%suffix),\n overwrite=overwrite, dt=9.5, tracers=True, basemap_res='f')\nf.close()\nembed_mp4_as_gif(os.path.join(trackdir,'ani', 'cmporph_tracks_%s.mp4'%suffix))\nax = plot_traj(tracks_obj.tracks, lons, lats, basemap_res='f', label=True, mintrace=2)","sub_path":"examples/SatteliteTrack.py","file_name":"SatteliteTrack.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"621748895","text":"import torch\nfrom torch.utils.data import DataLoader\nimport torch.backends.cudnn as cudnn\n\nimport numpy as np\nimport time\nimport sys\nimport os\nimport math\nimport tqdm\nimport timeit\nfrom datetime import datetime\nimport dateutil.tz\n\nfrom catr.models import utils, caption\nfrom catr.datasets import coco\nfrom catr.cfg_damsm_bert import Config\n# from catr.engine import train_one_epoch, evaluate\n\nfrom torch.utils.tensorboard import SummaryWriter\n\n\ndef train_one_epoch(model, criterion, data_loader,\n optimizer, device, epoch, max_norm, tbw):\n model.train()\n model.backbone[0].eval() # keep the damsm cnn encoder frozen\n criterion.train()\n\n epoch_loss = 0.0\n total = len(data_loader)\n cnt = 0\n\n with tqdm.tqdm(total=total) as pbar:\n for images, masks, caps, cap_masks in data_loader:\n samples = utils.NestedTensor(images, masks).to(device)\n caps = caps.to(device)\n cap_masks = cap_masks.to(device)\n\n outputs = model(samples, caps[:, :-1], cap_masks[:, :-1])\n loss = criterion(outputs.permute(0, 2, 1), caps[:, 1:])\n loss_value = loss.item()\n epoch_loss += loss_value\n cnt += 1\n\n if not math.isfinite(loss_value):\n print(f'Loss is {loss_value}, stopping training')\n sys.exit(1)\n\n optimizer.zero_grad()\n loss.backward()\n if max_norm > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)\n optimizer.step()\n\n pbar.update(1)\n pbar.set_description('Train Loss (run_avg): %.6f, batch loss: %.6f' \n % (epoch_loss / cnt, loss_value))\n tbw.add_scalar('Train/total_loss_iter', loss_value, cnt + total * epoch)\n \n# if cnt == 50:\n# break\n\n return epoch_loss / total\n\n\n@torch.no_grad()\ndef evaluate(model, criterion, data_loader, device, epoch, tbw):\n model.eval()\n criterion.eval()\n\n validation_loss = 0.0\n total = len(data_loader)\n cnt = 0\n\n with tqdm.tqdm(total=total) as pbar:\n for images, masks, caps, cap_masks in data_loader:\n samples = utils.NestedTensor(images, masks).to(device)\n caps = caps.to(device)\n cap_masks = cap_masks.to(device)\n\n outputs = model(samples, caps[:, :-1], cap_masks[:, :-1])\n loss = criterion(outputs.permute(0, 2, 
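# logits come out as (batch, seq, vocab); permuting to (batch, vocab, seq)\n # matches the layout nn.CrossEntropyLoss expects for sequence targets\n 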
1), caps[:, 1:])\n\n validation_loss += loss.item()\n cnt += 1\n\n pbar.update(1)\n pbar.set_description('Validation loss (run_avg): %.6f, batch loss: %.6f' \n % (validation_loss / cnt, loss.item()))\n# tbw.add_scalar('Val/total_loss_iter', loss.item(), cnt + total * epoch)\n\n# if cnt == 200:\n# break\n \n return validation_loss / total\n\n\ndef main(config):\n device = torch.device(config.device)\n cudnn.benchmark = True\n print(f'Initializing Device: {device}')\n\n seed = config.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n model, criterion = caption.build_model_v2(config)\n model.to(device)\n print(\"Initializing from Checkpoint V3...\")\n checkv3 = torch.load('catr/checkpoint_v3.pth', map_location='cpu')\n model.load_state_dict(checkv3['model'], strict=False)\n\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f\"Number of params: {n_parameters}\")\n\n param_dicts = [\n {\"params\": [p for n, p in model.named_parameters(\n ) if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": config.lr_backbone,\n },\n ]\n optimizer = torch.optim.AdamW(\n param_dicts, lr=config.lr, weight_decay=config.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, config.lr_drop)\n\n dataset_train = coco.build_dataset14(config, mode='training')\n dataset_val = coco.build_dataset14(config, mode='validation')\n print(f\"Train: {len(dataset_train)}\")\n print(f\"Valid: {len(dataset_val)}\")\n\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, config.batch_size, drop_last=True\n )\n\n data_loader_train = DataLoader(\n dataset_train, batch_sampler=batch_sampler_train, num_workers=config.num_workers)\n data_loader_val = DataLoader(dataset_val, config.batch_size,\n sampler=sampler_val, drop_last=False, num_workers=config.num_workers)\n\n if os.path.exists(config.checkpoint):\n print(\"Loading Checkpoint...\")\n checkpoint = torch.load(config.checkpoint, map_location='cpu')\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n config.start_epoch = checkpoint['epoch'] + 1\n \n now = datetime.now(dateutil.tz.tzlocal())\n timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')\n tb_dir = 'catr/tensorboard/{0}_{1}_{2}'.format('coco%s' % config.data_ver, config.prefix, timestamp)\n tbw = SummaryWriter(log_dir=tb_dir) # Tensorboard logging\n \n print(\"Start Training..\")\n for epoch in range(config.start_epoch, config.epochs):\n \n# print('\\n=>Validation on COCO%s' % config.data_ver)\n# start_time = timeit.default_timer()\n# # validation\n# validation_loss = evaluate(model, criterion, data_loader_val, device, epoch, tbw)\n# tbw.add_scalar('Val/total_loss_epoch', validation_loss, epoch)\n# stop_time = timeit.default_timer()\n# print('[Epoch: %d, Val Loss: %.6f, Execution time: %.2f]' \n# % (epoch, validation_loss, (stop_time - start_time) / 60))\n \n print('\\n=>Epoches %i, learning rate = %.7f' % (epoch, optimizer.param_groups[0]['lr']))\n start_time = timeit.default_timer()\n # training\n epoch_loss = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch, config.clip_max_norm, tbw)\n lr_scheduler.step()\n tbw.add_scalar('Train/total_loss_epoch', epoch_loss, 
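# per-epoch average loss; the per-batch curve goes to 'Train/total_loss_iter'\n # inside train_one_epoch\n 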
epoch)\n stop_time = timeit.default_timer()\n print('[Epoch: %d, Train Loss: %.6f, Execution time: %.2f]' \n % (epoch, epoch_loss, (stop_time - start_time) / 60))\n\n print('\\n=>Validation on COCO%s' % config.data_ver)\n start_time = timeit.default_timer()\n # validation\n validation_loss = evaluate(model, criterion, data_loader_val, device, epoch, tbw)\n tbw.add_scalar('Val/total_loss_epoch', validation_loss, epoch)\n stop_time = timeit.default_timer()\n print('[Epoch: %d, Val Loss: %.6f, Execution time: %.2f]' \n % (epoch, validation_loss, (stop_time - start_time) / 60))\n \n # save checkpoint every epoch\n# if epoch % 5 == 4:\n if True:\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n }, 'catr/checkpoints/' + config.prefix + '_coco%s_ep%02d.pth' % (config.data_ver, epoch))\n\n print()\n\n\nif __name__ == \"__main__\":\n config = Config()\n main(config)\n","sub_path":"code/.ipynb_checkpoints/catr_main_v2-checkpoint.py","file_name":"catr_main_v2-checkpoint.py","file_ext":"py","file_size_in_byte":7564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"562882623","text":"\"\"\"\nUsed APIs:\n https://openweathermap.org/api\n https://docs.openexchangerates.org\n\"\"\"\n\n# Import for the new library.\nimport feedparser\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n# We'll use this lib for JSON parsing.\nimport json\n# We'll use this lib to correctly encode URL parameters.\nimport urllib\n# We'll use this lib to download the data from the web.\nimport urllib.request as urllib2\nimport urllib.parse\nimport datetime\n# We'll use this lib to work with cookies (now we can add additional parameters to response).\nfrom flask import make_response\n\napp = Flask(__name__)\n\n# Build a Python dict with all our RSS feeds.\nRSS_FEEDS = {'habr': 'https://habrahabr.ru/rss/hubs/all/',\n 'cnn': 'http://rss.cnn.com/rss/edition.rss',\n 'fox': 'http://feeds.foxnews.com/foxnews/latest',\n 'rbk': 'feed://static.feed.rbc.ru/rbc/logical/footer/news.rss',\n 'lenta': 'feed:https://lenta.ru/rss/news',\n 'vc': 'feed:https://vc.ru/rss/all'}\n\nDEFAULTS = {'publication': 'habr',\n 'city': 'Saint Petersburg, RU',\n 'currency_from': 'USD',\n 'currency_to': 'RUB'}\n\nWEATHER_API_URL = \"http://api.openweathermap.org/data/2.5/\" \\\n \"find?q={}&units=metric&appid=0cce66ee8aae26d43e25af39e9d1c133\"\n\nCURRENCY_URL = \"https://openexchangerates.org/api/latest.json?app_id=55331f91a49a4b79a6453fa5188fcb3b\"\n\n\n@app.route(\"/\")\ndef home():\n # Get customized headlines, based on user input or from cookies or defaults.\n publication = get_value_with_fallback('publication')\n articles = get_news(publication)\n # Get customized weather, based on user input or from cookies or defaults.\n city = get_value_with_fallback('city')\n weather = get_weather(city)\n # Get currency rate, based on user input or from cookies or defaults.\n currency_from = get_value_with_fallback('currency_from')\n currency_to = get_value_with_fallback('currency_to')\n # Ask for tuple with all data about currencies.\n rate, currencies = get_rate(currency_from, currency_to)\n # Prepare our response object.\n response = make_response(render_template('home.html',\n articles=articles,\n weather=weather,\n currency_from=currency_from,\n currency_to=currency_to,\n rate=rate,\n currencies=sorted(currencies)))\n # Add some cookie data to our response.\n expires = 
datetime.datetime.now() + datetime.timedelta(days=365)\n response.set_cookie(\"publication\", publication, expires=expires)\n response.set_cookie(\"city\", city, expires=expires)\n response.set_cookie(\"currency_from\", currency_from, expires=expires)\n response.set_cookie(\"currency_to\", currency_to, expires=expires)\n return response\n\n\ndef get_news(query):\n if not query or query.lower() not in RSS_FEEDS:\n publication = DEFAULTS['publication']\n else:\n publication = query.lower()\n # Parse the feed. The function downloads the feed, parses it and returns a Python dictionary.\n feed = feedparser.parse(RSS_FEEDS[publication])\n return feed['entries']\n\n\n# Get weather report.\ndef get_weather(query):\n # URLs cannot contain spaces. This function handles that for us (it substitutes spaces with \"%20\").\n query = urllib.parse.quote(query)\n url = WEATHER_API_URL.format(query)\n # Load the data over HTTP into a Python string.\n json_data = urllib2.urlopen(url).read()\n # Convert the JSON string that we downloaded into a Python dictionary.\n parsed = json.loads(json_data)\n weather = None\n if parsed.get(\"list\"):\n weather = {\"temperature\": parsed[\"list\"][0][\"main\"][\"temp\"],\n \"city\": parsed[\"list\"][0][\"name\"],\n \"description\": parsed[\"list\"][0][\"weather\"][0][\"description\"],\n \"country\": parsed[\"list\"][0]['sys']['country']}\n return weather\n\n\n# Get the current exchange rate.\ndef get_rate(currency_from, currency_to):\n all_currency_json = urllib2.urlopen(CURRENCY_URL).read()\n parsed = json.loads(all_currency_json).get('rates')\n frm_rate = parsed.get(currency_from.upper())\n to_rate = parsed.get(currency_to.upper())\n # return tuple\n return to_rate / frm_rate, parsed.keys()\n\n\n# Get data from user input or cookies or default values.\ndef get_value_with_fallback(key):\n # Case where the user supplied a value in the query string:\n if request.args.get(key):\n return request.args.get(key)\n # Case where the user supplied nothing: try to get a default value from cookies.\n if request.cookies.get(key):\n return request.cookies.get(key)\n # Case where the user supplied nothing and the cookies are empty: use the hardcoded defaults.\n return DEFAULTS[key]\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n","sub_path":"headlines.py","file_name":"headlines.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"610907851","text":"'''\n1. 2\n2. 2,,\n3. 6\n4. 19\n5. 584\n6. 358\n7. 99\n8. 153\n9. 89\n10. 166\n11. 224\n12. 97\n13. 4\n14. 
147\n'''\n# # import sys\n# # sys.stdin = open('tc13.txt')\n# # sys.setrecursionlimit(100000)\n\n# from collections import deque\n\n# dx = [-1, 1, 0, 0]\n# dy = [0, 0, -1, 1]\n\n# def isNotWall(x, y):\n# return (0 <= x < R) and (0 <= y < C)\n\n# def bfs(t):\n# global day\n# # print(t)\n# # if meltT < t:\n# # return\n# while swan1:\n# x, y = swan1.popleft()\n# # print(x, y)\n# visited[x][y] = 1\n# for n in range(4):\n# if isNotWall(x+dx[n], y+dy[n]):\n# if visited[x+dx[n]][y+dy[n]] == 2:\n# day = t\n# return\n# if visited[x+dx[n]][y+dy[n]] == 0 and lake[x+dx[n]][y+dy[n]] <= t:\n# swan1.append((x+dx[n], y+dy[n]))\n# s1.append((x+dx[n], y+dy[n]))\n# # for i in range(R):\n# # print(*visited[i])\n# # print()\n# # print('!!!!!!!!!!!!!!!!!!!!!!')\n# while swan2:\n# x, y = swan2.popleft()\n# visited[x][y] = 2\n# for n in range(4):\n# if isNotWall(x+dx[n], y+dy[n]):\n# if visited[x+dx[n]][y+dy[n]] == 1:\n# day = t\n# return\n# if visited[x+dx[n]][y+dy[n]] == 0 and lake[x+dx[n]][y+dy[n]] <= t:\n# swan2.append((x+dx[n], y+dy[n]))\n# s2.append((x+dx[n], y+dy[n]))\n# # for i in range(R):\n# # print(*visited[i])\n# # print()\n# swan1.extend(s1)\n# swan2.extend(s2)\n# bfs(t+1)\n\n# q = deque()\n# R, C = map(int, input().split())\n# lake = [list(input()) for _ in range(R)]\n# swan1 = deque()\n# swan2 = deque()\n# s1 = []\n# s2 = []\n# visited = [[0 for _ in range(C)] for _ in range(R)]\n\n# for x in range(R):\n# for y in range(C):\n# if lake[x][y] == '.':\n# lake[x][y] = 0\n# q.append((x, y))\n# elif lake[x][y] == 'L':\n# q.append((x, y))\n# lake[x][y] = 0\n# if swan1:\n# swan2.append((x, y))\n# else:\n# swan1.append((x, y))\n# visited[x][y] = 0\n\n# while q:\n# x, y = q.popleft()\n# for n in range(4):\n# if isNotWall(x+dx[n], y+dy[n]):\n# if lake[x+dx[n]][y+dy[n]] == 'X':\n# lake[x+dx[n]][y+dy[n]] = lake[x][y] + 1\n# q.append((x+dx[n], y+dy[n]))\n# # for i in range(R):\n# # print(*lake[i])\n# # print()\n# # meltT = lake[x][y]\n# day = 0\n# bfs(0)\n# # print(day)\n# # print(meltT)\n# # print(swan1, swan2)\n\n\n\n\n'''\n- Melting the ice (while loop)\n1. Turn the '.' water cells (including 'L') into 0 and push them onto the queue;\n store the 'L' positions separately, put the swan positions into swan and set visited to 0 there\n2. popleft and check the four neighbours; if a cell is 'X', set it to current+1 and push it onto the queue\n- Moving the swan (bfs)\n3. Once the queue is empty, run bfs\n4. popleft and check the four neighbours; if the lake value is less than current+1, set visited to current+1\n5. if a neighbouring visited value equals current+1, store it in result and terminate\n'''\n\nfrom collections import deque\nimport sys\n\ninput = sys.stdin.readline\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n\ndef bfs():\n while q:\n x, y = q.popleft()\n if x == x2 and y == y2:\n return 1\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < R and 0 <= ny < C:\n if not c[nx][ny]:\n if lake[nx][ny] == '.':\n q.append([nx, ny])\n else:\n q_temp.append([nx, ny])\n c[nx][ny] = 1\n return 0\n\ndef melt():\n while water:\n x, y = water.popleft()\n if lake[x][y] == 'X':\n lake[x][y] = '.'\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < R and 0 <= ny < C:\n if not wc[nx][ny]:\n if lake[nx][ny] == 'X':\n wq_temp.append([nx, ny])\n else:\n water.append([nx, ny])\n wc[nx][ny] = 1\n\nR, C = map(int, input().split())\nlake = []\nfor _ in range(R):\n lake.append(list(input()))\n\nc = [[0] * C for _ in range(R)]\nwc = [[0] * C for _ in range(R)]\n\n\nswan = []\nq, q_temp, water, wq_temp = deque(), deque(), deque(), deque()\n\n\nfor i in range(R):\n for j in range(C):\n if lake[i][j] == 'L':\n swan.extend([i, j])\n water.append([i, j])\n elif lake[i][j] == '.':\n wc[i][j] = 1\n water.append([i, j])\n\nx1, y1, x2, y2 = swan\nq.append([x1, y1])\nlake[x1][y1], lake[x2][y2], c[x1][y1] = '.', '.', 1\ncnt = 0\n# debug print of the initial water mask, kept commented out so that only the\n# final answer is emitted\n# for i in range(R):\n# print(*wc[i])\nwhile True:\n melt()\n if bfs():\n print(cnt)\n break\n q, water = q_temp, wq_temp\n q_temp, wq_temp = deque(), deque()\n cnt += 1\n","sub_path":"boj_study/3197_boj.py","file_name":"3197_boj.py","file_ext":"py","file_size_in_byte":4960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"301030719","text":"## principal functions and objects file\n\n# clear sections are shown in comments\n# go to docstrings for function purpose and arguments\n\nimport pandas as pd\nimport numpy as np\n\n# import matplotlib.pyplot as plt\n# import seaborn as sns\n\nimport argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\nimport re\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport csv\n\n\n\n\n#################################CLEANING#####################################\n\n\n#################################ETL#####################################\nclass EntityETL(object):\n \"\"\"Class for extracting, transforming and loading the prepped text data \n for Named Entity Recognition (NER) - made to interface with NER model\n in training, prediction and out-of-sample use. \n\n Args:\n embedding file (csv filepath): if you have a file with pretrained \n embeddings (e.g. Glove 50 dim), load it here; will be passed to NER \n model\n embedding_dim (int): dimension of embedding, e.g. 50\n lang (str): language of text, default english 'en'\n \n \"\"\"\n def __init__(self, embedding_file = None, embedding_dim = 0, lang = 'en'):\n \"\"\"Instantiating object for extracting, transforming and loading the prepped text data \n for Named Entity Recognition (NER) - made to interface with NER model\n in training, prediction and out-of-sample use. \n\n Args:\n embedding file (csv filepath): if you have a file with pretrained \n embeddings (e.g. Glove 50 dim), load it here; will be passed to NER \n model\n embedding_dim (int): dimension of embedding, e.g. 
50\n lang (str): language of text, default english 'en'\n \n \"\"\"\n self.embedding_file = embedding_file\n self.embedding_dim = embedding_dim\n self.lang = lang\n self.vocab = {}\n self.vocab_size = len(self.vocab)\n self.embed_dict={}\n self.batch_starting_point = 0\n\n return\n\n\n\n def load_train_vocab_nn(self, csv_file_path ):\n \"\"\"Method for loading input data for model VOCABULARY for neural model\n\n Args:\n csv_file_path (str): location of csv file\n \"\"\" \n #overwrites vocab as empty dict\n self.vocab = {}\n self.ne_tag_map = {}\n self.pos_tag_map = {}\n\n self.vocab['UNK'] = 1\n self.ne_tag_map['UNK_NE'] = 1\n self.pos_tag_map['UNK_POS'] = 1\n\n self.vocab['PAD'] = 0\n self.ne_tag_map['PAD'] = 0\n self.pos_tag_map['PAD'] = 0\n\n list_ne_tags = []\n list_pos_tags = []\n list_words = []\n with open(csv_file_path) as f:\n csv_reader = csv.reader(f, delimiter=',', quotechar='\"')\n next(csv_reader)\n # for line in f.read().splitlines()[1:]:\n for line in csv_reader:\n # print(line)\n list_words.append(line[2])\n try:\n list_ne_tags.append(line[4])\n list_pos_tags.append(line[3])\n except:\n list_ne_tags.append('O')\n list_pos_tags.append('.')\n print(line)\n continue\n\n set_words = set(list_words)\n set_ne_tags = set(list_ne_tags)\n set_pos_tags = set(list_pos_tags)\n\n # start the indices at 2: 0 and 1 are already reserved for the PAD and\n # UNK entries above (starting at 1 would shadow the UNK index)\n for i, word in enumerate(set_words, start=2):\n self.vocab[word] = i\n for i, ne_tag in enumerate(set_ne_tags, start=2):\n self.ne_tag_map[ne_tag] = i\n for i, pos_tag in enumerate(set_pos_tags, start=2):\n self.pos_tag_map[pos_tag] = i\n self.vocab_size = len(self.vocab)\n return \n\n def load_train_input_data(self, csv_file_path):\n \"\"\"Loads the input training data into the object from a file\n for model training purposes. At this point the output is still roughly \n compatible with either feature-based or neural model.\n\n Args:\n csv_file_path (str): location of csv file containing training \n data\n\n Returns:\n tuple: train_sentences (list of lists of words, each sublist a sentence);\n train_labels (same format as above)\n \"\"\" \n self.train_sentences = []\n self.train_labels = []\n self.train_pos_tags = []\n with open(csv_file_path) as f:\n csv_reader = csv.reader(f, delimiter=',', quotechar='\"')\n next(csv_reader)\n # for line in f.read().splitlines()[1:]:\n \n s_index='1.0'\n sentence=[]\n sent_labels = []\n sent_pos_tags = []\n for line in csv_reader:\n # print(line)\n \n if s_index != line[1]:\n s_index = line[1]\n \n if sentence: # guard against flushing an empty first sentence\n self.train_sentences.append(sentence)\n self.train_labels.append(sent_labels)\n self.train_pos_tags.append(sent_pos_tags)\n sentence = []\n sent_labels = []\n sent_pos_tags = []\n \n w_index = line[0]\n # s_index = line[1]\n word = line[2]\n pos_tag = line[3]\n label = line[4]\n sentence.append(word)\n sent_labels.append(label)\n sent_pos_tags.append(pos_tag)\n continue\n \n w_index = line[0]\n s_index = line[1]\n word = line[2]\n pos_tag = line[3]\n label = line[4]\n \n \n # s = [self.vocab[token] if token in self.vocab \n # else vocab['UNK'] for token in sentence.split(' ')]\n sentence.append(word)\n sent_labels.append(label)\n sent_pos_tags.append(pos_tag)\n \n # flush the final sentence; the loop above only appends when the\n # sentence index changes, so the last one would otherwise be dropped\n if sentence:\n self.train_sentences.append(sentence)\n self.train_labels.append(sent_labels)\n self.train_pos_tags.append(sent_pos_tags)\n return self.train_sentences, self.train_labels\n\n\n def load_embed_vects(self, embedding_file = None, embedding_dim = None):\n \"\"\"Loads and stores embedding vectors. Call function with empty params \n to simply use the prespecified embedding_file and dim set at \n instantiation.\n\n Args:\n embedding file (csv filepath): if you have a file with pretrained \n embeddings (e.g. 
Glove 50 dim), load it here; will be passed to NER \n model\n embedding_dim (int): dimension of embedding, e.g. 50\n \"\"\" \n if embedding_file!=None:\n self.embedding_file=embedding_file\n if embedding_dim!=None:\n self.embedding_dim=embedding_dim\n\n avg_vect = np.zeros((self.embedding_dim))\n with open(self.embedding_file, 'rb') as f:\n for line in f:\n parts = line.split()\n word = parts[0].decode('utf-8')\n vector = np.array(parts[1:], dtype=np.float32)\n self.embed_dict[word] = vector\n avg_vect += vector\n # creating the vector for new, UNKnown words in the vocabulary\n # NOTE, this is NOT the same as the word \"unk\", which is \n # present in GloVe's vocabulary, for instance\n self.embed_dict['UNK'] = avg_vect/len(self.embed_dict)\n self.embed_dict['PAD'] = np.zeros((self.embedding_dim))\n return\n \n def prep_train_for_nn(self, train_sentences= None, train_labels = None,\n train_pos_tags = None):\n \"\"\"Converts the loaded word/label/POS sequences into integer index\n sequences for the neural model, using the vocab and tag maps built above.\n\n Args:\n train_sentences (list): sentences as lists of word strings\n train_labels (list): matching lists of NE tag strings\n train_pos_tags (list): matching lists of POS tag strings\n Returns:\n tuple : nn_train_sentences, nn_train_pos (optional), nn_train_labels \n in the format for neural models \n \"\"\" \n if train_sentences!=None:\n self.train_sentences = train_sentences\n if train_labels!=None:\n self.train_labels = train_labels\n if train_pos_tags!=None:\n self.train_pos_tags = train_pos_tags\n\n nn_train_sentences = []\n nn_train_labels = []\n nn_train_pos = []\n\n for sentence in self.train_sentences:\n sent = [self.vocab[token] if token in self.vocab.keys() else self.vocab['UNK'] \n for token in sentence]\n nn_train_sentences.append(sent)\n\n for label_sent in self.train_labels:\n labels = [self.ne_tag_map[label] if label in self.ne_tag_map else self.ne_tag_map['UNK_NE'] \n for label in label_sent]\n nn_train_labels.append(labels)\n\n for pos_sent in self.train_pos_tags:\n pos = [self.pos_tag_map[pos] if pos in self.pos_tag_map else self.pos_tag_map['UNK_POS']\n for pos in pos_sent]\n nn_train_pos.append(pos)\n\n if len(nn_train_pos) == 0 :\n return nn_train_sentences, nn_train_labels\n\n return nn_train_sentences, nn_train_pos, nn_train_labels\n\n def nn_train_batch_generator(self, train_sentences_nn, train_labels_nn, train_pos_tags_nn = None, batch_len = 50):\n \"\"\"Yields padded (data, label) tensor batches for the neural model.\n\n Args:\n train_sentences_nn (list): sentences as lists of vocab indices\n train_labels_nn (list): matching lists of NE tag indices\n train_pos_tags_nn (list, optional): matching POS tag indices (unused for now)\n batch_len (int): number of sentences per batch\n\n Returns:\n generator of (batch_data, batch_ne_tags) LongTensor pairs\n \"\"\" \n\n #compute length of longest sentence in batch\n # for b_ind in range(batch_starting_point, batch_starting_point + 50):s\n # POS tag implementation stopping here until further notice -- WILL COME BACK TO THIS!\n # batch_pos_tags = train_pos_tags_nn[:self.batch_starting_point+ batch_len]\n while True:\n # re-slice on every iteration so consecutive yields actually advance\n # through the data instead of re-serving the first batch\n batch_sentences = train_sentences_nn[self.batch_starting_point:self.batch_starting_point + batch_len]\n batch_labels = train_labels_nn[self.batch_starting_point:self.batch_starting_point + batch_len]\n if not batch_sentences:\n # data exhausted: wrap around and keep yielding\n self.batch_starting_point = 0\n continue\n batch_max_sent_len = max([len(s) for s in batch_sentences])\n\n # BIG QUESTION HERE: WHY WOULD WE ONLY PAD UP TO LENGTH OF MAX SENTENCE IN A SINGLE BATCH??\n # SURELY WE'LL HAVE TO DO THAT FOR THE MAX SENTENCE OF THE ENTIRE TRAINING SET INSTEAD\n # Maybe I'm missing something here, in which case I'll leave it as it is ftm\n # note to self: if fixing, move the np.array sizing outside of the main loop\n batch_data = self.vocab['PAD']*np.ones((len(batch_sentences), batch_max_sent_len))\n batch_ne_tags = self.ne_tag_map['PAD']*np.ones((len(batch_sentences), 
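# labels are padded with the PAD tag index up to the batch's longest sentence\n 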
batch_max_sent_len))\n\n for j in range(len(batch_sentences)):\n cur_len = len(batch_sentences[j])\n batch_data[j][:cur_len] = batch_sentences[j]\n # print(cur_len)\n # print(len(batch_labels[j]))\n \n # print(batch_labels[j])\n batch_ne_tags[j][:cur_len] = batch_labels[j]\n # print(batch_data)\n batch_data, batch_ne_tags = torch.LongTensor(batch_data), torch.LongTensor(batch_ne_tags)\n # batch_data, batch_ne_tags = torch.Tensor(batch_data), torch.LongTensor(batch_ne_tags)\n\n batch_data, batch_ne_tags = Variable(batch_data), Variable(batch_ne_tags)\n\n self.batch_starting_point+=batch_len\n yield batch_data, batch_ne_tags\n\n \n \n\n\n\n#################################DATA TRANSFORMATION#####################################\n\n\ndef load_glove_vects(file = 'glove/glove.6B.50d.txt', vdim=None):\n \"\"\"Function that loads the Global representation Vectors\n and returns them as a dictionary. \n -----------------\n Returns:\n glove_dict - (dict) key - word (str), value - n-dimensional np array \"\"\"\n glove_dict = {}\n# total_vocab = vocab\n if type(vdim)==int:\n file = f'glove/glove.6B.{vdim}d.txt'\n avg_vect = np.zeros((vdim,))\n with open(file, 'rb') as f:\n for line in f:\n parts = line.split()\n word = parts[0].decode('utf-8')\n vector = np.array(parts[1:], dtype=np.float32)\n glove_dict[word] = vector\n avg_vect += vector\n # creating the vector for new, UNKnown words in the vocabulary\n # NOTE, this is NOT the same as the word \"unk\", which is \n # present in glove's vocabulary\n glove_dict['UNK'] = avg_vect/len(glove_dict)\n glove_dict['PAD'] = np.zeros((vdim,))\n return glove_dict\n\ndef generate_tag_set(targets_list : list):\n \"\"\"Function takes in a list of NE tags (which should include at least one instance of \n every possible NE tag) and returns a dict matching each NE tag to a unique int.\n Returns:\n tag_map - (dict) of NE tag - associated int pairs\"\"\"\n ne_dict = {}\n i = 0\n for sublist in targets_list:\n for ne in sublist:\n if ne in ne_dict.keys():\n continue\n else:\n ne_dict[ne] = i\n i += 1\n return ne_dict\n\n\n\ndef sent_to_vect(feature_list : list, targets_list : list, vocab : dict, ne_dict : dict):\n \"\"\"Function takes in list of lists of dictionaries (input data), target NE labels,\n a vocabulary (dictionary) and a dict of NE tags and their corresponding identifiers;\n Returns a list of vectorised input data\"\"\"\n vect_sentences = [] \n vect_label_sentences = []\n \n for sentence in feature_list: \n #replace each token by its index if it is in vocab\n #else use index of UNK\n sentence_vectors = [vocab[token[0]] if token[0] in vocab.keys() \n else vocab['UNK']\n for token in sentence]\n vect_sentences.append(sentence_vectors)\n \n for sentence in targets_list:\n #replace each label by its index\n try:\n label_sent = [ne_dict[label] for label in sentence]\n except KeyError:\n #rebuild the tag map and retry the lookup\n ne_dict = generate_tag_set(targets_list)\n label_sent = [ne_dict[label] for label in sentence]\n vect_label_sentences.append(label_sent) \n \n return vect_sentences, vect_label_sentences\n\ndef prep_batch(batch_sentences : list, batch_sentences_labels : list, vocab : dict, word_vect_dim = 50):\n \"\"\"Function takes in a list of lists (each sublist a sentence of n-dimension\n numpy arrays), the associated list of lists of NE labels and a vocabulary (dict)\"\"\"\n #compute length of longest sentence in batch\n batch_max_len = max([len(sentence) for sentence in batch_sentences_labels])\n #prepare a numpy array with the data, initializing the data with 'PAD' \n #and all labels with -1; initializing 
labels to -1 differentiates tokens \n #with tags from 'PAD' tokens\n #note the dimensional change here as we are effectively about to \n # concatenate the sentences along the 2nd dimension\n batch_data = np.zeros((len(batch_sentences), batch_max_len, word_vect_dim))\n batch_labels = -1*np.ones((len(batch_sentences), batch_max_len))\n #copy the data to the numpy array\n for j in range(len(batch_sentences)):\n #accessing individual sentence below\n cur_len = len(batch_sentences[j])\n \n for k in range(len(batch_sentences[j])):\n #accessing individual word vectors below\n batch_data[j,k, :] = batch_sentences[j][k].reshape(1,-1)\n \n batch_labels[j][:cur_len] = batch_sentences_labels[j]\n\n #since all data are indices, we convert them to torch LongTensors\n batch_data, batch_labels = torch.Tensor(batch_data), torch.Tensor(batch_labels)\n\n #convert Tensors to Variables\n # Torch tensors and torch Variables are almost the same, the latter being a wrapper fn\n # that allows for additional methods to be called onto the underlying tensor. \n # So we're reassigning them as Variables for extra future flexibility\n# batch_data, batch_labels = Variable(batch_data), Variable(batch_labels)\n return batch_data, batch_labels\n \n\n#################################EDA#####################################\n\ndef example_output(num, data, estimator, target_data, word_arg = 'word.lower()'):\n '''Short function that produces a quick inspection table so we can compare how \n our CRF model performs against the labelled sentences. \n Params:\n num - (int) index of sentence in the data\n data - (list) input data that the prediction will be made on. Must be of form of list\n of lists of dicts\n estimator - (object) model to use for prediction - has to be sklearn crf\n target_data - (list) data from which ground truth labels are to be retrieved; has\n to be list of list of strings\n word_arg - (str) which dictionary argument to access to retrieve the original, \n lowercased word - this will depend on how the initial data was defined. 
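A hypothetical call, assuming a fitted sklearn-crfsuite estimator crf\n and test data X_test / y_test in the formats described above:\n example_output(0, X_test, crf, y_test)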
\n \n Returns:\n pandas Dataframe with 3 columns: True label; predicted label and the Word\n '''\n predicted_table = pd.DataFrame(columns=['True', 'Pred', 'Word'])\n predicted_table['True'] = target_data[num]\n predicted_table['Word'] = [word[word_arg] for word in data[num]]\n predicted_table['Pred'] = estimator.predict([data[num]])[0]\n return predicted_table\n\n \n\n\n#################################SUMMARY TABLES CREATION#####################################\n\n\n\n#############################MODEL BUILDING, GRIDSEARCH AND PIPELINES#####################################\n\nclass LSTMClassifier(nn.Module):\n \"\"\"\n This is the simplest RNN model we will be using to perform NER.\n \"\"\"\n\n def __init__(self, embedding_dim, hidden_dim, vocab_size):\n \"\"\"\n Initialize the model by setting up the various layers.\n \"\"\"\n super(LSTMClassifier, self).__init__()\n\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n self.lstm = nn.LSTM(embedding_dim, hidden_dim)\n self.dense = nn.Linear(in_features=hidden_dim, out_features=1)\n self.softm = nn.Softmax()\n \n self.word_dict = None\n\n def forward(self, x):\n \"\"\"\n Perform a forward pass of our model on some input.\n \"\"\"\n x = x.t()\n lengths = x[0,:] # assumed layout: row 0 of the transposed batch holds each sequence's length\n reviews = x[1:,:]\n embeds = self.embedding(reviews)\n lstm_out, _ = self.lstm(embeds)\n out = self.dense(lstm_out)\n out = out[lengths - 1, range(len(lengths))]\n return self.softm(out.squeeze())\n\n\n#############################MODEL EVALUATION (METZ, ROC CURVE, CONF_MAT)#####################################\n\n","sub_path":"NER_ETL.py","file_name":"NER_ETL.py","file_ext":"py","file_size_in_byte":18676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"480991987","text":"from selenium.webdriver.common.keys import Keys\n\nfrom enter import StartTestLog\nimport unittest, time, re\n\nfrom random_ch import randInt\n\n\"\"\"\nMarket. 
Bids.\nIn the table with the grey background, select the variables, fill in the price and click save.\nA record should appear in the table at the bottom, and also on the Indicators page - the Bids tab.\n\"\"\"\n\n\nclass CreatingBidInMarket(StartTestLog):\n def test_creating_bid_in_market(self):\n driver = self.login()\n\n self.assertTrue(driver.title.__eq__('— Грейнтрек'))\n self.assertEqual(driver.title, '— Грейнтрек')\n\n print('test_creating_bid_in_market')\n\n self.click_css(\".prices_tab .fa-area-chart\")\n\n self.click_css(\n \".centered-text:nth-of-type(1) li:nth-of-type(2) > a.btn.btn-default\")\n\n self.click_css(\n \".centered-text:nth-of-type(3) li:nth-of-type(4) > a.btn.btn-default\")\n\n self.click_css(\n \".centered-text > gt-resource-select .text-muted\")\n self.click_css(\"div.active\")\n\n self.click_css(\n \".prices-section-filters > div:nth-of-type(2) gt-resource-select .text-muted\")\n self.click_css(\"div.active\")\n inp2 = str(531)\n self.click_css('div[permission-only=\"\\'add_request\\'\"] .input-group>input.form-control')\n driver.find_element_by_css_selector(\n 'div[permission-only=\"\\'add_request\\'\"] .input-group>input.form-control').send_keys(inp2)\n inp1 = randInt(5)\n r = '{} {}'.format(inp1[:2], inp1[2:])\n driver.find_element_by_css_selector(\n \"#central-home-purchase div:nth-child(6) > input\").clear()\n driver.find_element_by_css_selector(\n \"#central-home-purchase div:nth-child(6) > input\").send_keys(inp1)\n\n self.click_css(\"i.fa.fa-floppy-o\")\n\n time.sleep(5)\n # assertTrue raises on failure, so just assert, then report\n self.assertTrue(driver.page_source.__contains__(r), msg='Oh My God, selenium cannot find the bid')\n print('Python found bid volume: ' + str(r))\n self.click_css('.prices_tab .fa-file-o')\n time.sleep(3)\n self.assertTrue(driver.page_source.__contains__(r), msg='Oh My God, selenium cannot find the bid')\n print('Python found indicator volume: ' + str(r))\n self.dell_indicator(driver)\n self.log_out()\n\n def dell_indicator(self, driver):\n self.click_css('tr .fa-cog')\n self.click_css('tr .fa-pencil-square')\n time.sleep(1)\n self.click_css(\".btn-fixed i.fa-trash\")\n driver.switch_to_alert().accept()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"gt_tests/Deals/creating_bid_in_market.py","file_name":"creating_bid_in_market.py","file_ext":"py","file_size_in_byte":2823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"567298819","text":"# devices/md.py\n#\n# Copyright (C) 2009-2014 Red Hat, Inc.\n#\n# This copyrighted material is made available to anyone wishing to use,\n# modify, copy, or redistribute it subject to the terms and conditions of\n# the GNU General Public License v.2, or (at your option) any later version.\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY expressed or implied, including the implied warranties of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details. You should have received a copy of the\n# GNU General Public License along with this program; if not, write to the\n# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n# 02110-1301, USA. 
Any Red Hat trademarks that are incorporated in the\n# source code or documentation are not subject to the GNU General Public\n# License and may only be used or replicated with the express permission of\n# Red Hat, Inc.\n#\n# Red Hat Author(s): David Lehman \n#\n\nimport os\nimport time\n\nfrom ..devicelibs import lvm\nfrom ..devicelibs import mdraid, raid\n\nfrom .. import errors\nfrom .. import util\nfrom ..flags import flags\nfrom ..storage_log import log_method_call\nfrom .. import udev\nfrom ..i18n import P_\nfrom ..size import Size\n\nimport logging\nlog = logging.getLogger(\"blivet\")\n\nfrom .storage import StorageDevice\nfrom .container import ContainerDevice\n\nclass MDRaidArrayDevice(ContainerDevice):\n \"\"\" An mdraid (Linux RAID) device. \"\"\"\n _type = \"mdarray\"\n _packages = [\"mdadm\"]\n _devDir = \"/dev/md\"\n _formatClassName = property(lambda s: \"mdmember\")\n _formatUUIDAttr = property(lambda s: \"mdUuid\")\n\n def __init__(self, name, level=None, major=None, minor=None, size=None,\n memberDevices=None, totalDevices=None,\n uuid=None, fmt=None, exists=False, metadataVersion=None,\n parents=None, sysfsPath='', chunkSize=None):\n \"\"\"\n :param name: the device name (generally a device node's basename)\n :type name: str\n :keyword exists: does this device exist?\n :type exists: bool\n :keyword size: the device's size\n :type size: :class:`~.size.Size`\n :keyword parents: a list of parent devices\n :type parents: list of :class:`StorageDevice`\n :keyword fmt: this device's formatting\n :type fmt: :class:`~.formats.DeviceFormat` or a subclass of it\n :keyword sysfsPath: sysfs device path\n :type sysfsPath: str\n :keyword chunkSize: chunk size for the device\n :type chunkSize: :class:`~.size.Size`\n :keyword uuid: the device UUID\n :type uuid: str\n\n :keyword level: the device's RAID level\n :type level: any valid RAID level descriptor\n :keyword int memberDevices: the number of active member devices\n :keyword int totalDevices: the total number of member devices\n :keyword metadataVersion: the version of the device's md metadata\n :type metadataVersion: str (eg: \"0.90\")\n :keyword minor: the device minor (obsolete?)\n :type minor: int\n\n .. note::\n\n An instance of this class whose :attr:`exists` attribute is\n True and whose parent/member devices are all partitionable is\n also considered to be partitionable.\n\n .. 
note::\n\n An instance of this class whose :attr:`exists` attribute is\n True and whose parent/member devices are all disks is also\n treated like a disk.\n\n \"\"\"\n # pylint: disable=unused-argument\n\n # These attributes are used by _addParent, so they must be initialized\n # prior to instantiating the superclass.\n self._memberDevices = 0 # the number of active (non-spare) members\n self._totalDevices = 0 # the total number of members\n\n if level == \"container\":\n self._type = \"mdcontainer\"\n self.level = level\n\n super(MDRaidArrayDevice, self).__init__(name, fmt=fmt, uuid=uuid,\n exists=exists, size=size,\n parents=parents,\n sysfsPath=sysfsPath)\n\n # For new arrays check if we have enough members\n if (not exists and parents and len(parents) < self.level.min_members):\n for dev in self.parents:\n dev.removeChild()\n raise errors.DeviceError(P_(\"A %(raidLevel)s set requires at least %(minMembers)d member\",\n \"A %(raidLevel)s set requires at least %(minMembers)d members\",\n self.level.min_members) % \\\n {\"raidLevel\": self.level, \"minMembers\": self.level.min_members})\n\n self.uuid = uuid\n self._totalDevices = util.numeric_type(totalDevices)\n self.memberDevices = util.numeric_type(memberDevices)\n\n if self.exists:\n self._chunkSize = self.readChunkSize()\n else:\n self._chunkSize = chunkSize or mdraid.MD_CHUNK_SIZE\n\n if not self.exists and not isinstance(metadataVersion, str):\n self.metadataVersion = \"default\"\n else:\n self.metadataVersion = metadataVersion\n\n # For container members probe size now, as we cannot determine it\n # when teared down.\n if self.parents and self.parents[0].type == \"mdcontainer\":\n self._size = self.currentSize\n self._type = \"mdbiosraidarray\"\n\n if self.exists and self.mdadmFormatUUID and not flags.testing:\n # this is a hack to work around mdadm's insistence on giving\n # really high minors to arrays it has no config entry for\n with open(\"/etc/mdadm.conf\", \"a\") as c:\n c.write(\"ARRAY %s UUID=%s\\n\" % (self.path, self.mdadmFormatUUID))\n\n def _verifyMemberFormat(self, member):\n if member.type == \"mdcontainer\":\n return None\n\n return super(MDRaidArrayDevice, self)._verifyMemberFormat(member)\n\n @property\n def mdadmFormatUUID(self):\n \"\"\" This array's UUID, formatted for external use.\n\n :returns: the array's UUID in mdadm format, if available\n :rtype: str or NoneType\n \"\"\"\n formatted_uuid = None\n\n if self.uuid is not None:\n try:\n formatted_uuid = mdraid.mduuid_from_canonical(self.uuid)\n except errors.MDRaidError:\n pass\n\n return formatted_uuid\n\n @property\n def level(self):\n \"\"\" Return the raid level\n\n :returns: raid level value\n :rtype: an object that represents a RAID level\n \"\"\"\n return self._level\n\n @level.setter\n def level(self, value):\n \"\"\" Set the RAID level and enforce restrictions based on it.\n\n :param value: new raid level\n :param type: a valid raid level descriptor\n :returns: None\n \"\"\"\n self._level = mdraid.RAID_levels.raidLevel(value) # pylint: disable=attribute-defined-outside-init\n\n @property\n def chunkSize(self):\n if self.exists and self._chunkSize == Size(0):\n self._chunkSize = self.readChunkSize()\n return self._chunkSize\n\n @chunkSize.setter\n def chunkSize(self, newsize):\n if not isinstance(newsize, Size):\n raise ValueError(\"new chunk size must be of type Size\")\n\n if newsize % Size(\"4 KiB\") != Size(0):\n raise ValueError(\"new chunk size must be multiple of 4 KiB\")\n\n if self.exists:\n raise ValueError(\"cannot set chunk size for an 
existing device\")\n\n self._chunkSize = newsize\n\n def readChunkSize(self):\n log_method_call(self, exists=self.exists, path=self.path,\n sysfsPath=self.sysfsPath)\n chunkSize = Size(0)\n if self.status:\n chunkSize = Size(util.get_sysfs_attr(self.sysfsPath, \"md/chunk_size\") or \"0\")\n\n return chunkSize\n\n @property\n def createBitmap(self):\n \"\"\" Whether or not a bitmap should be created on the array.\n\n If the the array is sufficiently small, a bitmap yields no benefit.\n\n If the array has no redundancy, a bitmap is just pointless.\n \"\"\"\n return self.level.has_redundancy and self.size >= 1000 and self.format.type != \"swap\"\n\n def getSuperBlockSize(self, raw_array_size):\n \"\"\"Estimate the superblock size for a member of an array,\n given the total available memory for this array and raid level.\n\n :param raw_array_size: total available for this array and level\n :type raw_array_size: :class:`~.size.Size`\n :returns: estimated superblock size\n :rtype: :class:`~.size.Size`\n \"\"\"\n return mdraid.get_raid_superblock_size(raw_array_size,\n version=self.metadataVersion)\n\n @property\n def size(self):\n \"\"\"Returns the actual or estimated size depending on whether or\n not the array exists.\n \"\"\"\n # For container members return probed size, as we cannot determine it\n # when teared down.\n if self.type == \"mdbiosraidarray\":\n return self._size\n\n if not self.exists or not self.mediaPresent:\n try:\n size = self.level.get_size([d.size for d in self.devices],\n self.memberDevices,\n self.chunkSize,\n self.getSuperBlockSize)\n except (errors.MDRaidError, errors.RaidError) as e:\n log.info(\"could not calculate size of device %s for raid level %s: %s\", self.name, self.level, e)\n size = 0\n log.debug(\"non-existent RAID %s size == %s\", self.level, size)\n else:\n size = self.currentSize\n log.debug(\"existing RAID %s size == %s\", self.level, size)\n\n return size\n\n def updateSize(self):\n # container size is determined by the member disks, so there is nothing\n # to update in that case\n if self.type != \"mdcontainer\":\n # pylint: disable=bad-super-call\n super(ContainerDevice, self).updateSize()\n\n @property\n def description(self):\n if self.type == \"mdcontainer\":\n return \"BIOS RAID container\"\n else:\n levelstr = self.level.nick if self.level.nick else self.level.name\n if self.type == \"mdbiosraidarray\":\n return \"BIOS RAID set (%s)\" % levelstr\n else:\n return \"MDRAID set (%s)\" % levelstr\n\n def __repr__(self):\n s = StorageDevice.__repr__(self)\n s += (\" level = %(level)s spares = %(spares)s\\n\"\n \" members = %(memberDevices)s\\n\"\n \" total devices = %(totalDevices)s\"\n \" metadata version = %(metadataVersion)s\" %\n {\"level\": self.level, \"spares\": self.spares,\n \"memberDevices\": self.memberDevices,\n \"totalDevices\": self.totalDevices,\n \"metadataVersion\": self.metadataVersion})\n return s\n\n @property\n def dict(self):\n d = super(MDRaidArrayDevice, self).dict\n d.update({\"level\": str(self.level),\n \"spares\": self.spares, \"memberDevices\": self.memberDevices,\n \"totalDevices\": self.totalDevices,\n \"metadataVersion\": self.metadataVersion})\n return d\n\n @property\n def mdadmConfEntry(self):\n \"\"\" This array's mdadm.conf entry. 
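A hypothetical example of the returned string for a plain array:\n ARRAY /dev/md0 level=raid1 num-devices=2 UUID=<uuid in mdadm format>\n 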
\"\"\"\n if self.memberDevices is None or not self.mdadmFormatUUID:\n raise errors.DeviceError(\"array is not fully defined\", self.name)\n\n # containers and the sets within must only have a UUID= parameter\n if self.type == \"mdcontainer\" or self.type == \"mdbiosraidarray\":\n fmt = \"ARRAY %s UUID=%s\\n\"\n return fmt % (self.path, self.mdadmFormatUUID)\n\n fmt = \"ARRAY %s level=%s num-devices=%d UUID=%s\\n\"\n return fmt % (self.path, self.level, self.memberDevices, self.mdadmFormatUUID)\n\n @property\n def totalDevices(self):\n \"\"\" Total number of devices in the array, including spares. \"\"\"\n if not self.exists:\n return self._totalDevices\n else:\n return len(self.parents)\n\n def _getMemberDevices(self):\n return self._memberDevices\n\n def _setMemberDevices(self, number):\n if not isinstance(number, int):\n raise ValueError(\"memberDevices is an integer\")\n\n if not self.exists and number > self.totalDevices:\n raise ValueError(\"memberDevices cannot be greater than totalDevices\")\n self._memberDevices = number\n\n memberDevices = property(_getMemberDevices, _setMemberDevices,\n doc=\"number of member devices\")\n\n def _getSpares(self):\n spares = 0\n if self.memberDevices is not None:\n if self.totalDevices is not None and \\\n self.totalDevices > self.memberDevices:\n spares = self.totalDevices - self.memberDevices\n elif self.totalDevices is None:\n spares = self.memberDevices\n self._totalDevices = self.memberDevices\n return spares\n\n def _setSpares(self, spares):\n max_spares = self.level.get_max_spares(len(self.parents))\n if spares > max_spares:\n log.debug(\"failed to set new spares value %d (max is %d)\",\n spares, max_spares)\n raise errors.DeviceError(\"new spares value is too large\")\n\n if self.totalDevices > spares:\n self.memberDevices = self.totalDevices - spares\n\n spares = property(_getSpares, _setSpares)\n\n def _addParent(self, member):\n super(MDRaidArrayDevice, self)._addParent(member)\n\n ## XXX TODO: remove this whole block of activation code\n if self.exists and member.format.exists and flags.installer_mode:\n member.setup()\n udev.settle()\n\n if self.spares <= 0:\n try:\n mdraid.mdnominate(member.path)\n # mdadd causes udev events\n udev.settle()\n except errors.MDRaidError as e:\n log.warning(\"failed to add member %s to md array %s: %s\",\n member.path, self.path, e)\n\n if self.status and member.format.exists:\n # we always probe since the device may not be set up when we want\n # information about it\n self._size = self.currentSize\n\n # These should be incremented when adding new member devices except\n # during devicetree.populate. 
When detecting existing arrays we will\n # have gotten these values from udev and will use them to determine\n # whether we found all of the members, so we shouldn't change them in\n # that case.\n if not member.format.exists:\n self._totalDevices += 1\n self.memberDevices += 1\n\n # The new member hasn't been added yet, so account for it explicitly.\n is_disk = self.isDisk and member.isDisk\n for p in self.parents:\n p.format._hidden = is_disk\n\n member.format._hidden = is_disk\n\n def _removeParent(self, member):\n \"\"\" If this is a raid array that is not actually redundant and it\n appears to have formatting and therefore probably data on it,\n removing one of its devices is a bad idea.\n \"\"\"\n if not self.level.has_redundancy and self.exists and member.format.exists:\n raise errors.DeviceError(\"cannot remove members from existing raid0\")\n\n super(MDRaidArrayDevice, self)._removeParent(member)\n self.memberDevices -= 1\n\n @property\n def status(self):\n \"\"\" This device's status.\n\n For now, this should return a boolean:\n True the device is open and ready for use\n False the device is not open\n \"\"\"\n # check the status in sysfs\n status = False\n if not self.exists:\n return status\n\n if os.path.exists(self.path) and not self.sysfsPath:\n # the array has been activated from outside of blivet\n self.updateSysfsPath()\n\n # make sure the active array is the one we expect\n info = udev.get_device(self.sysfsPath)\n uuid = udev.device_get_md_uuid(info)\n if uuid and uuid != self.uuid:\n log.warning(\"md array %s is active, but has UUID %s -- not %s\",\n self.path, uuid, self.uuid)\n self.sysfsPath = \"\"\n return status\n\n state_file = \"%s/md/array_state\" % self.sysfsPath\n try:\n state = open(state_file).read().strip()\n if state in (\"clean\", \"active\", \"active-idle\", \"readonly\", \"read-auto\"):\n status = True\n # mdcontainers have state inactive when started (clear if stopped)\n if self.type == \"mdcontainer\" and state == \"inactive\":\n status = True\n except IOError:\n status = False\n\n return status\n\n def memberStatus(self, member):\n if not (self.status and member.status):\n return\n\n member_name = os.path.basename(member.sysfsPath)\n path = \"/sys/%s/md/dev-%s/state\" % (self.sysfsPath, member_name)\n try:\n state = open(path).read().strip()\n except IOError:\n state = None\n\n return state\n\n @property\n def degraded(self):\n \"\"\" Return True if the array is running in degraded mode. \"\"\"\n rc = False\n degraded_file = \"%s/md/degraded\" % self.sysfsPath\n if os.access(degraded_file, os.R_OK):\n val = open(degraded_file).read().strip()\n if val == \"1\":\n rc = True\n\n return rc\n\n @property\n def members(self):\n \"\"\" Returns this array's members.\n\n If the array is a BIOS RAID array then its unique parent\n is a container and its actual member devices are the\n container's parents.\n\n :rtype: list of :class:`StorageDevice`\n \"\"\"\n if self.type == \"mdbiosraidarray\":\n members = self.parents[0].parents\n else:\n members = self.parents\n return list(members)\n\n @property\n def complete(self):\n \"\"\" An MDRaidArrayDevice is complete if it has at least as many\n component devices as its count of active devices.\n \"\"\"\n return (self.memberDevices <= len(self.members)) or not self.exists\n\n @property\n def devices(self):\n \"\"\" Return a list of this array's member device instances. 
\"\"\"\n return self.parents\n\n def _postSetup(self):\n super(MDRaidArrayDevice, self)._postSetup()\n self.updateSysfsPath()\n\n def _setup(self, orig=False):\n \"\"\" Open, or set up, a device. \"\"\"\n log_method_call(self, self.name, orig=orig, status=self.status,\n controllable=self.controllable)\n disks = []\n for member in self.devices:\n member.setup(orig=orig)\n disks.append(member.path)\n\n mdraid.mdactivate(self.path,\n members=disks,\n array_uuid=self.mdadmFormatUUID)\n\n def _postTeardown(self, recursive=False):\n super(MDRaidArrayDevice, self)._postTeardown(recursive=recursive)\n # mdadm reuses minors indiscriminantly when there is no mdadm.conf, so\n # we need to clear the sysfs path now so our status method continues to\n # give valid results\n self.sysfsPath = ''\n\n def teardown(self, recursive=None):\n \"\"\" Close, or tear down, a device. \"\"\"\n log_method_call(self, self.name, status=self.status,\n controllable=self.controllable)\n # we don't really care about the return value of _preTeardown here.\n # see comment just above mddeactivate call\n self._preTeardown(recursive=recursive)\n\n # Since BIOS RAID sets (containers in mdraid terminology) never change\n # there is no need to stop them and later restart them. Not stopping\n # (and thus also not starting) them also works around bug 523334\n if self.type == \"mdcontainer\" or self.type == \"mdbiosraidarray\":\n return\n\n if self.isDisk:\n # treat arrays whose members are disks as partitionable disks\n return\n\n # We don't really care what the array's state is. If the device\n # file exists, we want to deactivate it. mdraid has too many\n # states.\n if self.exists and os.path.exists(self.path):\n mdraid.mddeactivate(self.path)\n\n self._postTeardown(recursive=recursive)\n\n def preCommitFixup(self):\n \"\"\" Determine create parameters for this set \"\"\"\n log_method_call(self, self.name)\n # UEFI firmware/bootloader cannot read 1.1 or 1.2 metadata arrays\n if getattr(self.format, \"mountpoint\", None) == \"/boot/efi\":\n self.metadataVersion = \"1.0\"\n\n def _postCreate(self):\n # this is critical since our status method requires a valid sysfs path\n self.exists = True # this is needed to run updateSysfsPath\n self.updateSysfsPath()\n StorageDevice._postCreate(self)\n\n # update our uuid attribute with the new array's UUID\n # XXX this won't work for containers since no UUID is reported for them\n info = mdraid.mddetail(self.path)\n self.uuid = info.get(\"UUID\")\n for member in self.devices:\n member.format.mdUuid = self.uuid\n\n def removeStaleLVM():\n \"\"\" Remove any stale LVM metadata that pre-existed in a new array's on-disk footprint. \"\"\"\n log.debug(\"waiting 5s for activation of stale lvm on new md array %s\", self.path)\n time.sleep(5)\n udev.settle()\n\n try:\n pv_info = lvm.pvinfo(device=self.path)[self.path]\n except (errors.LVMError, KeyError) as e:\n return\n\n vg_uuid = None\n try:\n vg_uuid = udev.device_get_vg_uuid(pv_info)\n except KeyError:\n return\n\n if vg_uuid:\n log.info(\"removing stale LVM metadata found on %s\", self.name)\n try:\n lvm.vgremove(None, vg_uuid=vg_uuid)\n except errors.LVMError as e:\n log.error(\"Failed to remove stale volume group from newly-created md array %s: %s\",\n self.path, str(e))\n raise\n\n removeStaleLVM()\n\n def _create(self):\n \"\"\" Create the device. 
\"\"\"\n log_method_call(self, self.name, status=self.status)\n disks = [disk.path for disk in self.devices]\n spares = len(self.devices) - self.memberDevices\n mdraid.mdcreate(self.path,\n self.level,\n disks,\n spares,\n metadataVer=self.metadataVersion,\n bitmap=self.createBitmap,\n chunkSize=self.chunkSize)\n udev.settle()\n\n def _remove(self, member):\n self.setup()\n # see if the device must be marked as failed before it can be removed\n fail = (self.memberStatus(member) == \"in_sync\")\n mdraid.mdremove(self.path, member.path, fail=fail)\n\n def _add(self, member):\n self.setup()\n if self.level.has_redundancy:\n raid_devices = None\n else:\n raid_devices = self.memberDevices\n\n mdraid.mdadd(self.path, member.path, raid_devices=raid_devices)\n\n @property\n def formatArgs(self):\n formatArgs = []\n if self.format.type == \"ext2\":\n recommended_stride = self.level.get_recommended_stride(self.memberDevices)\n if recommended_stride:\n formatArgs = ['-R', 'stride=%d' % recommended_stride ]\n return formatArgs\n\n @property\n def mediaPresent(self):\n # Containers should not get any format handling done\n # (the device node does not allow read / write calls)\n if self.type == \"mdcontainer\":\n return False\n else:\n return super(MDRaidArrayDevice, self).mediaPresent\n\n @property\n def model(self):\n return self.description\n\n @property\n def partitionable(self):\n return (self.type != \"mdcontainer\" and\n (self.type == \"mdbiosraidarray\" or\n (self.exists and self.parents and\n all(p.partitionable for p in self.parents))))\n\n @property\n def isDisk(self):\n return (self.type != \"mdcontainer\" and\n (self.type == \"mdbiosraidarray\" or\n (self.exists and self.parents and\n all(p.isDisk for p in self.parents))))\n\n def dracutSetupArgs(self):\n return set([\"rd.md.uuid=%s\" % self.mdadmFormatUUID])\n\n def populateKSData(self, data):\n if self.isDisk:\n return\n\n super(MDRaidArrayDevice, self).populateKSData(data)\n data.level = self.level.name\n data.spares = self.spares\n data.members = [\"raid.%d\" % p.id for p in self.parents]\n data.preexist = self.exists\n data.device = self.name\n\n if not self.exists:\n # chunk size is meaningless on RAID1, so do not add our default\n # value to generated kickstart\n if self.level != raid.RAID1:\n data.chunk_size = self.chunkSize.convertTo(\"KiB\")\n","sub_path":"1/blivet/devices/md.py","file_name":"md.py","file_ext":"py","file_size_in_byte":25847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278659402","text":"'''\nAuthor : Praveen\nDate : 10-08-2018\n'''\n\ndef integer_division(x, a):\n \"\"\"\n x: a non-negative integer argument\n a: a positive integer argument\n\n returns: integer, the integer division of x divided by a.\n \"\"\"\n count = 0\n while x >= a:\n count += 1\n x = x - a\n return count\n\ndef main():\n data = input()\n data1 = data.split()\n print(integer_division(int(data1[0]), int(data1[1])))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cspp1_practise/cspp1-assignments/m11/Integer Division Exercise/integer_division.py","file_name":"integer_division.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583725986","text":"import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 参数\nn = 60\nd = 60\nd_vnsp = 10\n\ndef get_factors(path_start, path_end):\n files = os.listdir(path_start)\n for file in files:\n df_data= 
# data starts no earlier than 2005-01-04\n df_data = pd.read_csv(path_start + '/' + file, index_col=0)\n df_data.index = pd.to_datetime(df_data.index).values.astype('datetime64[D]')\n df1 = df_data.loc['2005':].replace(0, np.nan) # replace 0 with nan\n df1.dropna(axis=0, inplace=True) # drop rows containing nan\n\n # factor construction\n for i in range(n, len(df1.index)-1):\n # take the data from i-n to i-1 and compute weights from turnover\n initial_array = 1 - df1.loc[df1.index[i-n]:df1.index[i-1], '成交均价'] / df1.loc[df1.index[i], '成交均价']\n array_1 = df1.loc[df1.index[i-n]:df1.index[i-1], '换手率']\n array_2 = (1-array_1).iloc[::-1].shift().fillna(1).cumprod().iloc[::-1]\n w_array = array_1 * array_2\n w_array_pct = w_array / w_array.sum()\n gain_loss_array = initial_array * w_array_pct\n gain = 0\n loss = 0\n for j in gain_loss_array:\n if j > 0:\n gain += j\n elif j < 0:\n loss += j\n df1.loc[df1.index[i+1], 'gain'] = gain\n df1.loc[df1.index[i+1], 'loss'] = loss\n df1.dropna(axis=0, inplace=True)\n\n # factor smoothing: the larger d is, the higher the lag and the smoother the curve\n def LLT(data, d):\n llt = [(data.iloc[0]+data.iloc[1])/2, (data.iloc[0]+data.iloc[1])/2]\n alpha = 2/(d+1)\n for i in range(2, len(data.index)):\n llt.append((alpha-alpha**2/4)*data.iloc[i] + alpha**2/2*data.iloc[i-1] - (alpha-3*alpha**2/4)*data.iloc[i-2] \\\n + 2*(1-alpha)*llt[i-1] - (1-alpha)**2*llt[i-2])\n return llt\n\n df1['gain_llt'] = LLT(df1['gain'], d)\n df1['loss_llt'] = LLT(df1['loss'], d)\n df1['vnsp_llt'] = LLT(df1['gain_llt'] + np.square(df1['loss_llt']), d_vnsp)\n\n df2 = df1.iloc[(d+d_vnsp):, :].copy()\n\n # draw the first Y axis\n fig = plt.figure(figsize=(20,8), dpi=80)\n ax = fig.add_subplot(111)\n lin1 = ax.plot(df2.index, df2['收盘'], label='close price')\n ax.set_ylabel('close price')\n \n # draw the second Y axis\n ax1 = ax.twinx()\n\n lin2 = ax1.plot(df2.index, df2['gain'], label='gain', color='orange')\n lin3 = ax1.plot(df2.index, df2['loss'], label='loss', color='yellow')\n lin4 = ax1.plot(df2.index, df2['gain_llt'], label='gain_llt', color='red')\n lin5 = ax1.plot(df2.index, df2['loss_llt'], label='loss_llt', color='green')\n lin6 = ax1.plot(df2.index, df2['vnsp_llt'], label='vnsp_llt', color='purple')\n ax1.set_ylabel('factor value')\n \n # merge the legends\n lins = lin2 + lin3 + lin4 + lin5 + lin6 + lin1\n labs = [l.get_label() for l in lins]\n ax.legend(lins, labs, bbox_to_anchor=(1.05, 0), loc=3, borderaxespad=0, fontsize=15)\n plt.savefig('./figures/' + file[:-4] + '_factors.png', dpi=600, bbox_inches='tight')\n \n df2.to_csv(path_end + '/' + file)","sub_path":"codes/getFactors.py","file_name":"getFactors.py","file_ext":"py","file_size_in_byte":3305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"582629470","text":"# Task #2\n# In the lecture we looked at an example for the military enlistment office.\n# Now we also know the recruit's height. 
Extend this application with the following conditions:\n#\n# A check of the recruit's age;\n# The number of children he has;\n# Whether he is currently studying.\n# Task #3\n# Build an application that determines\n# the zodiac sign from a date of birth.\n# Example:\n#\n# Enter the month: март\n# Enter the day: 6\n#\n# Output:\n# Рыбы\n\n# Task #2\n\nheight = 300\nage = 23\nchildren = 0\nstudies = 0\n\nif age in range(18, 26) and children < 2 and not studies:\n if height < 170:\n print('В танкисты')\n elif height < 185:\n print('На флот')\n elif height < 200:\n print('В десантники')\n else:\n print('В другие войска')\nelse:\n print('У Вас отсрочка или непризывной возраст')\n\n# Task #3\n\nmonth = input('Введите месяц: ')\ndate = int(input('Введите число: '))\n\nzodiac = [['Козерог', 19], ['Водолей', 16], ['Рыбы', 12], ['Овен', 19], ['Телец', 14], ['Близнецы', 20], ['Рак', 21],\n ['Лев', 10], ['Дева', 16], ['Весы', 31], ['Скорпион', 22], ['Стрелец', 18]]\n\nmonths = ['январь', 'февраль', 'март', 'апрель', 'май', 'июнь', 'июль', 'август', 'сентябрь', 'октябрь', 'ноябрь',\n 'декабрь']\n\nnumb_month = months.index(month)\n\nprint('Вывод:')\n\nfor item in zodiac:\n i = zodiac.index(item)\n if numb_month == i and date <= zodiac[i][1]:\n print(zodiac[i][0])\n elif numb_month == i and date >= zodiac[i][1]:\n if numb_month == 11:\n print(zodiac[0][0])\n else:\n print(zodiac[i + 1][0])\n","sub_path":"netology_basic_python_tasks/2_conditional_constructions_comparison_operations/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"240760101","text":"\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport random\nimport pprint\nimport datetime\nimport dateutil.tz\nimport argparse\nimport numpy as np\nfrom miscc.config import cfg, cfg_from_file\nfrom datasets import T2SDataset\n\n\nfrom models.model32 import RNN_ENCODER, CNN_ENCODER\nfrom models.dcgan32 import G_NET, D_NET_32, KL_loss\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport torch.backends.cudnn as cudn\n\nfrom nltk.tokenize import RegexpTokenizer\nimport json\n\nimport onnx\n\nclass Eval(object):\n def __init__(self):\n cfg_from_file(\"./cfgs/shape16.yml\")\n self.dataset = T2SDataset(cfg.DATA_DIR, 'train', cfg.TREE.BASE_SIZE)\n self.wordtoix = self.dataset.wordtoix\n\n\n text_encoder = RNN_ENCODER(self.dataset.n_words, nhidden=cfg.TEXT.EMBEDDING_DIM)\n state_dict = torch.load(\"./data/models/text_encoder66.pth\", map_location=lambda storage, loc: storage)\n text_encoder.load_state_dict(state_dict)\n for p in text_encoder.parameters():\n p.requires_grad = False\n print('Load text encoder from:', cfg.TRAIN.NET_E)\n text_encoder.eval()\n self.text_encoder = text_encoder\n\n\n netG = G_NET()\n self.netG = torch.nn.DataParallel(netG)\n state_dict = torch.load('./data/models/netG_epoch_239.pth', map_location=lambda storage, loc: storage)\n self.netG.load_state_dict(state_dict)\n print('Load G from: ', cfg.TRAIN.NET_G)\n self.netG.eval()\n\n self.noise = torch.FloatTensor(1, 128)\n self.noise.data.normal_(0.0, 1.0)\n\n def create(self, text):\n\n tokenizer = RegexpTokenizer(r'\\w+')\n tokens = tokenizer.tokenize(text.lower())\n\n pprint.pprint(tokens)\n sent = []\n for item in tokens:\n try:\n w_id = self.wordtoix[item]\n sent.append(w_id)\n except KeyError:\n print(\"An exception occurred\")\n\n if len(sent) >= 1:\n cap = np.zeros((cfg.TEXT.WORDS_NUM, 1), dtype='int64')\n 
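# Editor's note: cap is a fixed-length, zero-padded (WORDS_NUM x 1) buffer;\n # e.g. if cfg.TEXT.WORDS_NUM were 5 and sent were [7, 3], the lines below\n # would produce cap == [7, 3, 0, 0, 0] after np.squeeze (illustrative values).\n 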
cap_len = len(sent)\n cap[:cap_len, 0] = sent\n cap = np.squeeze(cap)\n\n cap = np.array([cap])\n cap_lens = np.array([cap_len])\n\n caps = torch.from_numpy(cap)\n cap_lens = torch.from_numpy(cap_lens)\n\n hidden = self.text_encoder.init_hidden(1)\n words_embs, sent_emb = self.text_encoder(caps, cap_lens, hidden)\n fake_shapes, mu, logvar = self.netG(self.noise, sent_emb)\n\n if False:\n f = open('output.json', 'w')\n json.dump(self.wordtoix, f)\n torch.onnx.export(self.text_encoder,(caps, cap_lens, hidden),'text_encoder.proto',verbose=False)\n torch.onnx.export(self.netG.module,(self.noise, sent_emb),'netG.proto',verbose=False)\n\n\n return fake_shapes[0]\n\n\nif __name__ == \"__main__\":\n algo = Eval()\n algo.create(\"this blue table .\")","sub_path":"ui/algo.py","file_name":"algo.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"647235428","text":"\n# import plotly\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n\n# Scientific libraries\nimport numpy as np\n\n# points = np.array([(1, 1), (2, 4), (3, 1), (9, 3)])\npoints = np.array([\n (-3, 19925.7612291),\n (-2, 11205.3989125),\n (-1, 4978.38659603),\n (0, 1244.7242795),\n (1, 4.41196298026),\n (2, 1257.44964646),\n (3, 5003.83732993),\n (4, 11243.5750134),\n (5, 19976.6626969),\n])\n\n# get x and y vectors\nx = points[:, 0]\ny = points[:, 1]\n\n# calculate polynomial\nz = np.polyfit(x, y, 2)\nf = np.poly1d(z)\nprint(f)\n\n# calculate new x's and y's\nx_new = np.linspace(x[0], x[-1], 50)\ny_new = f(x_new)\n\n# Creating the dataset, and generating the plot\ntrace1 = go.Scatter(\n x=x,\n y=y,\n mode='markers',\n marker=go.Marker(color='rgb(255, 127, 14)'),\n name='Data'\n)\n\ntrace2 = go.Scatter(\n x=x_new,\n y=y_new,\n mode='lines',\n marker=go.Marker(color='rgb(31, 119, 180)'),\n name='Fit'\n)\n\nannotation = go.Annotation(\n x=6,\n y=-4.5,\n text='$\\textbf{Fit}: 0.43X^3 - 0.56X^2 + 16.78X + 10.61$',\n showarrow=False\n)\nlayout = go.Layout(\n title='Polynomial Fit in Python',\n plot_bgcolor='rgb(229, 229, 229)',\n xaxis=go.XAxis(zerolinecolor='rgb(255,255,255)',\n gridcolor='rgb(255,255,255)'),\n yaxis=go.YAxis(zerolinecolor='rgb(255,255,255)',\n gridcolor='rgb(255,255,255)'),\n annotations=[annotation]\n)\n\ndata = [trace1, trace2]\nfig = go.Figure(data=data, layout=layout)\n\npy.plot(fig, filename='Polynomial-Fit-in-python')\n","sub_path":"send_to_plotly.py","file_name":"send_to_plotly.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"281552044","text":"#!/usr/bin/env python3\r\n#Gets user input in degrees Celsius\r\nCentigrade = float(input(\"Enter temp in C \"))\r\n\r\n#Applies the centigrade to fahrenheit conversion formula\r\nFahrenheit = (Centigrade*1.8) + 32\r\n\r\n#prints the answer\r\nprint(Centigrade, \" Centigrade equals \", Fahrenheit, \" Fahrenheit\")\r\n\r\n","sub_path":"CtoF.py","file_name":"CtoF.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"554028158","text":"#from statistics import median\n#import collections\n#aa = collections.Counter(a) # list to counter || .most_common(2) pulls out the 2 largest, e.g. a[0][0]\nfrom fractions import gcd\nfrom itertools import combinations,permutations,accumulate # (string,3) takes 3 at a time\n#from collections import deque\nfrom collections import deque,defaultdict,Counter\nimport decimal\n
import re\n#import bisect\n#\n# d = m - k[i] - k[j]\n# if kk[bisect.bisect_right(kk,d) - 1] == d:\n#\n#\n#\n# if it is too slow in Python, submitting with PyPy may pass!!\n#\n#\n# my_round_int = lambda x:np.round((x*2 + 1)//2)\n# round half up\nimport sys\nsys.setrecursionlimit(10000000)\nmod = 10**9 + 7\n#mod = 9982443453\ndef readInts():\n return list(map(int,input().split()))\ndef I():\n return int(input())\nn,m = readInts()\nbase = []\nif n == 1 and m == 0:\n print(0)\n exit()\nelif n == 2 and m == 0:\n print(10)\n exit()\nelif n == 3 and m == 0:\n print(100)\n exit()\nif n >= 1:\n base.append('1')\nif n >= 2:\n base.append('0')\nif n == 3:\n base.append('0')\ndic = defaultdict(int)\nfor i in range(m):\n s,c = readInts()\n s -= 1\n if dic[s] != 0:\n if int(base[s]) == c:\n pass\n else:\n print(-1)\n exit()\n else:\n dic[s] = 1\n base[s] = str(c)\n if s == 0 and c == 0 and n >= 2:\n print(-1)\n exit()\nprint(*base,sep='')\n","sub_path":"Python_codes/p02761/s117466130.py","file_name":"s117466130.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"490813385","text":"from random import randrange\n\n# A node of the treap\nclass TreapNode:\n\tdef __init__(self, data, priority=100, left=None, right=None):\n\t\tself.data = data\n\t\tself.priority = randrange(priority)\n\t\tself.left = left\n\t\tself.right = right\n\n# Function to perform a left rotation\ndef rotateLeft(root):\n\tR = root.right\n\tX = root.right.left\n\n\t# rotate\n\tR.left = root\n\troot.right = X\n\n\t# set root\n\treturn R\n\n\n# Function to perform a right rotation\ndef rotateRight(root):\n\tL = root.left\n\tY = root.left.right\n\n\t# rotation\n\tL.right = root\n\troot.left = Y\n\n\t# return the new root\n\treturn L\n\n# Recursive function to insert a key with a priority into a treap\ndef insertNode(root, data):\n\tif root is None:\n\t\treturn TreapNode(data)\n\n # if data is smaller than the root's, insert into the left subtree\n # otherwise insert into the right subtree\n\tif data < root.data:\n\t\troot.left = insertNode(root.left, data)\n\n\t\t# rotate right if the heap property is violated\n\t\tif root.left and root.left.priority > root.priority:\n\t\t\troot = rotateRight(root)\n\telse:\n\t\troot.right = insertNode(root.right, data)\n\n\t\t# rotate left if the heap property is violated\n\t\tif root.right and root.right.priority > root.priority:\n\t\t\troot = rotateLeft(root)\n\n\treturn root\n\n\n# Print the nodes of the treap\ndef printTreap(root, space):\n\theight = 10\n\n\tif root is None:\n\t\treturn\n\n\tspace += height\n\tprintTreap(root.right, space)\n\n\tfor i in range(height, space):\n\t\tprint(' ', end='')\n\n\tprint((root.data, root.priority))\n\tprintTreap(root.left, space)\n\n\nif __name__ == '__main__':\n\t# Keys of the treap\n\tkeys = [5, 2, 1, 4, 9, 8, 10]\n\n\t# Build the treap\n\troot = None\n\tfor key in keys:\n\t\troot = insertNode(root, key)\n\n\tprintTreap(root, 0)\n","sub_path":"2021/week11/treap_solution_complète.py","file_name":"treap_solution_complète.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"432006698","text":"import discord\nimport datetime\nimport random\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport os\n\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n print(\"login\")\n print(client.user.name)\n 
print(client.user.id)\n print(\"------------------\")\n game = discord.Game(\"음메 대신\")\n await client.change_presence(status=discord.Status.online, activity=game)\n\n\n@client.event\nasync def on_message(message, value=None):\n if message.content.startswith(\"/코로나\"):\n\n url = 'https://search.naver.com/search.naver?sm=top_hty&fbm=1&ie=utf8&query=%EC%BD%94%EB%A1%9C%EB%82%98'\n html = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(html, 'html.parser')\n\n people = soup.findAll('strong', {'class': 'num'})\n data_list = []\n for i in people:\n data_list.append(i.get_text().replace('\\n', '').replace(' ', ''))\n embed = discord.Embed(color=0xff0000)\n embed.add_field(name=\"코로나 실시간 현황\", value=\"확진자 : \" + data_list[0] + \" 명\" + \"\\n격리해제 : \" + data_list[1] + \" 명\" + \"\\n검사 중 : \" + data_list[2] + \" 명\" + \"\\n사망자 : \" + data_list[3] + \" 명\", inline=True)\n embed.set_thumbnail(\n url=\"http://post.phinf.naver.net/MjAyMDAyMDNfMjIw/MDAxNTgwNjk0MzkwOTY2.pKNl4PbotKUn_vmYoHNTpKdsDx5HuuAvpA1p8NSQDaYg.1AYI3_Uf7Bk7ALP2lHevuR9ZThmuiHGi0fTNuMPPxnsg.PNG/IBuNUMjY84YotKgVXGthVvcdYVi4.jpg\")\n await message.channel.send(embed=embed) \n if message.content.startswith(\"/음메\"):\n date = datetime.datetime.utcfromtimestamp(((int(message.author.id) >> 22) + 1420070400000) / 1000)\n embed = discord.Embed(color=0xff0000)\n embed.add_field(name=\"서버\", value=\"NEON\", inline=True)\n embed.add_field(name=\"사용자\", value=message.author.name, inline=True)\n embed.add_field(name=\"직군\", value=message.author.top_role, inline=True)\n embed.add_field(name=\"사용자 정보\", value=message.author.display_name, inline=True)\n embed.set_thumbnail(url=message.author.avatar_url)\n embed.add_field(name=\"제작자\", value=\"음메#7491\", inline=True)\n embed.set_image(url=\"https://o.remove.bg/uploads/a3011d27-6843-495a-85ad-fd723ef974f9/prop_ron_drop_sign_.png\")\n await message.channel.send(embed=embed)\n if message.content.startswith(\"/제작자\"):\n await message.channel.send(\"제작자 : 음메#7491\")\n if message.content.startswith(\"/안녕\"):\n await message.channel.send(\"안녕 나는 음메야 !\")\n if message.content.startswith(\"/가위바위보 가위\"):\n rsp = \"123\"\n rsp1 = random.choice(rsp)\n if rsp1 == \"1\":\n emb = discord.Embed(title='가위바위보', color=0xfff000)\n emb.add_field(name='승부결과!!', value='음메 :v: 당신 :v: 무승부!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"2\":\n emb = discord.Embed(title='가위바위보', color=0xff0000)\n emb.add_field(name='승부결과!!', value='음메 :fist: 당신 :v: 봇 승리!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"3\":\n emb = discord.Embed(title='가위바위보', color=0x0dff00)\n emb.add_field(name='승부결과!!', value='음메 :raised_hand: 당신 :v: 당신 승리!')\n await message.channel.send(content=None, embed=emb)\n\n if message.content.startswith(\"/가위바위보 바위\"):\n rsp = \"123\"\n rsp1 = random.choice(rsp)\n if rsp1 == \"1\":\n emb = discord.Embed(title='가위바위보', color=0x0dff00)\n emb.add_field(name='승부결과!!', value='음메 :v: 당신 :fist: 당신 승리!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"2\":\n emb = discord.Embed(title='가위바위보', color=0xfff000)\n emb.add_field(name='승부결과!!', value='음메 :fist: 당신 :fist: 무승부!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"3\":\n emb = discord.Embed(title='가위바위보', color=0xff0000)\n emb.add_field(name='승부결과!!', value='음메 :raised_hand: 당신 :fist: 봇 승리!')\n await message.channel.send(content=None, embed=emb)\n if message.content.startswith(\"/가위바위보 보\"):\n rsp = \"123\"\n rsp1 = random.choice(rsp)\n if rsp1 == \"1\":\n emb = 
discord.Embed(title='가위바위보', color=0xff0000)\n emb.add_field(name='승부결과!!', value='음메 :v: 당신 :raised_hand: 봇 승리!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"2\":\n emb = discord.Embed(title='가위바위보', color=0x0dff00)\n emb.add_field(name='승부결과!!', value='음메 :fist: 당신 :raised_hand: 당신 승리!')\n await message.channel.send(content=None, embed=emb)\n if rsp1 == \"3\":\n emb = discord.Embed(title='가위바위보', color=0xfff000)\n emb.add_field(name='승부결과!!', value='음메 :raised_hand: 당신 :raised_hand: 무승부!')\n await message.channel.send(content=None, embed=emb)\n if message.content.startswith('/명령어'):\n embed = discord.Embed(title=\"명령어\", description=\"\"\"\\n\\n\n /음메\\n\n /가위바위보 가위\\n\n /가위바위보 바위\\n\n /가위바위보 보\\n\n /출근\\n\n /퇴근\\n\n /clear\\n\n /코로나\\n\"\"\", color=0XFF0000)\n\n await message.channel.send(embed=embed)\n \naccess_token = os.environ[\"BOT_TOKEN\"]\nclient.run(access_token)\n","sub_path":"neontest.py","file_name":"neontest.py","file_ext":"py","file_size_in_byte":5758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"326448539","text":"# coding: utf-8\nimport datetime\nimport json\nimport select\nfrom threading import currentThread\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.conf import settings\nfrom transliterate import translit\n\nfrom chat_app.helpers import connection\nfrom chat_app.models import Message, ExtendedEAVSetting as Setting, Channel\nfrom chat_app.enums import BracesDict, ReasonsEnum, SettingsEnum, ModerationActionsEnum\nfrom chat_app.illegal_words import ILLEGAL_WORDS\nfrom chat_app.command import CommandManager\n\nPING = 'PING'\nPONG = 'PONG'\n\n\n########################################################################################################################\n\n\nclass MsgSender(object):\n \"\"\"Class responsible for sending messages\"\"\"\n def __init__(self, twitch_socket, channel_name):\n super().__init__()\n self.twitch_socket = twitch_socket\n self.channel_name = channel_name\n\n def send_message(self, message):\n \"\"\"Send a message to the channel's chat\"\"\"\n self.twitch_socket.send(bytes(f'PRIVMSG #{self.channel_name} :{message}\\r\\n'.encode()))\n\n def pong(self, response):\n \"\"\"Reply to Twitch's ping\"\"\"\n self.twitch_socket.send(bytes(f'{PONG} {response}\\r\\n'.encode()))\n\n def get_mods(self):\n \"\"\"Get the moderators' names\"\"\"\n self.send_message('/mods')\n\n def disconnect(self):\n \"\"\"Command to disconnect from the chat server\"\"\"\n self.send_message('/disconnect')\n\n # moderation tools\n def timeout(self, username, seconds=600):\n \"\"\"Timeout command\"\"\"\n timeout = ModerationActionsEnum.TIMEOUT\n self.send_message(f'/{timeout} {username} {seconds}')\n\n def purge(self, username):\n \"\"\"Purge command. 
A purge is an internal notion here: it is really just a 1-second timeout\"\"\"\n self.timeout(username, 1)\n\n def ban(self, username):\n \"\"\"Ban command\"\"\"\n ban = ModerationActionsEnum.BAN\n self.send_message(f'/{ban} {username}')\n\n def unban(self, username):\n \"\"\"Unban command\"\"\"\n unban = ModerationActionsEnum.UNBAN\n self.send_message(f'/{unban} {username}')\n\n\n########################################################################################################################\n\n\nclass MsgValidator(object):\n \"\"\"Abstract validator class\"\"\"\n def validate(self, message):\n \"\"\"Generic validation method; returns a tuple (passed, reason)\"\"\"\n raise NotImplementedError()\n\n\nclass BracesValidator(MsgValidator):\n \"\"\"Brace validator\"\"\"\n # dictionary of brace counters\n counters = {x: 0 for x in BracesDict.ALL}\n # positions of braces in the string\n brace_indexes = []\n # possible symbol combinations\n # inner tuple: (symbol for a brace on the left, symbol for a brace on the right)\n possible_smile_combo_symbols = (\n (':', ':'),\n ('=', '='),\n ('-:', ':-'),\n )\n\n def validate(self, message):\n # count the opening and closing braces\n count_validation_result = self._validate_braces_count(message)\n if count_validation_result:\n return True, None\n\n # look for exceptions\n index_validation_result = self._validate_braces_index(message)\n if index_validation_result:\n return True, None\n\n return False, ReasonsEnum.BRACES\n\n def _validate_braces_count(self, message):\n \"\"\"Validate by brace counts\"\"\"\n # record positions within the original message (they are needed as\n # message offsets by _validate_braces_index below)\n for idx, symbol in enumerate(message):\n if symbol not in BracesDict.ALL:\n continue\n self.brace_indexes.append(idx)\n self.counters[symbol] += 1\n\n result = True\n for opener, closer in (BracesDict.BRACES, BracesDict.PARENTHESES, BracesDict.SQUARES):\n # compare the counts pairwise; if they differ, mark for the next validation stage\n if self.counters[opener] != self.counters[closer]:\n result = False\n break\n\n return result\n\n def _validate_braces_index(self, message):\n \"\"\"Check against the exception patterns; examples are based on round parentheses.\n\n 
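Editor's illustration (derived from possible_smile_combo_symbols above):\n smiley-like sequences such as ':)', '(:', '=)', '(-:', ':-)' and 'x)D'\n are treated as exceptions rather than as unbalanced braces.\n 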
Примеры на основе круглых скобок\"\"\"\n is_exception = False\n for i in self.brace_indexes:\n # проверки на всякое :), )=, :-(\n for combo_left, combo_right in self.possible_smile_combo_symbols:\n # предполагается, что комбинации одинаковой длины\n if combo_left == combo_right:\n # проверка любой скобки\n if message[i - 1] == combo_left or message[i + 1] == combo_left:\n is_exception = True\n break\n\n else:\n if (\n message[i + 1:i + len(combo_left) + 1] == combo_left or\n message[i - len(combo_right):i] == combo_right\n ):\n is_exception = True\n break\n\n if not is_exception:\n # проверка на x)D\n if message[i - 1].lower() == 'x' and message[i + 1].lower() == 'd' and message[i] in BracesDict.CLOSERS:\n is_exception = True\n\n return is_exception\n\n\nclass IllegalWordsValidator(MsgValidator):\n \"\"\"Валидатор на запрещенные слова\"\"\"\n keymap_dict = {\n 'q': 'й',\n 'w': 'ц',\n 'e': 'у',\n 'r': 'к',\n 't': 'е',\n 'y': 'н',\n 'u': 'г',\n 'i': 'ш',\n 'p': 'з',\n '[': 'х',\n ']': 'ъ',\n 'a': 'ф',\n 's': 'ы',\n 'd': 'в',\n 'g': 'п',\n 'h': 'р',\n 'j': 'о',\n 'k': 'л',\n 'l': 'д',\n ';': 'ж',\n \"'\": 'э',\n 'z': 'я',\n 'x': 'ч',\n 'c': 'с',\n 'v': 'м',\n 'b': 'и',\n 'n': 'т',\n 'm': 'ь',\n ',': 'б',\n '.': 'ю',\n }\n\n def validate(self, message):\n original = message\n traslitted = translit(message, 'ru')\n keymapped = message.translate(str.maketrans(self.keymap_dict))\n\n # для ускорения проверки слепим все три результата в одну строку\n mess = ' '.join((original, traslitted, keymapped))\n\n if any(word in mess for word in ILLEGAL_WORDS):\n return False, ReasonsEnum.ILLEGAL_WORD\n\n return True, None\n\n\n########################################################################################################################\n\n\nclass Connection(object):\n MOTD = False\n twitch_socket = None\n\n def __init__(self, twitch_socket, msg_sender, validators):\n super().__init__()\n self.twitch_socket = twitch_socket\n self.msg_sender = msg_sender\n self.validators = validators\n\n self.channel_name = msg_sender.channel_name\n self.channel, _ = Channel.objects.get_or_create(channel_name=self.channel_name)\n self.moderators = []\n self.is_owner = settings.NICK == self.channel_name\n self.is_mod = settings.NICK.lower() in self.moderators or self.is_owner\n\n self._start_loop()\n \n def _start_loop(self):\n # подключение к вебсокетам\n websocket = get_channel_layer()\n\n # получаем тред, в котором крутится твич сокет, чтобы прокидывать флаг отключения\n t = currentThread()\n\n # инициализация центра комманд\n command_centre = CommandManager()\n\n readbuffer = ''\n twitch_socket = self.twitch_socket\n\n # настройка сохранения сообщений в бд\n save_messages = Setting.objects.filter(setting_name='save_messages').first()\n if save_messages:\n save_messages = save_messages.setting_switch\n\n while getattr(t, 'do_run', True):\n r, _, _ = select.select([twitch_socket], [], [])\n if r:\n # обработка пришедшего сообщения\n readbuffer = readbuffer + twitch_socket.recv(4096).decode()\n temp = readbuffer.split('\\n')\n readbuffer = temp.pop()\n\n for line in temp:\n # ответка твичу на пинг\n if line[0] == PING:\n self.msg_sender.pong(line[1])\n continue\n\n # парсинг строки\n parts = line.split(':')\n stop_commands = ('QUIT', 'JOIN', 'PART')\n process_condition = all((\n command not in parts[1] for command in stop_commands\n ))\n\n if process_condition:\n username, message = self._process_incoming(parts)\n if self.MOTD:\n print(f'{username}: {message}')\n\n # сохранение сообщений\n db_message_id = None\n if 
if save_messages and (username and message):\n db_message = Message.objects.create(\n channel=self.channel,\n username=username,\n message=message\n )\n db_message_id = db_message.pk\n\n # validation (e.g. braces)\n is_valid = True\n reason = None\n\n # check whether a command came in\n command_centre.resolve_command(username, message, self.msg_sender)\n\n if self.is_mod or SettingsEnum.get_setting(SettingsEnum.ALWAYS_VALIDATE):\n for validator in self.validators:\n is_valid, reason = validator.validate(message)\n if not is_valid:\n break\n\n # send the message to the page via the websocket\n _now = datetime.datetime.now().strftime('%H:%M:%S')\n async_to_sync(websocket.group_send)(\n settings.WEBSOCKET_CHANNEL,\n {\n # name of the method in the consumer class, in the consumers module\n 'type': 'chat_message',\n # the message being sent\n 'message': json.dumps({\n 'is_valid': is_valid,\n 'reason': reason,\n 'datetime': _now,\n 'username': username,\n 'message': message,\n 'db_message_id': db_message_id,\n 'is_mod': self.is_mod,\n }, ensure_ascii=False)\n })\n\n self._motd_pass(parts)\n\n self.msg_sender.disconnect()\n\n @staticmethod\n def _process_incoming(parts):\n \"\"\"\n A bit of chaos: a strange string parser (not mine)\n \"\"\"\n try:\n message = parts[2][:len(parts[2]) - 1]\n except IndexError:\n message = ''\n\n usernamesplit = parts[1].split('!')\n username = usernamesplit[0]\n\n return username, message\n\n def _get_mods(self):\n mods_line = self.msg_sender.get_mods()\n # TODO: this should return the list of moderators\n return []\n\n def _motd_pass(self, parts):\n \"\"\"\n The first message from Twitch on connect is the MOTD (Message of the Day);\n it has to be handled first, before working with the chat\n \"\"\"\n for l in parts:\n if 'End of /NAMES list' in l:\n self.MOTD = True\n\n # here we get the list of moderators\n self.moderators = self._get_mods()\n\n\ndef thread_loop_init(channel_name):\n \"\"\"Function that starts the socket. 
Intended to be run in a separate thread (otherwise the application would hang).\"\"\"\n with connection(channel_name) as twitch_socket:\n msg_sender = MsgSender(twitch_socket, channel_name)\n # so that messages can be sent from the main thread\n # this may need to be reworked\n t = currentThread()\n t.msg_sender = msg_sender\n\n Connection(twitch_socket, msg_sender, validators=(BracesValidator, ))\n\n\n\n","sub_path":"chat_app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":14095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"245496280","text":"import requests\nfrom bs4 import BeautifulSoup\n\nrank_url = \"http://section.cafe.naver.com/CafeRankingList.nhn\"\nresponse = requests.get(rank_url)\n\nbs = BeautifulSoup(response.text, 'html.parser')\nresults = bs.select(\"tbody > tr > td > div > a\")\nfor result in results:\n print(result.text.strip())","sub_path":"scrappingTest.py","file_name":"scrappingTest.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"260486999","text":"import numpy as np\nimport cv2\nfrom collections import defaultdict\n\nclass BoundingBox:\n \"\"\" Constructor.\n Args:\n cls: String value representing object class.\n box: Array containing 4 int as [left, top, right, bottom]\n conf: Float value representing confidence\n \"\"\"\n def __init__(self, cls, box, bid=0, conf=1.0):\n self.cls = cls\n self.box = box\n self.conf = conf\n self.bid = bid\n\n\n'''\n Create a list of BoundingBox from text content, each line of the text represents a detection with format as: CLASS CONFIDENCE LEFT TOP RIGHT BOTTOM\n Args:\n path: String value representing the file path\n'''\ndef readDetctions(path):\n ds = []\n with open(path, 'r') as f:\n for t in f.readlines():\n segs = t.split(' ')\n # segs[1] is the confidence; pass it as conf so it is not mistaken for bid\n ds.append(BoundingBox(segs[0], [int(i) for i in segs[2:6]], conf=float(segs[1])))\n return ds\n\n\n'''\n Create a list of BoundingBox from text content, each line of the text represents a ground truth with format as: CLASS LEFT TOP RIGHT BOTTOM\n Args:\n path: String value representing the file path\n'''\ndef readGroundTruths(path):\n gs = []\n with open(path, 'r') as f:\n for t in f.readlines():\n segs = t.split(' ')\n gs.append(BoundingBox(segs[0], [int(i) for i in segs[1:5]]))\n return gs\n\n\"\"\"\nArgs:\n dets: List of BoundingBox representing detection result\n gts: List of BoundingBox representing ground truth\n\n For the details of the calculation, refer to https://github.com/rafaelpadilla/Object-Detection-Metrics#interpolating-all-points\n\"\"\"\ndef getIoUs(dets, gts, IoUThreshold=0.5):\n ld = len(dets)\n lg = len(gts)\n TP = np.zeros(ld)\n FP = np.zeros(ld)\n matched = [0] * lg\n dic = defaultdict(list)\n ious = []\n for i, g in enumerate(gts):\n dic[g.cls].append((g, i))\n\n 
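# Editor's note: each detection below is greedily matched to the best\n # not-yet-matched ground truth of its class; it counts as a true positive\n # only when IoU >= IoUThreshold and the box ids (bid) agree, otherwise it\n # is recorded as a false positive.\n 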
for i, d in enumerate(dets):\n if d.cls not in dic:\n FP[i] = 1\n continue\n iouMax, jMax = 0, -1\n # find the best matched ground truth\n for _, (g, j) in enumerate(dic[d.cls]):\n iou = IoU(d.box, g.box)\n\n# if g.bid == d.bid:\n# iouMax, jMax = iou, j\n# if matched[j]:\n# ious[matched[j] - 1] = (0, -1)\n# break\n #if iou > iouMax:\n if iou > iouMax and not matched[j]:\n #elif iou > iouMax and not matched[j]:\n iouMax, jMax = iou, j \n\n if iouMax:\n ious.append((iouMax, jMax))\n else:\n ious.append((0, -1))\n if iouMax >= IoUThreshold and matched[jMax] == 0 and gts[jMax].bid == d.bid:\n TP[i], matched[jMax] = 1, i + 1\n else:\n FP[i] = 1\n '''\n ious.append((iouMax, jMax))\n if iouMax >= IoUThreshold:\n if matched[jMax] == 0:\n TP[i], matched[jMax] = 1, i + 1\n else:\n FP[i] = 1\n else:\n FP[i] = 1\n '''\n return ious\n\n\n\"\"\"\nArgs:\n dets: List of BoundingBox representing detection result\n gts: List of BoundingBox representing ground truth\n\n For the details of the calculation, refer to https://github.com/rafaelpadilla/Object-Detection-Metrics#interpolating-all-points\n\"\"\"\ndef calculateAccuracy(dets, gts, IoUThreshold=0.5):\n ld = len(dets)\n lg = len(gts)\n TP = np.zeros(ld)\n FP = np.zeros(ld)\n matched = [0] * lg\n dic = defaultdict(list)\n ious = []\n for i, g in enumerate(gts):\n dic[g.cls].append((g, i))\n\n for i, d in enumerate(dets):\n if d.cls not in dic:\n FP[i] = 1\n continue\n iouMax, jMax = 0, 0\n # find the best matched ground truth\n for _, (g, j) in enumerate(dic[d.cls]):\n iou = IoU(d.box, g.box)\n\n# if g.bid == d.bid:\n# iouMax, jMax = iou, j\n# if matched[j]:\n# TP[matched[j] - 1] = 0\n# FP[matched[j] - 1] = 1\n# matched[j] = 0\n# break\n #if iou > iouMax:\n if iou > iouMax and not matched[j]:\n #elif iou > iouMax and not matched[j]:\n iouMax, jMax = iou, j \n\n ious.append(iouMax)\n if iouMax >= IoUThreshold and matched[jMax] == 0 and gts[jMax].bid == d.bid:\n TP[i], matched[jMax] = 1, i + 1\n else:\n FP[i] = 1\n acc_FP = np.cumsum(FP)\n acc_TP = np.cumsum(TP)\n rec = acc_TP / lg\n prec = np.divide(acc_TP, (acc_FP + acc_TP))\n ret = calculateAveragePrecision(rec, prec)\n return ret, ((sum(ious) / len(ious)) if ious else 0)\n #print(lg, acc_TP, acc_FP, rec, prec, ret)\n return ret\n\n\ndef calculateAveragePrecision(rec, prec):\n mrec = [0] + rec.tolist() + [1]\n mpre = [0] + prec.tolist() + [0]\n for i in range(len(mpre) - 1, 0, -1):\n mpre[i - 1] = max(mpre[i - 1], mpre[i])\n ap = 0\n for i in range(1, len(mrec)):\n ap += mpre[i] * (mrec[i] - mrec[i - 1])\n return ap\n\n\ndef IoU(boxA, boxB):\n # if boxes dont intersect\n if boxesIntersect(boxA, boxB) is False:\n return 0\n interArea = getIntersectionArea(boxA, boxB)\n union = getUnionAreas(boxA, boxB, interArea=interArea)\n # intersection over union\n iou = interArea / union\n assert iou >= 0\n return iou\n\n\ndef boxesIntersect(boxA, boxB):\n if boxA[0] > boxB[2]:\n return False # boxA is right of boxB\n if boxB[0] > boxA[2]:\n return False # boxA is left of boxB\n if boxA[3] < boxB[1]:\n return False # boxA is above boxB\n if boxA[1] > boxB[3]:\n return False # boxA is below boxB\n return True\n\n\ndef getIntersectionArea(boxA, boxB):\n xA = max(boxA[0], boxB[0])\n yA = max(boxA[1], boxB[1])\n xB = min(boxA[2], boxB[2])\n yB = min(boxA[3], boxB[3])\n # intersection area\n return (xB - xA + 1) * (yB - yA + 1)\n\n\ndef getUnionAreas(boxA, boxB, interArea=None):\n area_A = getArea(boxA)\n area_B = getArea(boxB)\n if interArea is None:\n interArea = getIntersectionArea(boxA, boxB)\n return float(area_A + area_B - interArea)\n\n\ndef getArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)\n\n\ndef createTracker(tracker_type):\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n if int(major_ver) < 4 and int(minor_ver) < 3:\n tracker = cv2.Tracker_create(tracker_type)\n else:\n if tracker_type == 'BOOSTING':\n tracker = cv2.TrackerBoosting_create()\n if tracker_type == 'MIL':\n tracker = cv2.TrackerMIL_create()\n if tracker_type == 'KCF':\n tracker = cv2.TrackerKCF_create()\n if tracker_type == 'TLD':\n tracker = cv2.TrackerTLD_create()\n if tracker_type == 'MEDIANFLOW':\n tracker = 
cv2.TrackerMedianFlow_create()\n if tracker_type == 'GOTURN':\n tracker = cv2.TrackerGOTURN_create()\n if tracker_type == 'MOSSE':\n tracker = cv2.TrackerMOSSE_create()\n if tracker_type == \"CSRT\":\n tracker = cv2.TrackerCSRT_create()\n return tracker\n\n","sub_path":"bk_utils_iou.py","file_name":"bk_utils_iou.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"43364401","text":"from django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.shortcuts import redirect\nfrom django.views.generic import ListView, DetailView, TemplateView\n\nfrom recruitment.controller import EvaluationController\nfrom recruitment.models import Applicant, Application, ApplicantApplication, Evaluation, \\\n AnswerEvaluation, EvaluationQuestion, Interviewee\n\n\n# Create your views here.\n\n\nclass RecruitmentMainView(PermissionRequiredMixin, TemplateView):\n template_name = 'recruitment/main.html'\n permission_required = 'recruitment.view_applicationevaluation'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"모집\"\n context['application'] = Evaluation.get_application()\n context['application_count'] = Evaluation.get_application_count()\n evaluation_controller = EvaluationController(context['application'], self.request.user)\n evaluation_controller.process_evaluated_check()\n context['evaluations'] = evaluation_controller.get_application_evaluation_states()\n context['accepted'] = Interviewee.objects.filter(accepted=True)\n return context\n\n\nclass ApplicantListView(PermissionRequiredMixin, ListView):\n template_name = 'recruitment/applicant/list.html'\n model = Applicant\n permission_required = 'recruitment.view_applicant'\n context_object_name = 'applicants'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"모집 지원자 정보\"\n return context\n\n\nclass ApplicantDetailView(PermissionRequiredMixin, DetailView):\n template_name = 'recruitment/applicant/detail.html'\n model = Applicant\n permission_required = 'recruitment.view_applicant'\n context_object_name = 'applicant'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"%s님 지원 정보\" % self.get_object().name\n return context\n\n\nclass ApplicationListView(PermissionRequiredMixin, ListView):\n template_name = 'recruitment/application/list.html'\n model = Application\n permission_required = 'recruitment.view_application'\n context_object_name = 'applications'\n\n\nclass ApplicationDetailView(PermissionRequiredMixin, DetailView):\n template_name = 'recruitment/application/detail.html'\n model = Application\n permission_required = 'recruitment.view_application'\n context_object_name = 'application'\n\n\nclass EvaluationView(PermissionRequiredMixin, TemplateView):\n template_name = 'recruitment/evaluation/main.html'\n permission_required = 'recruitment'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n application = Evaluation.get_application()\n evaluation_controller = EvaluationController(application, self.request.user)\n evaluation_controller.process_evaluated_data()\n context['title'] = '지원서 평가'\n context['evaluation_controller'] = evaluation_controller\n return context\n\n\nclass EvaluationDetailView(PermissionRequiredMixin, DetailView):\n template_name = 'recruitment/evaluation/main.html'\n model = ApplicantApplication\n 
permission_required = ['recruitment.view_applicantapplication','recruitment.add_applicantapplication', 'recruitment.change_applicantapplication']\n context_object_name = 'applicant_application'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.get_object().pk\n application = Evaluation.get_application()\n evaluation_controller = EvaluationController(application, self.request.user)\n evaluation_controller.process_evaluated_data()\n application_evaluation, created = self.get_object().applicationevaluation_set.get_or_create(\n user=self.request.user, application=self.get_object(), evaluation=Evaluation.get_active_evaluation())\n evaluation_question, created = EvaluationQuestion.objects.get_or_create(\n application_evaluation=application_evaluation)\n context['title'] = '지원서 평가'\n context['pk'] = pk\n context['evaluation_controller'] = evaluation_controller\n context['evaluation_question'] = evaluation_question.question\n return context\n\n def post(self, *args, **kwargs):\n query_dict = self.request.POST\n application = Evaluation.get_application()\n evaluation_controller = EvaluationController(application, self.request.user)\n questions = evaluation_controller.get_questions()\n applicant_application = self.get_object()\n application_evaluation, created = applicant_application.applicationevaluation_set.get_or_create(user=self.request.user, application=applicant_application, evaluation=Evaluation.get_active_evaluation())\n for question in questions:\n score = query_dict.get(str(question.order))\n answer_evaluation, created = AnswerEvaluation.objects.get_or_create(answer=question.answer_set.get(applicant=applicant_application.get_applicant()), application_evaluation=application_evaluation)\n if score is '':\n score = 0\n answer_evaluation.score = score\n answer_evaluation.save()\n self.object = self.get_object()\n context = self.get_context_data(object=self.object)\n context['msg'] = (True, '성공적으로 저장되었습니다.')\n return self.render_to_response(context)\n\n\nclass EvaluationQuestionView(PermissionRequiredMixin, DetailView):\n template_name = 'recruitment/evaluation/main.html'\n model = ApplicantApplication\n permission_required = ['recruitment.view_applicantapplication','recruitment.add_applicantapplication', 'recruitment.change_applicantapplication', 'recruitment.view_evaluationquestion', 'recruitment.add_evaluationquestion', 'recruitment.change_evaluationquestion']\n context_object_name = 'applicant_application'\n\n def post(self, *args, **kwargs):\n query_dict = self.request.POST\n applicant_application = self.get_object()\n application_evaluation, created = applicant_application.applicationevaluation_set.get_or_create(user=self.request.user, application=applicant_application, evaluation=Evaluation.get_active_evaluation())\n question = query_dict.get('question')\n evaluation_question, created = EvaluationQuestion.objects.get_or_create(application_evaluation=application_evaluation)\n evaluation_question.question = question\n evaluation_question.save()\n return redirect('recruitment-evaluation-detail', pk=self.get_object().pk)\n","sub_path":"recruitment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"530613792","text":"import requests\nimport json\nimport re\nimport pandas as pd\nimport time\nimport argparse\nimport math\nfrom tqdm import tqdm\nimport datetime\n\nfrom google_drive_utils import upload_df_to_gd\n\nparser 
= argparse.ArgumentParser()\nparser.add_argument('--github_username', required=True, type=str, help='Username for GitHub')\nparser.add_argument('--access_token', required=True, type=str, help='Personal Access Token')\nargs = parser.parse_args()\n\ndef get_json_data_from_url(url):\n try:\n r = requests.get(url, auth=(args.github_username, args.access_token))\n except Exception as err:\n connection_error_timeout_seconds = 300\n print(f\"Timing out for {connection_error_timeout_seconds}s because error thrown when requesting data from <<< {url} >>>\\n{err}\\n\\n\")\n time.sleep(connection_error_timeout_seconds)\n return None\n\n # Sleep and return None if URL is not working. Sleep in case non-200 is due to rate limiting.\n if r.status_code != 200:\n # Time out more if we get a 403 (telling us we are making too many calls)\n timeout_time_seconds = 10 if r.status_code == 403 or r.status_code == 433 else 0.1\n print(f\"Timing out for {timeout_time_seconds} seconds after getting a {r.status_code} status code from {url}\")\n time.sleep(timeout_time_seconds)\n return None\n\n data = json.loads(r.content)\n return data\n\ndef get_earliest_dup_date():\n # Get all issues, sorted by date, ascending\n earliest_duplicates = get_json_data_from_url(\"https://api.github.com/search/issues?q=label:duplicate&per_page=100&page=10&sort=created&order=asc\")\n\n if earliest_duplicates is None:\n timeout_time_seconds = 120\n print(f\"Retrying call to get earliest date in {timeout_time_seconds} seconds\")\n time.sleep(timeout_time_seconds)\n return get_earliest_dup_date()\n\n # Take the bottom result in the list (i.e. the 1000th earliest over all issues) and get its creation date\n # We do not take the very earliest, as the earliest was from 2000, and the 100th earliest is from 2003. The 1000th is from 2008, and the next ~270,000 are from the following 12 years.\n # There is a lot of sparsity between days, meaning that we start to hit a rate limit quickly if we do not take a slightly later date.\n # We therefore do not mind sacrificing the first 1000 entries so that we do not have to cycle through 8 years of dates with queries\n earliest_date_duplicate_string = earliest_duplicates[\"items\"][99][\"created_at\"]\n\n 
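# Editor's note: created_at is an ISO-8601 timestamp such as\n # \"2013-05-07T18:01:32Z\" (illustrative value), so splitting on \"T\" below\n # keeps just the calendar date.\n 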
# Only keep the date part of the creation timestamp (i.e. not the time)\n earliest_date_duplicate_string = earliest_date_duplicate_string.split(\"T\")[0]\n\n # Convert to datetime\n earliest_date_duplicate = datetime.datetime.strptime(earliest_date_duplicate_string, \"%Y-%m-%d\")\n\n return earliest_date_duplicate\n\ndef get_date_iteration_max():\n # Get earliest date of duplicate issue\n earliest_date = get_earliest_dup_date()\n\n # Find the time between now and the earliest date\n date_delta = datetime.datetime.now() - earliest_date\n\n # Get the number of days from this difference in time\n return date_delta.days\n\ndef iterate_date(date):\n return date + datetime.timedelta(days=1)\n\nsearch_date = get_earliest_dup_date()\n\ndaily_iteration_bar = tqdm(range(get_date_iteration_max()))\n\nfor _ in daily_iteration_bar:\n search_date_string = search_date.strftime(\"%Y-%m-%d\")\n\n daily_iteration_bar.set_description(f\"Searching for issues on date {search_date_string}\")\n\n issues = get_json_data_from_url(f\"https://api.github.com/search/issues?q=label:duplicate+created:{search_date_string}&per_page=100&page=1&sort=created&order=asc\")\n\n search_date = iterate_date(search_date)\n\n if issues is None:\n continue\n\n number_pages = math.ceil(issues[\"total_count\"] / 100)\n\n # GitHub API only shows the first 1000 results, meaning that we cannot get any issue data past page 10\n number_pages = min([10, number_pages])\n\n page_bar = tqdm(range(1, number_pages+1), position=1, leave=True)\n\n issue_data_list = []\n\n for page in page_bar:\n page_bar.set_description(f\"Page number {page}\")\n\n # Get duplicate issues\n issues = get_json_data_from_url(f\"https://api.github.com/search/issues?q=label:duplicate+created:{search_date_string}&per_page=100&page={page}&sort=created&order=asc\")\n\n 
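# Editor's note: \"#\\d+\" matches issue references such as \"#1234\" inside\n # comment text like \"Duplicate of #1234\", and the code-cleaner pattern\n # collapses triple-backtick code blocks to the \"[CODE]\" placeholder\n # (illustrative examples).\n 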
# Finds all mentions of a hash followed by numbers (e.g. #1234)\n issue_finder_regex = re.compile(\"#\\d+\")\n\n # Removes all code between code blocks (in order to reduce size of comments and only retain more human readable bits)\n code_cleaner_regex = re.compile(\"```([\\S\\s]+)```\")\n\n if issues is None:\n continue\n\n issue_bar = tqdm(issues[\"items\"], position=2, leave=True)\n\n for issue in issue_bar:\n try:\n url = issue[\"url\"]\n issue_bar.set_description(f\"Scraping issue {url}\")\n\n issue_title = issue[\"title\"]\n issue_body_raw = issue[\"body\"]\n issue_body = code_cleaner_regex.sub(\"[CODE]\", issue_body_raw) if issue_body_raw is not None else issue_body_raw\n issue_labels = [x[\"name\"] for x in issue[\"labels\"]]\n issue_number = url.split(\"/\")[-1]\n\n # Get comments\n comment_data = get_json_data_from_url(issue[\"comments_url\"])\n\n if comment_data is None:\n continue\n\n dup_issues = issue_finder_regex.findall(\"\".join([x[\"body\"] for x in comment_data]))\n\n # Make sure that we don't simply capture a reference to the current issue or 0\n dup_issues = [x for x in dup_issues if x != f\"#{issue_number}\" and x != \"#0\"]\n\n if len(dup_issues) <= 0:\n continue\n\n first_dup_issue = dup_issues[0]\n duplicate_issue_url = \"/\".join(url.split(\"/\")[:-1]) + dup_issues[0].replace(\"#\", \"/\")\n\n duplicate_data = get_json_data_from_url(duplicate_issue_url)\n\n if duplicate_data is None:\n continue\n\n duplicate_body_raw = duplicate_data[\"body\"]\n duplicate_body = code_cleaner_regex.sub(\"[CODE]\", duplicate_body_raw) if duplicate_body_raw is not None else duplicate_body_raw\n duplicate_title = duplicate_data[\"title\"]\n duplicate_labels = [x[\"name\"] for x in duplicate_data[\"labels\"]]\n\n issue_data_list.append({\n \"url\": url,\n \"issue_title\": issue_title,\n \"issue_body\": issue_body,\n \"issue_body_raw\": issue_body_raw,\n \"issue_labels\": issue_labels,\n \"dup_issues\": dup_issues,\n \"first_dup_issue_url\": duplicate_issue_url,\n \"duplicate_body\": duplicate_body,\n \"duplicate_body_raw\": duplicate_body_raw,\n \"duplicate_title\": duplicate_title,\n \"duplicate_labels\": duplicate_labels\n })\n except Exception as e:\n current_url = issue[\"url\"]\n print(f\"Error when processing/scraping {current_url}:\\n{e}\\n\\n\")\n\n if len(issue_data_list) > 0:\n file_date_string = search_date_string.replace(\"-\", \"_\")\n upload_df_to_gd(f\"github_issues_{file_date_string}.csv\", pd.DataFrame(issue_data_list), \"1lbS874mV9ImWe8PDZNucOds8hX0yjFWe\")\n","sub_path":"issue_scraper.py","file_name":"issue_scraper.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}{"seq_id":"17957232","text":"def main():\n import discord\n import os\n import json\n from discord.ext.tasks import loop\n from discord.ext import commands\n from info import TOKEN\n\n intents = discord.Intents().all()\n\n def get_prefix(client, message): # pulls the prefix from server-info.json\n if message.guild is None:\n return 'py!'\n else:\n with open('server-info.json', 'r') as f:\n prefixes = json.load(f)\n return prefixes[f'{str(message.guild.id)}_prefix']\n\n client = commands.Bot(command_prefix=get_prefix, intents=intents)\n\n class NewHelpName(commands.MinimalHelpCommand): # Better help command\n async def send_pages(self):\n destination = self.get_destination()\n for page in self.paginator.pages:\n emby = discord.Embed(description=page)\n await destination.send(embed=emby)\n\n client.help_command = NewHelpName(no_category='Other')\n\n 
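# Editor's note: server-info.json appears to be keyed per guild, roughly\n # {\"<guild_id>_prefix\": \"py!\", \"<guild_id>_muterole\": \"muted\"} (shape\n # inferred from on_guild_join below; illustrative).\n 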
on_ready():\n        activity_loop.start()\n        print(f'Logged in as {client.user}')\n\n    @loop(seconds=5)\n    async def activity_loop(): # resets the discord status every 5 seconds, in case something modifies it\n        await client.change_presence(status=discord.Status.dnd, activity=discord.Game('py!help'))\n\n    @client.event\n    async def on_guild_join(guild):\n        with open('server-info.json', 'r') as f:\n            file = json.load(f)\n\n        file[f'{str(guild.id)}_muterole'] = 'muted'\n        file[f'{str(guild.id)}_prefix'] = 'py!'\n\n        with open('server-info.json', 'w') as f:\n            json.dump(file, f, indent=4)\n\n    @client.event\n    async def on_guild_remove(guild):\n        with open('server-info.json', 'r') as f:\n            file = json.load(f)\n\n        file.pop(f'{str(guild.id)}_prefix')\n        file.pop(f'{str(guild.id)}_muterole')\n\n        with open('server-info.json', 'w') as f:\n            json.dump(file, f, indent=4)\n\n    @client.command(help='Change the prefix.')\n    @commands.has_permissions(administrator=True)\n    async def prefix(ctx, prefix_):\n        with open('server-info.json', 'r') as f:\n            prefixes = json.load(f)\n        prefixes[f'{str(ctx.guild.id)}_prefix'] = prefix_\n        with open('server-info.json', 'w') as f:\n            json.dump(prefixes, f, indent=4)\n\n        await ctx.send(f'prefix changed to {prefix_}.')\n\n    @client.event\n    async def on_command_error(ctx, error):\n        if isinstance(error, commands.CommandNotFound):\n            await ctx.send('Invalid command.')\n        if isinstance(error, commands.MissingPermissions):\n            await ctx.send('You do not have permission to use this command.')\n        if isinstance(error, commands.MissingAnyRole):\n            await ctx.send('You do not have the role for this command.')\n\n    @client.event\n    async def on_member_join(member):\n        guild_name = str(member.guild.name)\n        print(f'{member} has joined {guild_name}.')\n\n    @client.event\n    async def on_member_remove(member):\n        guild_name = str(member.guild.name)\n        print(f'{member} has left {guild_name}.')\n\n    @client.command(help='Load a cog.')\n    @commands.has_permissions(administrator=True)\n    async def load(ctx, extension):\n        client.load_extension(f'cogs.{extension}')\n\n    @client.command(help='Unload a cog.')\n    @commands.has_permissions(administrator=True)\n    async def unload(ctx, extension):\n        client.unload_extension(f'cogs.{extension}')\n\n    for filename in os.listdir('./cogs'): # loads all cogs on runtime\n        if filename.endswith('.py'):\n            client.load_extension(f'cogs.{filename[:-3]}')\n\n    @client.command(help='Reload a cog.')\n    @commands.has_permissions(administrator=True)\n    async def reload(ctx, extension):\n        client.unload_extension(f'cogs.{extension}')\n        client.load_extension(f'cogs.{extension}')\n\n    client.run(TOKEN)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"296155837","text":"# comms.py to run the communications of powercomms.py\r\n\r\nimport paho.mqtt.client as mqtt\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n    print (\"Connected with result code \" +str(rc))\r\n    # Subscribing in on_connect() means:\r\n    # If we lose the connection and reconnect, subscriptions will be renewed.\r\n    print(flags)\r\n    client.publish(\"rico/pub/button1\", \"Rico's publish!\", qos=0, retain=True)\r\n    client.subscribe(\"rico/pub/button1\", qos=0)\r\n\r\ndef on_message(client, userdata, msg):\r\n    print(msg.topic + \" \" + str(msg.payload))\r\n\r\nprint(\"comms on here?\")\r\nclient = mqtt.Client()\r\n# The following lines define the on_connect and on_message callbacks\r\nclient.on_connect = 
on_connect\nclient.on_message = on_message\n\nclient.connect(\"iot.eclipse.org\", 1883, 60)\n\n#client.loop_forever()\n","sub_path":"comms.py","file_name":"comms.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"616291339","text":"from tkinter import ttk, Label, StringVar, Entry, OptionMenu\nfrom tkinter import BOTH, W, EW\nfrom editor.character.alignment_info import ALIGNMENTS\nfrom editor.character.kingdom_info import KingdomInfo\nfrom editor.character.party_info import PartyInfo\nfrom editor.widgets.defaults import DEFAULT_BACKGROUND\nfrom editor.widgets.name_frame import NameFrame\n\n\nclass Tabs():\n def __init__(self, parent):\n self._parent = parent\n self._party = None\n self._name_panel = NameFrame(parent)\n self._notebook = ttk.Notebook(parent, style='Default.TNotebook')\n self._player_tab = PlayerInfoTab(self._notebook)\n self._skill_tab = SkillInfoTab(self._notebook)\n self._kingdom_tab = KingdomInfoTab(self._notebook)\n self._notebook.pack(expand=1, fill=BOTH)\n\n def load_info(self, path):\n self._party = PartyInfo(self._parent.temp_path)\n self._name_panel.load_info(self._party.main_character)\n self._player_tab.load_info(self._party)\n self._skill_tab.load_info(self._party)\n self._kingdom_tab.load_info(self._party)\n\n def update_info(self, path):\n self._player_tab.update_info(self._party)\n self._skill_tab.update_info(self._party)\n self._kingdom_tab.update_info(self._party)\n self._party.save()\n\n\nclass Tab():\n # pylint: disable=too-few-public-methods\n def __init__(self, notebook):\n self._panel = ttk.Frame(notebook, style='Default.TFrame')\n self._party = None\n\n def _add_large_label(self, a_row, colspan, label_text):\n label = Label(self._panel, text=label_text, borderwidth=1, fg='red')\n label.configure(background=DEFAULT_BACKGROUND)\n label.grid(row=a_row, columnspan=colspan, sticky=W)\n\n def _add_field(self, a_row, a_col, label_text):\n col = a_col*2\n self._add_label(a_row, col, label_text)\n variable = StringVar()\n entry = Entry(self._panel, textvariable=variable)\n entry.grid(row=a_row, column=col+1, sticky=W)\n return variable\n\n def _add_dropdown(self, a_row, a_col, label_text, choices):\n col = a_col*2\n self._add_label(a_row, col, label_text)\n variable = StringVar()\n entry = OptionMenu(self._panel, variable, *choices)\n entry.grid(row=a_row, column=col+1, sticky=EW)\n return variable\n\n def _add_label(self, a_row, a_col, label_text):\n label = Label(self._panel, text=label_text, borderwidth=1)\n label.configure(background=DEFAULT_BACKGROUND)\n label.grid(row=a_row, column=a_col, sticky=W)\n return label\n\n\nclass PlayerInfoTab(Tab):\n # pylint: disable=too-many-instance-attributes\n def __init__(self, notebook):\n super(PlayerInfoTab, self).__init__(notebook)\n notebook.add(self._panel, text=\"Player\")\n self._money = self._add_field(0, 0, 'Money:')\n self._experience = self._add_field(0, 1, 'Experience:')\n self._alignment = self._add_dropdown(1, 0, 'Alignment:',\n ALIGNMENTS.keys())\n self._strength = self._add_field(2, 0, 'Strength:')\n self._dexterity = self._add_field(2, 1, 'Dexterity:')\n self._constitution = self._add_field(3, 0, 'Constitution:')\n self._intelligence = self._add_field(3, 1, 'Intelligence:')\n self._wisdom = self._add_field(4, 0, 'Wisdom:')\n self._charisma = self._add_field(4, 1, 'Charisma:')\n self._panel.config()\n\n def load_info(self, party):\n self._money.set(party.money())\n character = party.main_character\n 
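# Note (added): every stat travels through Tk StringVars as plain text;\n        # numeric validation is assumed to happen in the update_* methods, not here.\n        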
self._experience.set(character.experience())\n self._alignment.set(character.alignment.alignment())\n self._strength.set(character.stats.strength())\n self._dexterity.set(character.stats.dexterity())\n self._constitution.set(character.stats.constitution())\n self._intelligence.set(character.stats.intelligence())\n self._wisdom.set(character.stats.wisdom())\n self._charisma.set(character.stats.charisma())\n\n def update_info(self, party):\n character = party.main_character\n party.update_money(self._money.get())\n character.update_experience(self._experience.get())\n character.alignment.update_alignment(self._alignment.get())\n character.stats.update_strength(self._strength.get())\n character.stats.update_dexterity(self._dexterity.get())\n character.stats.update_constitution(self._constitution.get())\n character.stats.update_intelligence(self._intelligence.get())\n character.stats.update_wisdom(self._wisdom.get())\n character.stats.update_charisma(self._charisma.get())\n\n\nclass SkillInfoTab(Tab):\n # pylint: disable=too-many-instance-attributes\n def __init__(self, notebook):\n super(SkillInfoTab, self).__init__(notebook)\n notebook.add(self._panel, text=\"Skills\")\n self._athletics_field = self._add_field(0, 0, 'Athletics:')\n self._mobility_field = self._add_field(0, 1, 'Mobility:')\n self._arcana_field = self._add_field(1, 0, 'Knowledge Arcana:')\n self._knowledge_world_field = self._add_field(1, 1, 'Knowledge World:')\n self._lore_nature_field = self._add_field(2, 0, 'Lore Nature:')\n self._lore_religion_field = self._add_field(2, 1, 'Lore Religion:')\n self._perception_field = self._add_field(3, 0, 'Perception:')\n self._persuasion_field = self._add_field(3, 1, 'Persuasion:')\n self._stealth_field = self._add_field(4, 0, 'Stealth:')\n self._theivery_field = self._add_field(4, 1, 'Theivery:')\n self._use_magic_device_field = self._add_field(5, 0,\n 'Use Magic Device:')\n\n def load_info(self, party):\n character = party.main_character\n self._athletics_field.set(character.skills.athletics())\n self._arcana_field.set(character.skills.knowledge_arcana())\n self._knowledge_world_field .set(character.skills.knowledge_world())\n self._lore_nature_field.set(character.skills.lore_nature())\n self._lore_religion_field.set(character.skills.lore_religion())\n self._mobility_field.set(character.skills.mobility())\n self._perception_field.set(character.skills.perception())\n self._persuasion_field.set(character.skills.persuasion())\n self._stealth_field.set(character.skills.stealth())\n self._theivery_field.set(character.skills.theivery())\n self._use_magic_device_field.set(character.skills.use_magic_device())\n\n def update_info(self, party):\n skills = party.main_character.skills\n skills.update_athletics(self._athletics_field.get())\n skills.update_knowledge_arcana(self._arcana_field.get())\n skills.update_knowledge_world(self._knowledge_world_field.get())\n skills.update_mobility(self._mobility_field.get())\n skills.update_lore_nature(self._lore_nature_field.get())\n skills.update_lore_religion(self._lore_religion_field.get())\n skills.update_perception(self._perception_field.get())\n skills.update_persuasion(self._persuasion_field.get())\n skills.update_stealth(self._stealth_field.get())\n skills.update_theivery(self._theivery_field.get())\n skills.update_use_magic_device(self._use_magic_device_field.get())\n\n\nclass KingdomInfoTab(Tab):\n # pylint: disable=too-many-instance-attributes\n def __init__(self, notebook):\n super(KingdomInfoTab, self).__init__(notebook)\n 
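# Note (added): _add_field(row, col, ...) places a Label at grid column col*2\n        # and its Entry at col*2 + 1 (see Tab._add_field above), so the (row, col)\n        # pairs below describe a simple two-column form.\n        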
notebook.add(self._panel, text=\"Kingdom\")\n        self._kingdom_name_field = self._add_field(0, 0, 'Kingdom Name:')\n        self._build_points_field = self._add_field(0, 1, 'Build Points:')\n        self._community_field = self._add_field(1, 0, 'Community:')\n        self._loyalty_field = self._add_field(1, 1, 'Loyalty:')\n        self._military_field = self._add_field(2, 0, 'Military:')\n        self._economy_field = self._add_field(2, 1, 'Economy:')\n        self._relations_field = self._add_field(3, 0, 'Relations:')\n        self._divine_field = self._add_field(3, 1, 'Divine:')\n        self._arcane_field = self._add_field(4, 0, 'Arcane:')\n        self._stability_field = self._add_field(4, 1, 'Stability:')\n        self._culture_field = self._add_field(5, 0, 'Culture:')\n        self._espionage_field = self._add_field(5, 1, 'Espionage:')\n\n    def load_info(self, party):\n        kingdom_info = party.kingdom\n        if kingdom_info.has_kingdom_data():\n            self._kingdom_name_field.set(kingdom_info.kingdom_name())\n            self._build_points_field.set(kingdom_info.build_points())\n            self._community_field.set(kingdom_info.community())\n            self._loyalty_field.set(kingdom_info.loyalty())\n            self._military_field.set(kingdom_info.military())\n            self._economy_field.set(kingdom_info.economy())\n            self._relations_field.set(kingdom_info.relations())\n            self._divine_field.set(kingdom_info.divine())\n            self._arcane_field.set(kingdom_info.arcane())\n            self._stability_field.set(kingdom_info.stability())\n            self._culture_field.set(kingdom_info.culture())\n            self._espionage_field.set(kingdom_info.espionage())\n\n    def update_info(self, party):\n        kingdom_info = party.kingdom\n        if kingdom_info.has_kingdom_data():\n            kingdom_info.update_kingdom_name(self._kingdom_name_field.get())\n            kingdom_info.update_build_points(self._build_points_field.get())\n            kingdom_info.update_community(self._community_field.get())\n            kingdom_info.update_loyalty(self._loyalty_field.get())\n            kingdom_info.update_military(self._military_field.get())\n            kingdom_info.update_economy(self._economy_field.get())\n            kingdom_info.update_relations(self._relations_field.get())\n            kingdom_info.update_divine(self._divine_field.get())\n            kingdom_info.update_arcane(self._arcane_field.get())\n            kingdom_info.update_stability(self._stability_field.get())\n            kingdom_info.update_culture(self._culture_field.get())\n            kingdom_info.update_espionage(self._espionage_field.get())\n","sub_path":"src/editor/widgets/tabs.py","file_name":"tabs.py","file_ext":"py","file_size_in_byte":10164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"280850456","text":"#!/usr/bin/python3\n\"\"\" Place-Amenity links view \"\"\"\nfrom flask import request, jsonify, abort\nfrom api.v1.views import app_views\nfrom models import storage, place, amenity\nfrom os import environ\n\n\n@app_views.route('places/<place_id>/amenities', methods=['GET'],\n                 strict_slashes=False)\ndef get_amenities_by_place(place_id):\n    \"\"\" Return all amenities linked to a place \"\"\"\n\n    s_place = storage.get(place.Place, place_id)\n    if s_place is None:\n        abort(404)\n\n    if (environ.get('HBNB_TYPE_STORAGE') == 'db'):\n        amenities = [a.to_dict() for a in s_place.amenities]\n    else:\n        amenities = [storage.get(amenity.Amenity, a).to_dict()\n                     for a in s_place.amenity_ids]\n    return (jsonify(amenities), 200)\n\n\n@app_views.route('/places/<place_id>/amenities/<amenity_id>',\n                 methods=['DELETE'], strict_slashes=False)\ndef delete_amenity_place(place_id, amenity_id):\n    \"\"\" Delete an amenity with the given ID and\n    ID of a Place Object\"\"\"\n\n    s_place = storage.get(place.Place, place_id)\n    if not s_place:\n        
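# Note (added): storage.get is assumed to return None for unknown ids,\n        # so a missing Place or Amenity short-circuits the handler with a 404.\n        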
abort(404)\n\n    s_amenity = storage.get(amenity.Amenity, amenity_id)\n    if not s_amenity:\n        abort(404)\n\n    if (environ.get('HBNB_TYPE_STORAGE') == 'db'):\n        if s_amenity not in s_place.amenities:\n            abort(404)\n        s_place.amenities.remove(s_amenity)\n    else:\n        if amenity_id not in s_place.amenity_ids:\n            abort(404)\n        s_place.amenity_ids.remove(amenity_id)\n    storage.save()\n    return (jsonify({}), 200)\n\n\n@app_views.route(\"places/<place_id>/amenities/<amenity_id>\",\n                 methods=['POST'], strict_slashes=False)\ndef post_amenity_place(place_id, amenity_id):\n    \"\"\" Link an amenity to a place, based on place_id \"\"\"\n\n    s_place = storage.get(place.Place, place_id)\n    s_amenity = storage.get(amenity.Amenity, amenity_id)\n\n    if not s_place:\n        abort(404)\n\n    if not s_amenity:\n        abort(404)\n\n    if (environ.get('HBNB_TYPE_STORAGE') == 'db'):\n        if s_amenity in s_place.amenities:\n            return (jsonify(s_amenity.to_dict()), 200)\n        else:\n            s_place.amenities.append(s_amenity)\n    else:\n        if amenity_id in s_place.amenity_ids:\n            return (jsonify(s_amenity.to_dict()), 200)\n        else:\n            s_place.amenity_ids.append(amenity_id)\n\n    storage.save()\n    return (jsonify(s_amenity.to_dict()), 200)\n","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"305972916","text":"def delta(xs):\n\treturn sum([abs(xs[i - 1] - xs[i]) for i in range(len(xs))])\n\n\nif __name__ == '__main__':\n\n    import tour2 as tour\n\n    import matplotlib.pyplot as plt\n\n    xs = [complex(a, b) for a, b in tour.tour]\n    ds = delta(xs)\n\n    xs, ys = zip(*tour.tour + [tour.tour[0]])\n\n    plt.title('ds : {}'.format(ds))\n    plt.plot(xs, ys, 'ro')\n    plt.plot(xs, ys, 'k-')\n    plt.savefig('tsp2.png')\n","sub_path":"deidos_tsp_data/plot2.py","file_name":"plot2.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"535632441","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# updates - Semplice update preferences\n# Copyright (C) 2015 Eugenio \"g7\" Paolantonio\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n# Authors:\n#    Eugenio \"g7\" Paolantonio <me@medesimo.eu>\n#\n\nimport os\nfrom distutils.core import setup, Command\nfrom distutils.command.build import build\nfrom distutils.command.install import install\n\nimport subprocess\nimport shutil\n\nl10n_path = \"./po\"\n\nAPP_NAME = \"vera-control-center-module-updates\"\n\n\nclass CreatePotTemplate(Command):\n\t\"\"\"\n\tCreates a .pot template.\n\t\"\"\"\n\t\n\tdescription = \"creates a .pot localization template from the program sources.\"\n\tuser_options = []\n\t\n\tdef initialize_options(self):\n\t\t\"\"\"\n\t\tInitialize options\n\t\t\"\"\"\n\t\t\n\t\tself.cwd = None\n\t\n\tdef finalize_options(self):\n\t\t\"\"\"\n\t\tFinalize options\n\t\t\"\"\"\n\t\t\n\t\tself.cwd = os.getcwd()\n\t\n\tdef run(self):\n\t\t\"\"\"\n\t\tExtracts translatable strings from the Python, Glade and .desktop sources.\n\t\t\"\"\"\n\t\t\n\t\tassert os.getcwd() == self.cwd, \"You must be in the package root: %s\" % self.cwd\n\t\t\n\t\toutput_file = os.path.join(self.cwd, l10n_path, APP_NAME, \"%s.pot\" % APP_NAME)\n\t\t\n\t\t# Blank-out the output file\n\t\twith open(output_file, \"w\") as f:\n\t\t\tf.write(\"\")\n\t\t\n\t\tpy_files = []\n\t\tglade_files = []\n\t\tdesktop_files = []\n\t\t\n\t\tfor directory, dirnames, filenames in os.walk(\".\"):\n\t\t\tfor file_ in filenames:\n\t\t\t\tif file_.endswith(\".py\"):\n\t\t\t\t\tpy_files.append(os.path.join(directory, file_))\n\t\t\t\telif file_.endswith(\".glade\"):\n\t\t\t\t\tglade_files.append(os.path.join(directory, file_))\n\t\t\t\telif file_.endswith(\".desktop\"):\n\t\t\t\t\tdesktop_files.append(os.path.join(directory, file_))\n\t\t\t\t\t\n\t\tsubprocess.call([\n\t\t\t\"xgettext\",\n\t\t\t\"--language=Python\",\n\t\t\t\"--from-code=utf-8\",\n\t\t\t\"--keyword=_\",\n\t\t\t\"--output=%s\" % output_file,\n\t\t] + py_files)\n\n\t\tsubprocess.call([\n\t\t\t\"xgettext\",\n\t\t\t\"--language=Desktop\",\n\t\t\t\"--from-code=utf-8\",\n\t\t\t\"-j\",\n\t\t\t\"--output=%s\" % os.path.join(self.cwd, l10n_path, APP_NAME, \"%s.pot\" % APP_NAME)\n\t\t] + desktop_files)\n\t\t\n\t\tfor file_ in glade_files:\n\t\t\tsubprocess.call([\n\t\t\t\t\"intltool-extract\",\n\t\t\t\t\"--type=gettext/glade\",\n\t\t\t\tfile_\n\t\t\t])\n\t\t\tsubprocess.call([\n\t\t\t\t\"xgettext\",\n\t\t\t\t\"--from-code=utf-8\",\n\t\t\t\t\"--language=C\",\n\t\t\t\t\"--keyword=N_\",\n\t\t\t\t\"-j\",\n\t\t\t\t\"--output=%s\" % output_file,\n\t\t\t\t\"-j\",\n\t\t\t\tfile_ + \".h\"\n\t\t\t])\n\t\t\tos.remove(file_ + \".h\")\n\nclass CustomBuild(build):\n\t\"\"\"\n\tHooks.\n\t\"\"\"\n\t\n\tdef run(self):\n\t\t\"\"\"\n\t\tRuns the build.\n\t\t\"\"\"\n\t\t\n\t\tsuper().run()\n\t\t\n\t\t# Build mos\n\t\tfor directory, dirnames, filenames in os.walk(l10n_path):\n\t\t\tfor file_ in filenames:\n\t\t\t\tif file_.endswith(\".po\"):\n\t\t\t\t\tsource = os.path.join(directory, file_)\n\t\t\t\t\ttarget_dir = os.path.join(\"./build\", directory)\n\t\t\t\t\ttarget = os.path.join(target_dir, file_.replace(\".po\",\".mo\"))\n\t\t\t\t\t\n\t\t\t\t\tif not os.path.exists(target_dir):\n\t\t\t\t\t\tos.makedirs(target_dir)\n\t\t\t\t\t\n\t\t\t\t\tprint(\"Compiling translation %s\" % file_)\n\t\t\t\t\tsubprocess.call([\"msgfmt\", \"--output-file=%s\" % target, source])\n\nclass CustomInstall(install):\n\t\"\"\"\n\tHooks.\n\t\"\"\"\n\t\n\tdef run(self):\n\t\t\"\"\"\n\t\tRuns the installation.\n\t\t\"\"\"\n\t\t\n\t\tsuper().run()\n\t\t\n\t\t# Install mos\n\t\tfor directory, dirnames, filenames in os.walk(os.path.join(\"./build\", l10n_path)):\n\t\t\tfor file_ in filenames:\n\t\t\t\tif file_.endswith(\".mo\"):\n\t\t\t\t\tsource = 
os.path.join(directory, file_)\n\t\t\t\t\ttarget_dir = os.path.join(\n\t\t\t\t\t\tself.root if self.root else \"/\",\n\t\t\t\t\t\t\"usr/share/locale\",\n\t\t\t\t\t\tfile_.replace(\".mo\",\"\"),\n\t\t\t\t\t\t\"LC_MESSAGES\"\n\t\t\t\t\t)\n\t\t\t\t\ttarget = os.path.join(target_dir, os.path.basename(directory) + \".mo\")\n\t\t\t\t\t\n\t\t\t\t\tif not os.path.exists(target_dir):\n\t\t\t\t\t\tos.makedirs(target_dir)\n\t\t\t\t\t\n\t\t\t\t\tshutil.copyfile(source, target)\n\t\t\t\t\tos.chmod(target, 0o644)\n\nsetup(\n\tcmdclass={\n\t\t\"pot\": CreatePotTemplate,\n\t\t\"build\": CustomBuild,\n\t\t\"install\": CustomInstall\n\t},\n\tname=APP_NAME,\n\tversion='0.70.4',\n\tdescription='Semplice update preferences',\n\tauthor='Eugenio Paolantonio',\n\tauthor_email='me@medesimo.eu',\n\turl='https://github.com/semplice/vera-control-center-module-updates',\n\tpackages=[\n\t\t'modules',\n\t\t'modules.updates',\n\t\t'modules.updates.core',\n\t\t'modules.updates.widgets',\n\t],\n\tdata_files=[\n\t\t(\n\t\t\t\"/usr/share/vera-control-center/modules/updates\",\n\t\t\t[\n\t\t\t\t\"modules/updates/updates.desktop\",\n\t\t\t\t\"modules/updates/updates.glade\"\n\t\t\t]\n\t\t)\n\t],\n\trequires=[\n\t\t'gi.repository.Gio',\n\t\t'gi.repository.GObject',\n\t\t'gi.repository.Gtk',\n\t\t# FIXME pending AppStream API update. See #4\n\t\t#'gi.repository.AppStream',\n\t\t'gi.repository.Pango',\n\t\t'gi.repository.PangoCairo',\n\t\t'cairo',\n\t\t'quickstart',\n\t]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"228259538","text":"from woocommerce import API\nfrom chair.product_info import PRODUCT_INFO\nfrom chair.models import Order, Customer\nfrom chair.order_processing.validate import custom_validate\nfrom scraper.settings import WC_KEY, WC_SECRET, WC_KEY_MG, WC_SECRET_MG\n\nWC_PULSELABZ_SOURCE = 'woocommerce_pulselabz'\nWC_MOTIONGREY_SOURCE = 'woocommerce_motiongrey'\n\nwc_pulselabz = API(\n    url=\"https://pulselabz.com\",\n    consumer_key=WC_KEY,\n    consumer_secret=WC_SECRET,\n    wp_api=True,\n    version=\"wc/v1\"\n)\n\nwc_motiongrey = API(\n    url=\"https://motiongrey.com/\",\n    consumer_key=WC_KEY_MG,\n    consumer_secret=WC_SECRET_MG,\n    wp_api=True,\n    version=\"wc/v1\"\n)\n\ndef grab_orders_woocommerce(date):\n    pulselabz_orders = wc_pulselabz.get(f'orders?per_page=100&after={date}').json()\n    motiongrey_orders = wc_motiongrey.get(f'orders?per_page=100&after={date}').json()\n\n    for order in pulselabz_orders:\n        load_order_wc(order, WC_PULSELABZ_SOURCE)\n\n    for order in motiongrey_orders:\n        load_order_wc(order, WC_MOTIONGREY_SOURCE)\n\n\n# fill in information needed for an order\n# validate the order\ndef load_order_wc(order_info, wc_source):\n    try:\n        customer_id = order_info['_links'].get('customer')[0]['href'].split('/customers/')[1]\n    except:\n        customer_id = order_info.get('id')\n    customer = update_customer_info_wc(customer_id, order_info.get('billing'), order_info.get('shipping'))\n    for i in range(len(order_info.get('line_items'))):\n        item = order_info.get('line_items')[i]\n        product_name = item.get('name')\n        order, created = Order.objects.get_or_create(\n            order_id=order_info.get('number'), product_name=product_name)\n        order.customer_id = customer\n        if order_info.get('status') == 'processing':\n            order.status = \"SHIPPING\"\n        elif order_info.get('status') == 'completed':\n            order.status = 'RECEIVED'\n        else:\n            order.status = order_info.get('status')\n        order.part_number = item.get('sku')\n        
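# Note (added): the fields below are copied straight from the wc/v1 order\n        # payload; date_created is assumed to already be an ISO-8601 string.\n        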
order.quantity = item.get('quantity')\n order.received = order_info.get('date_created')\n order.shipping_type = order_info.get('shipping_lines')[0].get('method_title')\n order.total_price = order_info.get('total')\n order.payment_method = order_info.get('payment_method')\n order.source = wc_source\n try:\n order.part_number = PRODUCT_INFO.get(product_name)[1]\n except:\n pass\n order.save()\n\n custom_validate(order, customer)\n\n\ndef update_customer_info_wc(customer_id, billing_info, customer_info):\n customer, created = Customer.objects.get_or_create(\n customer_id=customer_id)\n customer.firstname = customer_info.get('first_name')\n customer.lastname = customer_info.get('last_name')\n try:\n customer.country = customer_info.get('country')\n customer.city = customer_info.get('city')\n customer.phone = billing_info.get('phone')\n customer.state = customer_info.get('state')\n customer.street = customer_info.get('address_1')\n customer.email = billing_info.get('email')\n customer.company = billing_info.get('company')\n if customer_info.get('address_2'):\n customer.street = '{} - {}'.format(customer_info.get('address_2'), customer_info.get('address_1'))\n customer.zip = customer_info.get('postcode')\n except AttributeError:\n print('could not parse customer {}, {}'.format(\n customer.customer_id, customer_info))\n customer.save()\n return customer\n\n\ndef send_tracking_woocommerce(order): \n tracking_data = {'tracking_provider': \"UPS\", # Only uses UPS\n 'tracking_number': order.tracking_id}\n\n complete_data = {\n \"status\": \"completed\"\n }\n\n if 'pulselabz' in order.source and not order.wc_filled:\n response = wc_pulselabz.post(f'orders/{order.order_id}/shipment-trackings', tracking_data)\n complete_response = wc_pulselabz.put(f'orders/{order.order_id}', complete_data)\n elif 'motiongrey' in order.source and not order.wc_filled:\n response = wc_motiongrey.post(f'orders/{order.order_id}/shipment-trackings', tracking_data)\n complete_response = wc_motiongrey.put(f'orders/{order.order_id}', complete_data)\n else: \n return -1\n\n if response.status_code in [200, 201]:\n return 1\n return -1\n","sub_path":"chair/order_processing/woocommerce.py","file_name":"woocommerce.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19944917","text":"import sys\nimport os\nimport logging\nimport argparse\nimport json\n\ntry:\n import matplotlib\n matplotlib.use('agg')\nexcept:\n pass\nimport matplotlib.pyplot as plt\n\nfrom chainer import functions as F\nfrom chainer import links as L\nfrom tqdm import tqdm\nfrom chainer import serializers\nimport numpy as np\nfrom rdkit import RDLogger, Chem\n\nfrom chainer_chemistry.dataset.converters import concat_mols\nfrom chainer_chemistry.datasets import NumpyTupleDataset\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\nfrom models import predictor\nfrom saliency.calculator.gradient_calculator import GradientCalculator\nfrom saliency.calculator.integrated_gradients_calculator import IntegratedGradientsCalculator\nfrom saliency.calculator.occlusion_calculator import OcclusionCalculator\n\nimport data\nfrom data import PYRIDINE_SMILES, hassubst\n\n\ndef percentile_index(ar, num):\n \"\"\"\n ar (numpy.ndarray): array\n num (float): rate\n\n Extract `num` rate of largest index in this array.\n \"\"\"\n threshold = int(len(ar) * num)\n idx = np.argsort(ar)\n return idx[-threshold:]\n\n\ndef 
calc_recall_precision_for_rate(grads, rate, haspiindex):\n recall_list = []\n hit_rate_list = []\n for i in range(len(grads)):\n largest_index = percentile_index(grads[i], float(rate))\n set_largest_index = set(largest_index)\n hit_index = set_largest_index.intersection(haspiindex[i])\n hit_num = len(hit_index)\n hit_rate = float(hit_num) / float(len(set_largest_index))\n\n recall_list.append(float(hit_num) / len(haspiindex[i]))\n hit_rate_list.append(hit_rate)\n recall = np.mean(np.array(recall_list))\n precision = np.mean(np.array(hit_rate_list))\n return recall, precision\n\n\ndef calc_recall_precision(grads, rates, haspiindex):\n r_list = []\n p_list = []\n for rate in rates:\n r, p = calc_recall_precision_for_rate(grads, rate, haspiindex)\n r_list.append(r)\n p_list.append(p)\n return r_list, p_list\n\n\ndef parse():\n parser = argparse.ArgumentParser(\n description='Multitask Learning with Tox21.')\n parser.add_argument('--batchsize', '-b', type=int, default=128,\n help='batch size')\n parser.add_argument('--gpu', '-g', type=int, default=-1,\n help='GPU ID to use. Negative value indicates '\n 'not to use GPU and to run the code in CPU.')\n parser.add_argument('--dirpath', '-d', type=str, default='results',\n help='path to train results directory')\n parser.add_argument('--calculator', type=str, default='gradient')\n args = parser.parse_args()\n return args\n\n\ndef main(method, labels, unit_num, conv_layers, class_num, n_layers,\n dropout_ratio, model_path, save_path):\n # Dataset preparation\n train, val, test, train_smiles, val_smiles, test_smiles = data.load_dataset(method, labels)\n\n # --- model preparation ---\n model = predictor.build_predictor(\n method, unit_num, conv_layers, class_num, dropout_ratio, n_layers)\n\n classifier = L.Classifier(model,\n lossfun=F.sigmoid_cross_entropy,\n accfun=F.binary_accuracy)\n\n print('Loading model parameter from ', model_path)\n serializers.load_npz(model_path, model)\n\n target_dataset = val\n target_smiles = val_smiles\n\n val_mols = [Chem.MolFromSmiles(smi) for smi in tqdm(val_smiles)]\n\n pyridine_mol = Chem.MolFromSmarts(PYRIDINE_SMILES)\n pyridine_index = np.where(np.array([mol.HasSubstructMatch(pyridine_mol) for mol in val_mols]) == True)\n val_pyridine_mols = np.array(val_mols)[pyridine_index]\n\n # It only extracts one substructure, not expected behavior\n # val_pyridine_pos = [set(mol.GetSubstructMatch(pi)) for mol in val_pyridine_mols]\n def flatten_tuple(x):\n return [element for tupl in x for element in tupl]\n\n val_pyridine_pos = [flatten_tuple(mol.GetSubstructMatches(pyridine_mol)) for mol in val_pyridine_mols]\n\n # print('pyridine_index', pyridine_index)\n # print('val_pyridine_mols', val_pyridine_mols.shape)\n # print('val_pyridine_pos', val_pyridine_pos)\n # print('val_pyridine_pos length', [len(k) for k in val_pyridine_pos])\n\n pyrigine_dataset = NumpyTupleDataset(*target_dataset.features[pyridine_index, :])\n pyrigine_smiles = target_smiles[pyridine_index]\n print('pyrigine_dataset', len(pyrigine_dataset), len(pyrigine_smiles))\n\n atoms = pyrigine_dataset.features[:, 0]\n num_atoms = [len(a) for a in atoms]\n\n def clip_original_size(saliency, num_atoms):\n \"\"\"`saliency` array is 0 padded, this method align to have original\n molecule's length\n \"\"\"\n assert len(saliency) == len(num_atoms)\n saliency_list = []\n for i in range(len(saliency)):\n saliency_list.append(saliency[i, :num_atoms[i]])\n return saliency_list\n\n def preprocess_fun(*inputs):\n atom, adj, t = inputs\n # HACKING for now...\n atom_embed = 
classifier.predictor.graph_conv.embed(atom)\n return atom_embed, adj, t\n\n def eval_fun(*inputs):\n atom_embed, adj, t = inputs\n prob = classifier.predictor(atom_embed, adj)\n out = F.sum(prob)\n return out\n\n calculator_method = args.calculator\n print('calculator method', calculator_method)\n if calculator_method == 'gradient':\n # option1: Gradient\n calculator = GradientCalculator(\n classifier, eval_fun=eval_fun,\n # target_key='embed', eval_key='out',\n target_key=0,\n # multiply_target=True # this will calculate grad * input\n )\n elif calculator_method == 'integrated_gradients':\n # option2: IntegratedGradients\n calculator = IntegratedGradientsCalculator(\n classifier, eval_fun=eval_fun,\n # target_key='embed', eval_key='out',\n target_key=0, steps=10\n )\n elif calculator_method == 'occlusion':\n # option3: Occlusion\n def eval_fun_occlusion(*inputs):\n atom_embed, adj, t = inputs\n prob = classifier.predictor(atom_embed, adj)\n # Do not take sum, instead return batch-wise score\n out = F.sigmoid(prob)\n return out\n calculator = OcclusionCalculator(\n classifier, eval_fun=eval_fun_occlusion,\n # target_key='embed', eval_key='out',\n target_key=0, slide_axis=1\n )\n else:\n raise ValueError(\"[ERROR] Unexpected value calculator_method={}\".format(calculator_method))\n\n M = 100\n num = 20\n rates = np.linspace(0.1, 1, num=num)\n print('M', M)\n\n # --- VanillaGrad ---\n saliency_arrays = calculator.compute_vanilla(\n pyrigine_dataset, converter=concat_mols, preprocess_fn=preprocess_fun)\n saliency = calculator.transform(\n saliency_arrays, ch_axis=3, method='square')\n # saliency_arrays -> M, batch_size, max_atom, ch_dim\n # print('saliency_arrays', saliency_arrays.shape)\n # saliency -> batch_size, max_atom\n # print('saliency', saliency.shape)\n saliency_vanilla = clip_original_size(saliency, num_atoms)\n\n # recall & precision\n vanilla_recall, vanilla_precision = calc_recall_precision(saliency_vanilla, rates, val_pyridine_pos)\n print('vanilla_recall', vanilla_recall)\n print('vanilla_precision', vanilla_precision)\n\n # --- SmoothGrad ---\n saliency_arrays = calculator.compute_smooth(\n pyrigine_dataset, converter=concat_mols, preprocess_fn=preprocess_fun,\n M=M,\n mode='absolute', scale=0.15 # previous implementation\n # mode='relative', scale=0.05\n )\n saliency = calculator.transform(\n saliency_arrays, ch_axis=3, method='square')\n\n saliency_smooth = clip_original_size(saliency, num_atoms)\n\n # recall & precision\n smooth_recall, smooth_precision = calc_recall_precision(saliency_smooth, rates, val_pyridine_pos)\n print('smooth_recall', smooth_recall)\n print('smooth_precision', smooth_precision)\n\n # --- BayesGrad ---\n # bayes grad is calculated by compute_vanilla with train=True\n saliency_arrays = calculator.compute_vanilla(\n pyrigine_dataset, converter=concat_mols, preprocess_fn=preprocess_fun,\n M=M, train=True)\n saliency = calculator.transform(\n saliency_arrays, ch_axis=3, method='square', lam=0)\n saliency_bayes = clip_original_size(saliency, num_atoms)\n\n bayes_recall, bayes_precision = calc_recall_precision(saliency_bayes, rates, val_pyridine_pos)\n print('bayes_recall', bayes_recall)\n print('bayes_precision', bayes_precision)\n\n plt.figure(figsize=(7, 5), dpi=200)\n plt.plot(vanilla_recall, vanilla_precision, 'k-', color='blue', label='VanillaGrad')\n plt.plot(smooth_recall, smooth_precision, 'k-', color='green', label='SmoothGrad')\n plt.plot(bayes_recall, bayes_precision, 'k-', color='red', label='BayesGrad(Ours)')\n 
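# Note (added): at rate = 1.0, percentile_index selects every atom, so\n    # vanilla_precision[-1] is the mean fraction of pyridine atoms per molecule,\n    # i.e. the chance-level precision marked by the dashed baseline below.\n    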
plt.axhline(y=vanilla_precision[-1], color='gray', linestyle='--')\n    plt.legend()\n    plt.xlabel(\"recall\")\n    plt.ylabel(\"precision\")\n    if save_path:\n        print('saved to ', save_path)\n        plt.savefig(save_path)\n        # plt.savefig('artificial_pr.eps')\n    else:\n        plt.show()\n\n\nif __name__ == '__main__':\n    # Disable errors raised by RDKit while preprocessing the Tox21 dataset.\n    lg = RDLogger.logger()\n    lg.setLevel(RDLogger.CRITICAL)\n    # show INFO level log from chainer chemistry\n    logging.basicConfig(level=logging.INFO)\n\n    args = parse()\n    # --- extracting configs ---\n    dirpath = args.dirpath\n    json_path = os.path.join(dirpath, 'args.json')\n    if not os.path.exists(json_path):\n        raise ValueError(\n            'json_path {} not found! Execute train_tox21.py beforehand.'.format(json_path))\n    with open(json_path, 'r') as f:\n        train_args = json.load(f)\n\n    method = train_args['method']\n    labels = train_args['label'] # 'pyridine'\n\n    unit_num = train_args['unit_num']\n    conv_layers = train_args['conv_layers']\n    class_num = 1\n    n_layers = train_args['n_layers']\n    dropout_ratio = train_args['dropout_ratio']\n    num_train = train_args['num_train']\n    # seed = train_args['seed']\n    # --- extracting configs end ---\n\n    model_path = os.path.join(dirpath, 'predictor.npz')\n    save_path = os.path.join(\n        dirpath, 'precision_recall_{}.png'.format(args.calculator))\n    # --- config end ---\n\n    main(method, labels, unit_num, conv_layers, class_num, n_layers,\n         dropout_ratio, model_path, save_path)\n","sub_path":"experiments/tox21/plot_precision_recall.py","file_name":"plot_precision_recall.py","file_ext":"py","file_size_in_byte":10534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"9859238","text":"import bs4\r\nimport re\r\nimport html2text\r\n\r\nfrom remote import request\r\nfrom api import deactivate, save, save_img, get_event_list\r\nfrom sportmaster import conf\r\n\r\n\r\ndef parse_category(url_category, replace_category, exists_goods):\r\n    def check(_res):\r\n        return bool(re.search('dataLayer = ', _res.text))\r\n\r\n    cookies = _set_city()\r\n    page = 1\r\n    while True:\r\n        url = 'http://www.sportmaster.ru/catalog/{category_p}?pageSize=120&page={page_p}&f-promotion:globalpromo=true'\r\n        res = request(\r\n            url.format(category_p=url_category, page_p=page),\r\n            id_client=conf.id_client,\r\n            check_function=check,\r\n            cookies=cookies,\r\n            encoding=conf.encoding\r\n        )\r\n        if not res:\r\n            return None\r\n\r\n        bsoup = bs4.BeautifulSoup(res.text, 'html5lib')\r\n        sub_category_list = bsoup.select('.sm-subcategory__content a.sm-image-holder')\r\n        if len(sub_category_list) > 0:\r\n            del bsoup\r\n\r\n            for sub_category in sub_category_list:\r\n                id_subcategory = re.search('catalog/(.+)$', sub_category['href'])\r\n                if id_subcategory is not None:\r\n                    parse_category(id_subcategory.group(1), replace_category, exists_goods)\r\n\r\n            return None\r\n\r\n        # If the category contains no promotional goods\r\n        if bsoup.select_one('.sm-search__nothing') is not None:\r\n            return None\r\n\r\n        for good in bsoup.select('.sm-category__item a.sm-image-holder'):\r\n            id_good = re.search('product/(\\d+)/$', good['href'])\r\n            if id_good is None or id_good.group(1) in exists_goods:\r\n                continue\r\n\r\n            data = {\r\n                'id_client': conf.id_client,\r\n                'id_category': replace_category[0],\r\n                'id_sub_category': replace_category[1],\r\n            }\r\n            if parse_good(id_good.group(1), data):\r\n                exists_goods.append(id_good.group(1))\r\n\r\n        next_page = bsoup.select_one('a.sm-category__main-sorting_pager_right')\r\n        if next_page is None:\r\n            
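# Note (added): no pager link is assumed to mean the last results page\r\n            # for this category has been reached, so stop paging.\r\n            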
break\r\n\r\n        page += 1\r\n\r\n\r\ndef parse_good(id_good, data):\r\n    def check(_res):\r\n        return bool(re.search('window.globals.cardModel = ', _res.text))\r\n\r\n    cookies = _set_city()\r\n    url = 'http://www.sportmaster.ru/product/{id_p}/'.format(id_p=id_good)\r\n    res = request(\r\n        url,\r\n        id_client=conf.id_client,\r\n        check_function=check,\r\n        cookies=cookies,\r\n        encoding=conf.encoding\r\n    )\r\n    if not res:\r\n        return None\r\n\r\n    taking_avail = re.search('\"anySkuAvailableForTakingOn\":true', res.text)\r\n    shiping_avail = re.search('\"anySkuAvailableForShipping\":true', res.text)\r\n\r\n    bsoup = bs4.BeautifulSoup(res.text, 'html5lib')\r\n    price_block = bsoup.select_one('.sm-goods_main_details_prices_left')\r\n    old_price = price_block.select_one('.sm-goods_main_details_prices_old-price')\r\n    price = price_block.select_one('.sm-goods_main_details_prices_actual-price')\r\n\r\n    if old_price is None or (taking_avail is None and shiping_avail is None):\r\n        return None\r\n\r\n    data['link'] = url\r\n    data['name'] = bsoup.select_one('.sm-goods_main_details h1').get_text(strip=True)\r\n    data['current_price'] = price.get_text(strip=True).replace(',', '')\r\n    data['old_price'] = old_price.get_text(strip=True).replace(',', '')\r\n\r\n    html_text = html2text.HTML2Text(bodywidth=9999)\r\n    html_text.ul_item_mark = ''\r\n    html_text.strong_mark = ''\r\n    data['description'] = html_text.handle(bsoup.select_one('.sm-goods__description-text').prettify())\r\n\r\n    save_res = save(data)\r\n    if not save_res:\r\n        return None\r\n\r\n    img_count = 0\r\n    for photo in bsoup.select('.sm-goods_main_photo-slider a'):\r\n        if img_count > 4:\r\n            break\r\n\r\n        img = request(photo['href'], stream=True, use_proxy=False)\r\n        if not img:\r\n            continue\r\n        img_count += 1\r\n\r\n        import tempfile\r\n        with tempfile.NamedTemporaryFile() as f:\r\n            for chunk in img.iter_content(1024):\r\n                f.write(chunk)\r\n\r\n            f.seek(0, 0)\r\n            save_img(conf.id_client, img_count, save_res['response']['id_event'], f)\r\n\r\n    return True\r\n\r\n\r\ndef check_activity(id_good):\r\n    def check(_res):\r\n        return bool(re.search('window.globals.cardModel = ', _res.text))\r\n\r\n    cookies = _set_city()\r\n    res = request(\r\n        'http://www.sportmaster.ru/product/{good_p}/'.format(good_p=id_good),\r\n        id_client=conf.id_client,\r\n        check_function=check,\r\n        cookies=cookies,\r\n        encoding=conf.encoding\r\n    )\r\n    if not res:\r\n        return None\r\n\r\n    taking_avail = re.search('\"anySkuAvailableForTakingOn\":true', res.text)\r\n    shiping_avail = re.search('\"anySkuAvailableForShipping\":true', res.text)\r\n\r\n    return bool(re.search('sm-goods_main_details_prices_old-price', res.text)) \\\r\n        and not(taking_avail is None and shiping_avail is None)\r\n\r\n\r\ndef research():\r\n    exists_goods = []\r\n\r\n    # Get the list of all existing promotions for the current site\r\n    event_list = get_event_list(conf.id_client, conf.domain)\r\n    if event_list is not None:\r\n        for event in event_list['response']['events']:\r\n            id_good = re.search('product/(\\d+)/$', event['link'])\r\n            is_active = check_activity(id_good.group(1))\r\n            if is_active is not None and is_active:\r\n                exists_goods.append(id_good.group(1))\r\n\r\n            elif is_active is not None and not is_active:\r\n                deactivate(conf.id_client, event['id'])\r\n\r\n    return exists_goods\r\n\r\n\r\ndef start():\r\n    exists_goods = research()\r\n\r\n    for id_category in conf.category_replace:\r\n        if id_category is not None:\r\n            parse_category(id_category, conf.category_replace[id_category], exists_goods)\r\n\r\n\r\ndef _set_city():\r\n    def 
check(_res):\r\n        return bool(re.search('OK', _res.text)) and 'apple' in _res.cookies\r\n\r\n    res = request(\r\n        'http://www.sportmaster.ru/user/profile/city_confirm.do?cityid={city_p}'.format(city_p=conf.id_city),\r\n        id_client=conf.id_client,\r\n        check_function=check\r\n    )\r\n\r\n    if res is None:\r\n        return None\r\n\r\n    else:\r\n        return {\r\n            'apple': res.cookies['apple'],\r\n        }\r\n","sub_path":"robots/parser/sportmaster/grabber.py","file_name":"grabber.py","file_ext":"py","file_size_in_byte":6365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"436383456","text":"# -*- coding: utf-8 -*-\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.core.context_processors import csrf\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django import template\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.views import login as django_login\nfrom django.utils import timezone\nimport os\nimport json\nfrom plugin.translate_baidu import translate_baidu\nfrom plugin.translate_iciba import translate_iciba\nfrom fanyi.models import *\n\ndef index(request):\n    return HttpResponseRedirect('/translate')\n    \ndef default_404(request):\n    ctx ={}\n    ctx.update(csrf(request))\n    filename = os.getcwd()+'/templates/404.html'\n    fin = open(filename,'rb')\n    html = fin.read()\n    fin.close()\n    t = template.Template(html)\n    s = t.render(template.Context(ctx))\n    return HttpResponse(s)\n    \ndef is_big0(s):\n    bFlag = True\n    if len(s)>0 and ord(s[0])>=ord('A') and ord(s[0])<=ord('Z'):\n        for _ in s[1:]:\n            if ord(_)>=ord('A') and ord(_)<=ord('Z'):\n                bFlag = False\n                break\n    else:\n        bFlag = False\n    \n    return bFlag\n    \ndef get_translate_result(q):\n    q = ' '.join(q.split())\n    if not q: return ''\n    \n    bWanted = True\n    r = q.split()\n    if len(r)<=3:\n        if is_big0(r[0]): #By contrast => by contrast ; pasted text often has its first letter capitalized, which would break the dictionary lookup.\n            if 1==len(r):\n                q = r[0].lower()\n            else:\n                q = r[0].lower()+' '+' '.join(r[1:])\n    else:\n        bWanted = False\n        \n    if history.objects.filter(q=q).count()>0: # ==1\n        bWanted = False\n        hs = history.objects.filter(q=q)\n        h = hs[0]\n        res_baidu = h.j_baidu\n        res_iciba = h.j_iciba\n        h.status += 1\n        h.update_time = timezone.now()\n        h.save()\n    else:\n        try:\n            res_baidu = translate_baidu(q)\n        except:\n            res_baidu = ''\n        \n        try:\n            if bWanted:\n                res_iciba = translate_iciba(q)\n            else:\n                res_iciba = ''\n        except:\n            res_iciba = ''\n            bWanted = False\n    try:\n        if res_baidu:\n            j = json.loads(res_baidu)\n            res_baidu_show = j['trans_result'][0]['dst']\n        else:\n            res_baidu_show = ''\n    except Exception as e:\n        res_baidu_show = ''\n        print(e)\n    \n    try:\n        h_abstract = ''\n        if res_iciba:\n            j = json.loads(res_iciba)\n            res_iciba_show = '英 ['+j['symbols'][0]['ph_en']+'] 美 ['+j['symbols'][0]['ph_am']+']\\n'\n            \n            for p in j['symbols'][0]['parts']:\n                part = p['part']\n                res_iciba_show += part+'\\n'\n                for m in p['means']:\n                    res_iciba_show += m+'; '\n                    h_abstract+= m+'; '\n                res_iciba_show += '\\n'\n        else:\n            res_iciba_show = ''\n            bWanted = False\n    except Exception as e:\n        res_iciba_show = ''\n        bWanted = False\n        print(e)\n    \n    if bWanted:\n        h = history()\n        h.q = q\n        h.h_abstract = h_abstract[0:128]\n        h.j_baidu = res_baidu\n        h.j_iciba = res_iciba\n        h.status = 1\n        h.save()\n    \n    return res_iciba_show if res_iciba_show else res_baidu_show # prefer the iciba (Kingsoft PowerWord) result\n    \ndef get_history():\n    hs = history.objects.all().order_by('-update_time')\n    s = ''\n    
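# Note (added): hs comes back newest-first via order_by('-update_time');\n    # only the first ';'-separated meaning of each stored abstract is kept per line.\n    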
for _ in hs:\n        h_abstract = _.h_abstract.split(';')[0]\n        s += _.q+' '+h_abstract+'\\n'\n    return s\n    \ndef translate(request):\n    if not request.POST:\n        ctx ={}\n        ctx['history'] = get_history()\n        ctx.update(csrf(request))\n        filename = os.getcwd()+'/templates/translate.html'\n        fin = open(filename,'rb')\n        html = fin.read()\n        fin.close()\n        t = template.Template(html)\n        s = t.render(template.Context(ctx))\n        return HttpResponse(s)\n    else:\n        q = request.POST.get('q','')\n        r = get_translate_result(q)\n        \n        ctx ={}\n        ctx['q'] = q\n        ctx['translate_result'] = r\n        ctx['history'] = get_history()\n        ctx.update(csrf(request))\n        filename = os.getcwd()+'/templates/translate.html'\n        fin = open(filename,'rb')\n        html = fin.read()\n        fin.close()\n        t = template.Template(html)\n        s = t.render(template.Context(ctx))\n        return HttpResponse(s)\n    \n    ","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"322778219","text":"#!//usr/bin/env python\n#-*- coding:utf-8 -*-\n\nfrom django.shortcuts import render,HttpResponse\nfrom app01.forms import account as AccountForm\nfrom app01 import models\n\n\n\ndef login(request):\n    obj = AccountForm.LoginForm(request.POST)\n    if request.method == 'POST':\n        return render(request,'account/login.html',{'obj':obj})\n    return render(request, 'account/login.html',{'obj':obj})\n\ndef useradd(request):\n    # add user types\n    models.UserType.objects.create(typelist='超级用户')\n    models.UserType.objects.create(typelist='金牌用户')\n    models.UserType.objects.create(typelist='普通用户')\n    # add user groups\n    models.UserGroup.objects.create(caption='CEO',user_type_id=1)\n    models.UserGroup.objects.create(caption='CFO',user_type_id=2)\n    models.UserGroup.objects.create(caption='CTO',user_type_id=3)\n    # add hosts\n    models.Host.objects.create(hostname='a01.shuaige.com',ip='1.1.1.1',user_group_id=1)\n    models.Host.objects.create(hostname='a02.shuaige.com',ip='2.2.2.2',user_group_id=2)\n    models.Host.objects.create(hostname='a03.shuaige.com',ip='3.3.3.3',user_group_id=3)\n    return HttpResponse('OK')","sub_path":"day19/study/Django_lastday/app01/views/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"547707913","text":"# 1. import unittest\nimport time\nimport unittest\nfrom selenium import webdriver\n\n\n# every test case class must inherit from unittest.TestCase\nclass TestCaseSearch(unittest.TestCase):\n\n    # test cases are member methods whose names start with test_\n    def test_01_search_success(self):\n        \"\"\"\n        Test case for searching a product\n        \"\"\"\n        driver = webdriver.Chrome(executable_path='drivers/chromedriver.exe')\n        driver.maximize_window()\n        driver.get('http://132.232.44.158:9999/shopxo/')\n        driver.find_element_by_id('search-input').send_keys('华为')\n        driver.find_element_by_id('ai-topsearch').click()\n        time.sleep(3) \n\n        title = \"华为 - ShopXO企业级B2C电商系统提供商 - 演示站点\"\n        # assert driver.title == title\n        self.assertEqual(driver.title, title)\n\n        driver.quit()\n\n    def test_02_error(self):\n        a = [1,3]\n        print(a[3])\n\n    def test_03_failed(self):\n        self.assertEqual(1,2, \"1 does not equal 2\")\n\n    def test_04_assert(self):\n        self.assertTrue(1==1)\n\n# entry point used when running the script directly\n# discovers all methods whose names start with test_\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"UnittestTest/cases/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"568661746","text":"\"\"\" \nThe 
Command handler\n\"\"\"\nfrom . import command\ntry:\n import command\nexcept:\n pass\n\nclass CommandHandler():\n \"\"\" The command handler object - handles triggering commands \"\"\"\n def __init__(self, bot, plugins):\n self.initializers = command.plugins_to_initializers(plugins)\n for function in self.initializers:\n function(bot)\n self.commands = command.plugins_to_commands(plugins)\n self.command_names = [command.main_hook for command in self.commands]\n if self.commands == []:\n raise RuntimeError(\"No commands found!\")\n print(\"%d commands initialized.\" % (len(self.commands)))\n\n def trigger(self, trigger):\n \"\"\" Try to trigger a command with hook \"\"\"\n for command_ in self.commands:\n if command_.is_triggered_by(trigger):\n return command_\n \n return False\n\n def trigger_short(self, trigger):\n \"\"\" Try to trigger a command with shorthook \"\"\"\n for command_ in self.commands:\n if command_.is_triggered_by(trigger, short=True):\n return command_\n\n return False\n","sub_path":"utils/commandhandler.py","file_name":"commandhandler.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"139092416","text":"import os\nimport argparse\nfrom os.path import abspath, dirname\nimport sys\nimport time\nimport logging\nimport numpy as np\n\nimport torch\nimport torchvision.transforms as T\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nsys.path.append(dirname(dirname(abspath(__file__))))\nfrom datasets import modelnet\nfrom models.config_svm import config as cfg\nfrom utils import data_transform\nfrom models import voxelnet_svm\n\n\ndef main():\n torch.backends.cudnn.enable = True\n logger = init_logger()\n logger.info(str(args))\n save_scripts()\n bin_count = voxel_count()\n logger.info(\"First 30 bins of training data: \\n{}\".format(str(bin_count[0][:30])))\n logger.info(\"Vacant percentage: {:2f}%\".format(100*(1-np.sum(bin_count[0])/30**3)))\n logger.info(\"First 30 bins of validation data: \\n{}\".format(str(bin_count[1][:30])))\n logger.info(\"Vacant percentage: {:2f}%\".format(100*(1-np.sum(bin_count[1])/30**3)))\n transform = T.Compose([data_transform.RotatePC(),\n data_transform.JitterPC(0.01, 0.05),\n data_transform.AppendCenteredCoord(cfg)])\n # To Tensor done in collate function\n # data_transform.ToTensor()])\n if args.few:\n train_dataset = modelnet.FewModelNet(args.dset_dir, 'train', transform, args.num_class,\n num_perclass=9)\n else:\n train_dataset = modelnet.ModelNet(args.dset_dir, 'train', transform)\n\n val_dataset = modelnet.ModelNet(args.dset_dir, 'test', transform)\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=False,\n num_workers=6, collate_fn=customized_collate, pin_memory=True, drop_last=False)\n # shuffle val_dataset since I drop last batch\n val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False,\n num_workers=6, collate_fn=customized_collate, pin_memory=True, drop_last=False)\n\n net = voxelnet_svm.VoxelNet(args.num_class, input_shape=(cfg.D, cfg.H, cfg.W))\n logger.info(\"Total # parameters: {}\".format(sum([p.numel() for p in net.parameters()])))\n load_checkpoint(logger, net)\n # logger.info(\"trainable # parameters: {}\".format(sum([p.numel() for p in net.parameters() if p.requires_grad])))\n for name, param in net.named_parameters():\n if param.requires_grad == True:\n logger.info(\"Trainable params {}: {} {}\".format(name, param.size(), param.numel()))\n 
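# Note (added): parameters whose requires_grad was turned off elsewhere\n        # are only logged here; they are assumed to stay frozen during training.\n        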
else:\n logger.info(\"Non-trainable params {}: {} {}\".format(name, param.size(), param.numel()))\n net.cuda()\n # optimizer = optim.SGD(net.parameters(), args.lr, weight_decay=1e-5)\n # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.8)\n\n # criterion = torch.nn.CrossEntropyLoss()\n\n # best_acc = 0.\n # for i in range(args.max_epoch):\n # scheduler.step()\n # logger.info(\"Epoch: {} Lr: {}\".format(i+1, scheduler.get_lr()[0]))\n # train_one_epoch(net, train_loader, optimizer, criterion, logger)\n # acc = val_one_epoch(net, val_loader, logger, best_acc)\n # if acc > best_acc:\n # best_acc = acc\n # torch.save(net.state_dict(), os.path.join(args.log_dir, \"best.pth.tar\"))\n # if i % 5 == 0 or (i+1) % args.max_epoch == 0:\n # torch.save(net.state_dict(), os.path.join(args.log_dir, \"last.pth.tar\"))\n logger.info(\"Start computing train dset features...\")\n train_features, train_labels = get_svm_feature(net, train_dataset, train_loader, logger)\n print(train_features)\n np.save(\"../data/svm/m10_train_features.npy\", train_features)\n np.save(\"../data/svm/m10_train_labels.npy\", train_labels)\n logger.info(\"Start computing val dset features...\")\n val_features, val_labels = get_svm_feature(net, val_dataset, val_loader, logger)\n print(val_features)\n np.save(\"../data/svm/m10_val_features.npy\", val_features)\n np.save(\"../data/svm/m10_val_labels.npy\", val_labels)\n return\n\n\ndef get_svm_feature(net, dataset, loader, logger):\n net.eval()\n\n t0 = time.time()\n features = np.zeros((len(dataset), 128), dtype=np.float32)\n labels = np.zeros((len(dataset),), dtype=np.int)\n features = torch.from_numpy(features).cuda()\n for i, (voxel_features, voxel_coords, label) in enumerate(loader):\n voxel_features = Variable(voxel_features.cuda())\n voxel_coords = Variable(voxel_coords.cuda())\n # label = Variable(label.cuda())\n\n features[i] = net(voxel_features, voxel_coords).data\n labels[i] = label\n\n features = features.cpu().numpy()\n t1 = time.time()\n logger.info(\"Finish computing all features using {} sec.\".format(t1 - t0))\n return features, labels\n\n\ndef train_one_epoch(net, train_loader, optimizer, criterion, logger):\n net.train()\n\n t0 = time.time()\n correct = 0\n total_ins = 0\n num_batch = len(train_loader)\n total_loss = 0.\n\n if args.few:\n log_interval = len(train_loader)\n else:\n log_interval = 100 if args.num_class == 30 else 20\n\n for i, (voxel_features, voxel_coords, label) in enumerate(train_loader):\n voxel_features = Variable(voxel_features.cuda())\n voxel_coords = Variable(voxel_coords.cuda())\n label = Variable(label.cuda())\n\n optimizer.zero_grad()\n score = net(voxel_features, voxel_coords)\n loss = criterion(score, label)\n loss.backward()\n optimizer.step()\n\n _, pred = torch.max(score, dim=1)\n correct += label.eq(pred).sum().data.cpu().numpy()[0]\n total_ins += len(label)\n total_loss += loss.data.cpu().numpy()[0]\n\n if (i + 1) % log_interval == 0:\n logger.info(\"\\t\\titer {}/{}: loss {:.4f}, train_acc {:.2f}%\".format\n (i+1, num_batch, total_loss/log_interval, 100.*correct/total_ins))\n correct = 0\n total_ins = 0\n total_loss = 0.\n # print(type(i), type(num_batch), type(total_loss), type(correct), type(total_ins))\n # logger.info(\"\\titer {}/{}: loss, train_acc \".format(int(i+1), int(num_batch)))\n\n t1 = time.time()\n logger.info(\"\\t\\tTimer: {:.2f} sec.\".format(t1-t0))\n return\n\n\ndef val_one_epoch(net, val_loader, logger, best_acc):\n net.eval()\n\n t0 = time.time()\n correct = 0.\n total_ins = 0.\n\n for i, 
(voxel_features, voxel_coords, label) in enumerate(val_loader):\n voxel_features = Variable(voxel_features.cuda())\n voxel_coords = Variable(voxel_coords.cuda())\n label = Variable(label.cuda())\n score = net(voxel_features, voxel_coords)\n _, pred = torch.max(score, dim=1)\n correct += label.eq(pred).sum().data.cpu().numpy()[0]\n total_ins += len(label)\n\n t1 = time.time()\n acc = 100. * correct / total_ins\n logger.info(\"\\t\\tVal: val_acc {:.2f}%/{:.2f}%\".format(acc, best_acc))\n logger.info(\"\\t\\tTimer: {:.2f} sec.\".format(t1 - t0))\n return acc\n\n\ndef init_logger():\n os.makedirs(args.log_dir, exist_ok=False)\n\n logger = logging.getLogger('logger')\n logger.setLevel(logging.DEBUG)\n log_fname = os.path.join(args.log_dir, \"log.txt\")\n fh = logging.FileHandler(log_fname)\n ch = logging.StreamHandler()\n formatter = logging.Formatter('[%(asctime)s:%(levelname)s] %(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return logger\n\n\ndef customized_collate(batch):\n voxel_features = []\n voxel_coords = []\n label = []\n for i, sample in enumerate(batch):\n voxel_features.append(sample[0])\n voxel_coords.append(np.pad(sample[1], ((0, 0), (1, 0)),\n mode='constant', constant_values=i))\n label.append(sample[2])\n\n voxel_features = torch.from_numpy(np.concatenate(voxel_features))\n # voxel_coords = torch.from_numpy(np.concatenate(voxel_coords).astype(np.long))\n voxel_coords = torch.from_numpy(np.concatenate(voxel_coords).astype(np.long))\n # label = torch.from_numpy(np.concatenate(label))\n label = torch.from_numpy(np.array(label))\n return voxel_features, voxel_coords, label\n\n\ndef voxel_count():\n train_data = np.load(os.path.join(args.dset_dir, \"pc_train.npy\"))\n val_data = np.load(os.path.join(args.dset_dir, \"pc_test.npy\"))\n bin_count = [np.zeros((2048,), dtype=np.float), np.zeros((2048,), dtype=np.float)]\n for i, dataset in enumerate([train_data, val_data]):\n for j in range(len(dataset)):\n pc = dataset[j]\n voxel_coords = ((pc - np.array([cfg.xrange[0], cfg.yrange[0], cfg.zrange[0]])) /\n (cfg.vw, cfg.vh, cfg.vd)).astype(np.int32)\n\n # voxel_coords = voxel_coords[:, [2, 1, 0]]\n _, _, voxel_counts = np.unique(voxel_coords, axis=0,\n return_inverse=True, return_counts=True)\n bin_count_j = np.bincount(voxel_counts, minlength=2048)\n bin_count[i] += bin_count_j\n\n bin_count[0] = bin_count[0] / len(train_data)\n # bin_count[0][0] = cfg.D * cfg.H * cfg.W - np.sum(bin_count[0])\n bin_count[1] = bin_count[1] / len(val_data)\n # bin_count[1][0] = cfg.D * cfg.H * cfg.W - np.sum(bin_count[1])\n return bin_count\n\n\ndef save_scripts():\n os.system(\"cp {} {}\".format(\"../datasets/modelnet.py\", os.path.join(args.log_dir, \"modelnet.py\")))\n os.system(\"cp {} {}\".format(\"../models/config.py\", os.path.join(args.log_dir, \"config.py\")))\n os.system(\"cp {} {}\".format(\"../models/voxelnet.py\", os.path.join(args.log_dir, \"voxelnet.py\")))\n os.system(\"cp {} {}\".format(\"../utils/data_transform.py\", os.path.join(args.log_dir, \"data_transform.py\")))\n os.system(\"cp {} {}\".format(__file__, os.path.join(args.log_dir, \"script.py\")))\n\n\ndef load_checkpoint(logger, net):\n logger.info(\"Loading saved model...\")\n assert os.path.isfile(args.ckpt_fname), \"Error: no checkpoint file found!\"\n\n old_state_dict = torch.load(args.ckpt_fname)\n new_state_dict = net.state_dict()\n\n same_weight = {}\n for k, v in old_state_dict.items():\n if k in new_state_dict and new_state_dict[k].size() == 
old_state_dict[k].size():\n logger.info(\"same weight: {}\".format(k))\n same_weight[k] = v\n else:\n logger.info(\"discarded weight: {}\".format(k))\n\n new_state_dict.update(same_weight)\n net.load_state_dict(new_state_dict)\n\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dset_dir\", type=str, required=True)\n parser.add_argument(\"--num_class\", type=int, required=True)\n parser.add_argument(\"--max_epoch\", type=int, required=True)\n parser.add_argument(\"--lr\", type=float, required=True)\n parser.add_argument(\"--few\", action='store_true')\n parser.add_argument(\"--log_dir\", type=str, required=True)\n parser.add_argument(\"--ckpt_fname\", type=str)\n args = parser.parse_args()\n main()\n","sub_path":"scripts/train_svm.py","file_name":"train_svm.py","file_ext":"py","file_size_in_byte":11015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"287684094","text":"# -*- coding: utf-8 -*-\n\nfrom sanic import Sanic\nfrom sanic import Blueprint\nfrom sanic.response import json\n\nbp = Blueprint('my_blueprint', url_prefix='/blueprint')\n\n@bp.route('/')\nasync def bp_root(request):\n return json({'my': 'blueprint'})\n\n# multiprocessor\n# @bp.listen('before_server_start')\n# async def before_start():\n# print('before_server_start')\n\n# @bp.listen('after_server_start')\n# async def after_start():\n# print('after_server_start')\n\n# @bp.listen('before_server_stop')\n# async def before_stop():\n# print('before_server_stop')\n\n# @bp.listen('after_server_stop')\n# async def after_stop():\n# print('after_server_stop')\n\napp = Sanic(__name__)\napp.blueprint(bp)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","sub_path":"practice_sanic/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60518542","text":"#% matplotlib inline\n#% config InlineBackend.figure_format = 'retina' # For mac users with Retina display\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pygtc\nimport os, sys\nimport matplotlib.pyplot as plt\n#import matplotlib.gridspec as gridspec\nimport emcee\n#import corner\n\nwdir = '/Users/justinvega/Documents/GitHub/dyn-masses/fit_Mdyn/post/posteriors/postpack/'\n\n# emcee backend files\npostfiles = np.loadtxt(wdir+'posteriors.txt', dtype=str)\n\n#fname = 'simp3_std_medr_highv_1024pix_noiseless'\n\n# scale burn-in\nburnin = 500\n\n# calculate autocorrelation time as a function of step?\ncalc_tau = False\nNtau = 100\n\n# set parameter labels, truths\nlbls = [r'$i$', r'$PA$', r'$M$', r'$r_l$', r'$z0$', r'$z_{\\psi}$', r'$Tb_{0}$', r'$Tb_q$', r'$T_{\\rm{back}}$', r'$dV_{0}$', r'$v_{\\rm{sys}}$', r'$dx$', r'$dy$']\ntheta = [40, 130, 0.7, 200, 2.3, 1.0, 205., 0.5, 20., 347.7, 4., 0., 0.]\n\n# load the backend; parse the samples\ndef make_chain(filename):\n reader = emcee.backends.HDFBackend(wdir + filename)\n all_samples = reader.get_chain(discard=0, flat=False)\n samples = reader.get_chain(discard=burnin, flat=False)\n log_prob_samples = reader.get_log_prob(discard=burnin, flat=False)\n log_prior_samples = reader.get_blobs(discard=burnin, flat=False)\n maxlnprob = np.max(reader.get_log_prob(discard=0, flat=False))\n minlnprob = np.min(reader.get_log_prob(discard=0, flat=False))\n #print(minlnprob, maxlnprob)\n nsteps, nwalk, ndim = samples.shape[0], samples.shape[1], samples.shape[2]\n # corner plot to view 
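# --- Hedged sketch (not part of the original script): train_svm.py above only
# dumps 128-d features to ../data/svm/*.npy. Fitting the SVM those files are
# named for is the implied next step; only the file paths come from the script,
# the LinearSVC settings are assumptions.
import numpy as np
from sklearn.svm import LinearSVC

train_x = np.load("../data/svm/m10_train_features.npy")
train_y = np.load("../data/svm/m10_train_labels.npy")
val_x = np.load("../data/svm/m10_val_features.npy")
val_y = np.load("../data/svm/m10_val_labels.npy")

clf = LinearSVC(C=1.0)  # C=1.0 is a guessed default; tune on a held-out split
clf.fit(train_x, train_y)
print("val acc: {:.2f}%".format(100.0 * clf.score(val_x, val_y)))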
covariances\n levs = 1. - np.exp(-0.5*(np.arange(3)+1)**2)\n flat_chain = samples.reshape(-1, ndim)\n return(flat_chain)\n\n # for trace plot\n for i in range(ndim):\n fig = plt.figure(figsize=(5, 5))\n plt.plot(np.arange(nsteps), samples[:,:,i], alpha=0.3)\n plt.ylabel(lbls[i], fontsize=12)\n plt.title('Trace %s' % lbls[i])\n plt.xticks(fontsize=12)\n plt.yticks(fontsize=12)\n plt.show()\n\nnoiseless = make_chain(postfiles[0])\nregrid2x_noSSPnative = make_chain(postfiles[1])\nSRFonly = make_chain(postfiles[2])\nnoSSPnative = make_chain(postfiles[3])\nnobincov = make_chain(postfiles[4])\nregridonly = make_chain(postfiles[5])\n\nchainLabels= ['noiseless', 'regrid2x_noSSPnative', 'SRFonly', 'noSSPnative', 'nobincov', 'regridonly']\n\n#plt.figure(figsize=())\n\n#fig.tight_layout()\n\n# GTC = pygtc.plotGTC(chains=[noiseless,noSSPnative, SRFonly], truths=theta, paramNames=lbls, chainLabels=(chainLabels[0], chainLabels[3], chainLabels[2]),\n# truthLabels='Truth', legendMarker = 'All', customTickFont= {'family':'Arial', 'size':4}, figureSize=7.5)\n# GTC.savefig(wdir + 'comparisons/cornerplot_%s_%s_%s.png' % (chainLabels[0], chainLabels[3], chainLabels[2]), dpi=300)\n\n\n# # plot the traces\n# fig = plt.figure(figsize=(5, 5))\n# for idim in np.arange(ndim):\n# for iw in np.arange(nwalk):\n#\n#\n#\n#\n#\n#\n\n\n# noiseless, regrid2x_noSSPnative, nobincov [0, 1, 4]\n# noiseless, noSSPnative, SRFonly [0, 3, 2]\n\n\n# cornerplots\na = np.stack((noiseless[:,6], noiseless[:,7], noiseless[:,8], noiseless[:,9]), axis=-1)\nb = np.stack((noSSPnative[:,6], noSSPnative[:,7], noSSPnative[:,8], noSSPnative[:,9]), axis=-1)\nc = np.stack((SRFonly[:,6], SRFonly[:,7], SRFonly[:,8], SRFonly[:,9]), axis=-1)\n\n\nGTC = pygtc.plotGTC(chains=[a, b, c], truths=[205, 0.5, 20, 347.7], paramNames=[r'$Tb_{0}$', r'$Tb_q$', r'$T_{\\rm{back}}$', r'$dV_{0}$'], chainLabels=(chainLabels[0], chainLabels[3], chainLabels[2]),\n truthLabels='Truth', legendMarker = 'All', customTickFont= {'family':'Arial', 'size':9}, figureSize=7.5)\nGTC.savefig(wdir + 'comparisons/cornerplot_%s_%s_%s_tb__tbq_tback_dV0.png' % (chainLabels[0], chainLabels[3], chainLabels[2]), dpi=300)\n\n#plt.show()\n","sub_path":"fit_Mdyn/plot_pygtc.py","file_name":"plot_pygtc.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556786441","text":"import scrapy, os, ctypes\r\nimport easygui as eg\r\nfrom scrapy.http import FormRequest\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------#\r\nclass GoodReads(scrapy.Spider):\r\n # init\r\n name = 'gg'\r\n start_urls = [\r\n 'https://www.goodreads.com/user/sign_in'\r\n ]\r\n email = 'YOUREMAIL@gmail.com' # ***change this****\r\n passwd = 'YOURPASSWORD' # ***change this****\r\n\r\n # for hrefs\r\n gr = 'https://www.goodreads.com'\r\n link = 'https://www.goodreads.com/user/show/35791861-hamidreza' # user page url ***change this****\r\n \r\n # read & cr & wtr\r\n shelves = {}\r\n\r\n # order\r\n order = 'read' # ***change this****\r\n\r\n # saving path\r\n path = 'C:\\\\Users\\\\Reza\\\\Desktop\\\\gdreads\\\\results\\\\'\r\n\r\n # name of the ordered file\r\n file_name = ''\r\n if order=='read':\r\n file_name='read'\r\n elif order=='cr':\r\n file_name='currently reading'\r\n elif order=='wtr':\r\n file_name='want to read'\r\n\r\n # for the book's name\r\n counter = 0\r\n\r\n months = {\r\n 'Jan' : '01',\r\n 'Feb' : '02',\r\n 'Mar' 
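# --- Hedged sketch: the pygtc.plotGTC call pattern used in plot_pygtc.py above,
# demonstrated on synthetic chains so it runs standalone; shapes, names and
# truth values here are illustrative only, not values from the posteriors.
import numpy as np
import pygtc

chain_a = np.random.multivariate_normal([0.0, 1.0], [[1.0, 0.4], [0.4, 1.0]], size=5000)
chain_b = np.random.multivariate_normal([0.2, 1.1], [[1.0, -0.3], [-0.3, 1.0]], size=5000)
fig = pygtc.plotGTC(chains=[chain_a, chain_b],
                    paramNames=[r'$\theta_1$', r'$\theta_2$'],
                    chainLabels=['run A', 'run B'],
                    truths=[0.0, 1.0], truthLabels='Truth')
fig.savefig('corner_demo.png', dpi=150)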
: '03',\r\n 'Apr' : '04',\r\n 'May' : '05',\r\n 'Jun' : '06',\r\n 'Jul' : '07',\r\n 'Aug' : '08',\r\n 'Sep' : '09',\r\n 'Oct' : '10',\r\n 'Nov' : '11',\r\n 'Dec' : '12'\r\n }\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------#\r\n # init parse\r\n def parse(self, response):\r\n values = {\r\n 'utf8' : '✓',\r\n 'authenticity_token' : response.css(\"form input::attr(value)\")[1].extract(),\r\n 'user[email]' : self.email,\r\n 'user[password]' : self.passwd,\r\n 'remember_me' : 'on',\r\n 'next' : 'Sign in',\r\n 'n' : response.css(\"form input::attr(value)\")[3].extract()\r\n }\r\n\r\n return FormRequest.from_response(response, formdata=values, callback=self.start_scraping)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------#\r\n # yield wanted page\r\n def start_scraping(self, response):\r\n yield scrapy.Request(self.link, self.shelf_finder)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------#\r\n # yield the wanted shelf\r\n def shelf_finder(self, response):\r\n self.shelves['read'] = self.gr + response.css('.userShowPageShelfListItem::attr(href)')[0].extract()\r\n self.shelves['cr'] = self.gr + response.css('.userShowPageShelfListItem::attr(href)')[1].extract()\r\n self.shelves['wtr'] = self.gr + response.css('.userShowPageShelfListItem::attr(href)')[2].extract()\r\n\r\n yield scrapy.Request(self.shelves[self.order], self.parse_book)\r\n\r\n#-------------------------------------------------------------------------------------------------------------------------------------------------#\r\n # yield the wanted shelf\r\n def parse_book(self, response):\r\n # open file\r\n f = open(self.path + f'{self.file_name}.py', 'a', encoding='utf-8') # for sorting\r\n\r\n book_dict = {}\r\n\r\n # scraper\r\n tbody = response.xpath(\"//tbody[@id='booksBody']/tr\")\r\n for x in tbody:\r\n # init book num\r\n self.counter += 1\r\n\r\n # book's name\r\n label = x.xpath(\"td[@class='field title']/div/a/text()\").extract_first().strip()\r\n \r\n # book's author\r\n author1 = x.xpath(\"td[@class='field author']/div/a/text()\").extract_first().strip()\r\n author = ''\r\n check = False\r\n for i in author1:\r\n if i==',':\r\n check = True\r\n break\r\n if check==True:\r\n author1 = author1.split(', ')\r\n author = author1[1]+' '+author1[0]\r\n else:\r\n author = author1\r\n\r\n # book's rating\r\n rate = x.xpath(\"td[@class='field rating']/div/div/@data-rating\").extract_first()\r\n \r\n # book's read date\r\n date = x.xpath(\"td[@class='field date_read']/div/div/div/span/text()\").extract_first()\r\n book_dict['label'] = label\r\n book_dict['author'] = author\r\n if self.order=='read': \r\n if rate is not None:\r\n book_dict['rate'] = rate.strip()\r\n else:\r\n book_dict['rate'] = 'NO_RATE'\r\n if date!=None:\r\n if date.strip()!='not set':\r\n book_dict['date'] = date.strip()\r\n date_list = date.strip().split(' ')\r\n if len(date_list)==3:\r\n date_list[1] = date_list[1][0:2]\r\n book_dict['number'] = date_list[2] + self.months[date_list[0]] + date_list[1]\r\n elif len(date_list)==2:\r\n book_dict['number'] = date_list[1] + self.months[date_list[0]] + '00'\r\n else:\r\n book_dict['number'] = date_list[0] + '0000'\r\n else:\r\n book_dict['number'] = 'NO_NUMBER'\r\n else:\r\n book_dict['date'] = 'NO_DATE'\r\n 
book_dict['number'] = 'NO_NUMBER'\r\n\r\n            f.write(f'{book_dict}')\r\n            f.write(',\\n')\r\n        f.close()\r\n\r\n        next_page = response.xpath(\"//a[@class='next_page']/@href\").extract_first()\r\n        if next_page is not None:\r\n            yield response.follow(\"https://www.goodreads.com\"+next_page, self.parse_book)","sub_path":"easy-to-use/goodreads/goodreads/spiders/gg.py","file_name":"gg.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"391143410","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nclass RBF:\n\n    '''a class for the Radial Basis Function Network'''\n    '''We will have the input layer, the RBF layer and the output layer'''\n\n    def __init__(self):\n        '''arguments:\n        RBF_nodes: number of nodes in the RBF layer\n        d_inputs: the dimensions of the input\n        d_output: the dimension of the output\n        min_input: the vector of minimum values that the input can have in each dimension\n        max_input: same as min_input but with maximum values\n        std_dev: the standard deviation of the RBF nodes\n        method: initialization method for the means of the RBF nodes\n        '''\n\n        '''computing mean and standard deviation for the RBF nodes'''\n        self.RBF_means=np.random.rand(10,2)\n\n    def CL(self,X,epochs=100,eta=0.1):\n        for epoch in range(epochs):\n            #permutation = np.random.permutation(X.shape[0])\n            #X=X[permutation]\n            for x in X:\n                most_similar=None\n                best_score=np.inf\n                for i,node in enumerate(self.RBF_means):\n                    dist=np.linalg.norm(x-node)\n                    if dist< best_score:\n                        best_score=dist\n                        most_similar=i\n                #find neighbours\n                neighbours_index=[]\n                for i,node in enumerate(self.RBF_means):\n                    if i!=most_similar:\n                        if i>most_similar-(epochs-epoch)//10 and i<most_similar+(epochs-epoch)//10:\n                            neighbours_index.append(i)\n                        #wrap the neighbourhood around the 10-node ring when the window runs past index 0 or 9\n                        elif most_similar-(epochs-epoch)//10<0 or most_similar+(epochs-epoch)//10> 9:\n                            if i>most_similar-(epochs-epoch)//10+9 or i<most_similar+(epochs-epoch)//10-9:\n                                neighbours_index.append(i)\n                self.RBF_means[most_similar]=self.RBF_means[most_similar]+eta*(x-self.RBF_means[most_similar])\n                for neighbour in neighbours_index:\n                    self.RBF_means[neighbour] = self.RBF_means[neighbour] + eta * (x - self.RBF_means[neighbour])\n\n    def predict(self,X):\n        pos=[]\n        for x in X:\n            most_similar = None\n            best_score = np.inf\n            for i, node in enumerate(self.RBF_means):\n                if i not in pos:\n                    dist = np.linalg.norm(x - node)\n                    if dist < best_score:\n                        best_score = dist\n                        most_similar = i\n\n            pos.append(most_similar)\n        return pos\n\n\n\n#data = np.genfromtxt('cities.dat',dtype='i')\ninfile = open('cities.dat', 'r')\n\nwith open('cities.dat', 'rb') as f:\n    clean_lines = (line.replace(b';', b' ').replace(b',', b' ') for line in f)\n    city = np.genfromtxt(clean_lines, skip_header=4)\n    cities=[0,1,2,3,4,5,6,7,8,9]\n\n\n\nrbf=RBF()\nrbf.CL(city)\nindexes=rbf.predict(city)\nsorted_cities = [c for _,c in sorted(zip(indexes,cities))]\nfor i,c in enumerate(sorted_cities):\n    if i < len(sorted_cities)-1:\n        plt.plot((city[c,0],city[sorted_cities[i+1],0]),(city[c,1],city[sorted_cities[i+1],1]),'ro-')\n    else:\n        plt.plot((city[c, 0], city[sorted_cities[0], 0]), (city[c, 1], city[sorted_cities[0], 1]), 'ro-')\n    #plt.scatter(city[c,0],city[c,1])\nplt.show()\n#sorted_animals = [name for _,name in sorted(zip(indexes,names))]\n#print(sorted_animals)","sub_path":"4.2.py","file_name":"4.2.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"561833622","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\nfrom openerp.tools.translate import _\nimport nc_webservice\nimport json\nimport logging\n_logger = 
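# --- Hedged sketch: the gg.py spider above writes one dict per book with a
# zero-padded 'number' key (YYYYMMDD-style, or 'NO_NUMBER' when no read date
# is set) precisely so the output can be sorted later. A minimal sort that
# keeps undated entries last; the sample data is made up.
books = [
    {'label': 'A', 'number': '20190312'},
    {'label': 'B', 'number': 'NO_NUMBER'},
    {'label': 'C', 'number': '20181105'},
]
books.sort(key=lambda b: (b['number'] == 'NO_NUMBER', b['number']))
print([b['label'] for b in books])  # -> ['C', 'A', 'B']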
logging.getLogger(__name__)\nfrom openerp.osv import osv\nimport datetime\n\nclass order(models.Model):\n _name = 'tender.order'\n\n tender_id = fields.Many2one('tender.tender', readonly=True)\n supplement_id = fields.Many2one('tender.supplement', readonly=True)\n supplier_id = fields.Many2one('tender.partner', required=True, readonly=True)\n order_no = fields.Char(required=True, readonly=True)\n order_line_ids = fields.One2many('tender.order_line', 'order_id', readonly=True)\n consult_clause = fields.Text(readonly=True)\n text = fields.Text()\n invoice_count = fields.Integer(compute='get_invoice_count')\n payment_count = fields.Integer(compute='get_payment_count')\n storage_count = fields.Integer(compute='get_storage_count')\n nc_contract_no = fields.Char(readonly=True)\n state = fields.Selection([('draft', 'Draft'),\n ('done', 'Done')],\n string='Status', index=True, readonly=True, default='draft',\n track_visibility='onchange', copy=False)\n operid = fields.Many2one('res.users', required=True, readonly=True)\n pk_corp = fields.Char(readonly=True)\n @api.one\n def write(self, vals):\n for field in vals:\n if field not in ['state', 'text', 'nc_contract_no']:\n raise osv.except_osv(_('Error'), _(\"state != draft\"))\n return super(order, self).write(vals)\n @api.one\n def test(self):\n jsonhead = []\n jsonbody = []\n for pk_corp in list(set(self.order_line_ids.mapped('pk_corp'))):\n head = {}\n head['unitcode'] = pk_corp\n jsonhead.append(head)\n for line_id in self.env['tender.order_line'].search([['order_id', '=', self.id]]):\n if line_id.pk_corp == pk_corp:\n body = {}\n body['line_id'] = line_id.id\n jsonbody.append(body)\n raise osv.except_osv(_('Error'), jsonhead + jsonbody)\n\n @api.one\n def create_order(self):\n nc_inv = nc_webservice.insertpuorder()\n jsonhead = {}\n jsonhead['unitcode'] = self.pk_corp\n jsonhead['dorderdate'] = datetime.datetime.strptime(self.create_date, '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d')\n jsonhead['cemployeecode'] = self.operid.name\n jsonhead['coperator'] = 'odoo'\n jsonhead['custcode'] = self.supplier_id.nc_no\n jsonbody = []\n i = 1\n for line_id in self.env['tender.order_line'].search([['order_id', '=', self.id]]):\n if line_id.pk_corp == self.pk_corp:\n body = {}\n body['crowno'] = i\n body['cbaseid'] = line_id.invcode\n body['nordernum'] = line_id.num\n body['norgtaxprice'] = line_id.price\n body['ntaxrate'] = line_id.tax\n body['yldef2'] = line_id.line_id.cpraybill_bid\n jsonbody.append(body)\n i+=1\n _logger.warn('%s', json.dumps([jsonhead]))\n _logger.warn('%s', json.dumps(jsonbody))\n resultstr = nc_inv.insertpuorder(json.dumps([jsonhead]), json.dumps(jsonbody))\n _logger.warn('%s', resultstr)\n result = json.loads(resultstr)[0]\n if result['cdbs'] == u'N':\n raise osv.except_osv(_('Error'), result['cdbs'] + ',' + result['ddid'] + ',' + result['sbyy'])\n else:\n self.write({'state': 'done', 'nc_contract_no': result['ddid']})\n @api.one\n def to_draft(self):\n self.write({'state': 'draft'})\n @api.one\n def to_done(self):\n self.synchronous()\n self.write({'state': 'done'})\n @api.multi\n def create_invoice(self):\n try:\n compose_form_id = self.env['ir.model.data'].get_object_reference('tender', 'view_tender_invoice_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n return {\n 'name': _('Compose Invoice'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'tender.invoice',\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': {'default_order_id': self.id},\n }\n @api.multi\n 
def create_payment(self):\n try:\n compose_form_id = self.env['ir.model.data'].get_object_reference('tender', 'view_tender_payment_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n return {\n 'name': _('Compose Payment'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'tender.payment',\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': {'default_order_id': self.id},\n }\n @api.multi\n def create_storage(self):\n try:\n compose_form_id = self.env['ir.model.data'].get_object_reference('tender', 'view_tender_storage_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n default_storage_line_ids = []\n for order_line_id in self.order_line_ids:\n storage_line_ids = self.env['tender.storage_line'].search([['order_line_id', '=', order_line_id.id]])\n surplus_num = 0\n for storage_line_id in storage_line_ids:\n surplus_num += storage_line_id.storage_num\n default_storage_line_ids.append({'stock_id': order_line_id.stock_id.id,\n 'measname': order_line_id.measname,\n 'npraynum': order_line_id.npraynum,\n 'price': order_line_id.price,\n 'storage_num': float(order_line_id.npraynum) - surplus_num,\n 'order_line_id': order_line_id.id,\n 'surplus_num': float(order_line_id.npraynum) - surplus_num})\n return {\n 'name': _('Compose Storage'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'tender.storage',\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': {'default_order_id': self.id,\n 'default_storage_line_ids': default_storage_line_ids},\n }\n @api.multi\n def view_invoice(self):\n return {\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'tree'], [False, 'form']],\n 'res_model': 'tender.invoice',\n 'domain': [['order_id', '=', self.id]],\n }\n @api.multi\n def view_payment(self):\n return {\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'tree'], [False, 'form']],\n 'res_model': 'tender.payment',\n 'domain': [['order_id', '=', self.id]],\n }\n @api.multi\n def view_storage(self):\n return {\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'tree'], [False, 'form']],\n 'res_model': 'tender.storage',\n 'domain': [['order_id', '=', self.id]],\n }\n @api.one\n def get_invoice_count(self):\n self.invoice_count = self.env['tender.invoice'].search([('order_id', '=', self.id)]).ids.__len__()\n @api.one\n def get_payment_count(self):\n self.payment_count = self.env['tender.payment'].search([('order_id', '=', self.id)]).ids.__len__()\n @api.one\n def get_storage_count(self):\n self.storage_count = self.env['tender.storage'].search([('order_id', '=', self.id)]).ids.__len__()\n\n @api.model\n def create(self, context):\n context['order_no'] = self.env['ir.sequence'].get('order_no') or '/'\n return super(order, self).create(context)\n\nclass order_line(models.Model):\n _name = 'tender.order_line'\n\n measname = fields.Char(required=True, readonly=True)#计量单位\n npraynum = fields.Float(required=True, readonly=True)#需求数量\n buyer_received_date = fields.Date(readonly=True)#需求日期\n\n name = fields.Char(readonly=True)\n invcode = fields.Char(readonly=True)\n invspec = fields.Char(readonly=True)\n price = fields.Float(required=True, readonly=True)\n num = fields.Float(required=True, readonly=True)\n tax = fields.Float(required=True, readonly=True)\n order_id = fields.Many2one('tender.order', readonly=True)\n line_id = fields.Many2one('tender.line', readonly=True)\n quote_line_id = fields.Many2one('tender.quote_line', readonly=True)\n\n pk_corp = 
fields.Char(compute='_get_pk_corp')\n    @api.one\n    def _get_pk_corp(self):\n        self.pk_corp = self.sudo().line_id.requisition_id.unitcode\n","sub_path":"models/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":8954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"28298137","text":"from django.core.exceptions import \\\n    ImproperlyConfigured\nfrom django.db.models import Model\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import (\n    get_object_or_404, redirect, render)\nfrom django.views.generic import View\n\n\nclass DetailView(View):\n    context_object_name = ''\n    model = None\n    template_name = ''\n    template_name_suffix = '_detail'\n\n    def get(self, request, **kwargs):\n        self.kwargs = kwargs\n        self.object = self.get_object()\n        template_name = self.get_template_names()\n        context = self.get_context_data()\n        return render(\n            request, template_name, context)\n\n    def get_context_data(self):\n        context = {}\n        if self.object:\n            context_object_name = (\n                self.get_context_object_name())\n            if context_object_name:\n                context[context_object_name] = (\n                    self.object)\n        return context\n\n    def get_context_object_name(self):\n        if self.context_object_name:\n            return self.context_object_name\n        elif isinstance(self.object, Model):\n            return self.object._meta.model_name\n        else:\n            return None\n\n    def get_object(self):\n        slug = self.kwargs.get('slug')\n        if slug is None:\n            raise AttributeError(\n                \"{c} expects {a} parameter \"\n                \"from URL pattern.\".format(\n                    c=self.__class__.__name__,\n                    a='slug'))\n        if self.model:\n            return get_object_or_404(\n                self.model, slug__iexact=slug)\n        else:\n            raise ImproperlyConfigured(\n                \"{c} needs {a} attribute \"\n                \"specified to work.\".format(\n                    c=self.__class__.__name__,\n                    a='model'))\n\n    def get_template_names(self):\n        if self.template_name:\n            return self.template_name\n        return \"{app}/{model}{suffix}.html\".format(\n            app=self.object._meta.app_label,\n            model=self.object._meta.model_name,\n            suffix=self.template_name_suffix)\n\n\nclass ObjectCreateMixin:\n    form_class = None\n    template_name = ''\n\n    def get(self, request):\n        return render(\n            request,\n            self.template_name,\n            {'form': self.form_class()})\n\n    def post(self, request):\n        bound_form = self.form_class(request.POST)\n        if bound_form.is_valid():\n            new_object = bound_form.save()\n            return redirect(new_object)\n        else:\n            return render(\n                request,\n                self.template_name,\n                {'form': bound_form})\n\n\nclass ObjectDeleteMixin:\n    model = None\n    success_url = ''\n    template_name = ''\n\n    def get(self, request, slug):\n        obj = get_object_or_404(\n            self.model, slug__iexact=slug)\n        context = {\n            self.model.__name__.lower(): obj,\n        }\n        return render(\n            request, self.template_name, context)\n\n    def post(self, request, slug):\n        obj = get_object_or_404(\n            self.model, slug__iexact=slug)\n        obj.delete()\n        return HttpResponseRedirect(\n            self.success_url)\n\n\nclass ObjectUpdateMixin:\n    form_class = None\n    model = None\n    template_name = ''\n\n    def get(self, request, slug):\n        obj = get_object_or_404(\n            self.model, slug__iexact=slug)\n        context = {\n            'form': self.form_class(instance=obj),\n            self.model.__name__.lower(): obj,\n        }\n        return render(\n            request, self.template_name, context)\n\n    def post(self, request, slug):\n        obj = get_object_or_404(\n            self.model, slug__iexact=slug)\n        bound_form = self.form_class(\n            request.POST, instance=obj)\n        if bound_form.is_valid():\n            new_object = bound_form.save()\n            return redirect(new_object)\n        else:\n            context = {\n                'form': 
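# --- Hedged sketch: plain-Python version of the grouping pattern used by
# order.test()/order.create_order() above -- one payload head per distinct
# pk_corp, with the matching lines collected as its body. 'lines' is
# illustrative stand-in data, not the odoo recordset.
import json

lines = [{'id': 1, 'pk_corp': 'C001'},
         {'id': 2, 'pk_corp': 'C002'},
         {'id': 3, 'pk_corp': 'C001'}]

payloads = {}
for line in lines:
    payload = payloads.setdefault(line['pk_corp'],
                                  {'head': {'unitcode': line['pk_corp']}, 'body': []})
    payload['body'].append({'line_id': line['id']})

for corp, payload in payloads.items():
    print(corp, json.dumps(payload))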
bound_form,\n self.model.__name__.lower(): obj,\n }\n return render(\n request,\n self.template_name,\n context)\n","sub_path":"organizer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"163070823","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14 09:25:25 2019\nAPS /WEEK\n@author: Max\n\"\"\"\nimport APS_Data_Trans as adt\nimport pandas as pd\nimport pulp\n\nimport datetime\n\n#hyposthesis: All models can be done by all lines\n#mkdates=datetime.datetime.strftime(datetime.datetime.now(),'%Y-%m-%d')\n#mkdates='2018-05-21'\n##read in all data\n#a list of production line \nprodline=pd.read_excel(r'./data/prod_line_info.xlsx',names=['line_no','line_desp','staff_num','work_hour'])\nprodline.index=prodline['line_no']\nprod_line=prodline.index.tolist()\nline_num=len(prod_line)\n#read working days referenc table\n#working_days=pd.read_excel(.columns=['work_dates','type'])\nplan_dates=['2019-04-20','2019-04-21','2019-04-22','2019-04-23','2019-04-24','2019-04-25']\ndays=len(plan_dates)\n\n\n# standard work hours for each product line in the next week, suppose is defined as hour \n#stdH=10*6\n\nmodel_SAH=pd.read_excel(r\"./data/model_std_hour.xlsx\",names=['model_no','sah'])\n\n#read learn pace table,H/piece\npractice_pace=pd.read_excel(r\"./data/practice_curve.xlsx\",names=['uid','model_no','line_no','day_process','effi'])\n\n\n#read order table actual and other predicted confirmed by production manager \nrawPool=pd.read_excel(r'./data/orderPool.xlsx',names=['order_id','model_no','order_num','order_date','deli_date','order_type','priority','epst','deli_ahead'])\nrawPool.index=rawPool['order_id']\n#need to adjust the parameter (which could affect the deputy of order type)\n\n#read production records\n#prd_records=pd.read_excel(names=['line_id','order_id','model_no','prd_num','prd_date'])\n\n\n\n\n##transform and clean data,create order pool, practice matrice as input or reference data\n#order pool\norderPool=rawPool.copy()\n\n\n\n#dicts that contain the process time(day) produced by each line sole\nprocess_days=adt.process_day(orderPool,practice_pace,model_SAH,prodline)\n#process_day=new_dict\n\n#unique order_no. 
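# --- Hedged sketch: how the mixins above are meant to be combined with a
# plain View and a Django 1.x URLconf. Tag and TagForm are hypothetical
# app-level names, not defined in this file.
from django.conf.urls import url
from django.views.generic import View

from .forms import TagForm   # hypothetical form with a save() method
from .models import Tag      # hypothetical model with a slug field

class TagCreate(ObjectCreateMixin, View):
    form_class = TagForm
    template_name = 'organizer/tag_form.html'

class TagDetail(DetailView):
    model = Tag
    template_name = 'organizer/tag_detail.html'

urlpatterns = [
    url(r'^tag/create/$', TagCreate.as_view(), name='tag_create'),
    url(r'^tag/(?P<slug>[\w\-]+)/$', TagDetail.as_view(), name='tag_detail'),
]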
and model_no.\norderList=orderPool.index.tolist()\nmodelList=orderPool['model_no'].unique().tolist()\n\ndate_s=pd.Series(plan_dates)\n#practice matrix\n#practice_matrix=pd.DataFrame()\n#for l in prod_line:\n #for i_m in modelList:\n #a=practice_pace[practice_pace['line']==l&practice_pace['model']==i_m].reset_index('day_process')\n #ref=a['learn_pace'].to_list()\n #rlt_table=pd.DataFrame(adt.left_lower_matrix(ref))\n \n #rlt_table['model_no']=i_m\n #rlt_table['line']=l\n #rlt_table=rlt_table.merge(date_s,left_index=True,right_index=True)\n \n #practice_matrix=practice_matrix.append(rlt_table)\n \n \n#a dict that contains the relationship between order and model, model is the key\n#model_orderDict=adt.table_to_Adict(orderPool[['order_id','mode_no']],'model_no','order_id')\n\n\n#last six working days of certain models produced \n#dateindex=working_days[working_days['work_dates']==mkdates].index()\n#index_list=working_days[(dateindex-7):dateindex].to_list()\n#latest_6=prd_records[(prd_records['prd_date'].isin(index_list))&(prd_records['model_no'].isin(modelList))] \n\n\n#slowest speed of all models\n#slowest=practice_pace[['line','model_no','learn_pace']][practice_pace['day_process']==1]\n\n\n#product_pace=adt.cross_sep_dict(learn_pace,'model_no','prod_no')\n\n#a list of all products to be manufactured\n#prod_list=ordertable['product'].unique()\n\n#all possible groups of lines,orders,plan dates\n#order_line=[tuple([i,k]) for i in prod_line for k in orderList]\n#line_date=[tuple(c) for c in pulp.allcombinations(prod_line,plan_dates)]\n\n#orders=adt.table_to_Adict(ordertable,'product','order_num')\n#got day volume produced based on practice curve\ndp_matrix=adt.day_speed_df(practice_pace,model_SAH,prodline)\norder_spd=adt.order_speed_df(dp_matrix,orderPool)\n\n#the problem variables\nCsums=pulp.LpVariable.dicts(\"line_prod\",(prod_line,orderList),0,None, pulp.LpInteger)\nr=pulp.LpVariable.dicts(\"release\",(prod_line,orderList),0)\ncompD=pulp.LpVariable.dicts(\"compDate\",(prod_line,orderList),0)\n#CLines=pulp.LpVariable(\"CLines\",[j for j in line_date] ,0) #total amount of all lines by day\nCmax=pulp.LpVariable.dicts(\"maxDate\",[i for i in orderList],0)\n#Create the 'prob' variable to contain the problem data\nprob=pulp.LpProblem(\"The APS Problem\",pulp.LpMaximize)\n#prob=pulp.LpProblem(\"The APS Problem\",pulp.LpMinimize)\n\n#objective function: consider order priority and leadtime\n#eps=1e-2 \n#lambda compD[l][o] if compD[l][o]<=len(plan_dates) else len(plan_dates)\n#\nprob+=pulp.lpSum([orderPool['priority'][o]*orderPool['order_type'][o]*\\\n adt.model_total_volume(order_spd[(order_spd['order_id']==o)&(order_spd['line_no']==l)][['day_process','num_by_day']],adt.prod_days(compD[l][o],len(plan_dates),r[l][o]),len(plan_dates))\\\n for o in orderList for l in prod_line])\n\n#prob+=Cmax+eps*lpSum([r[j] for j in order_line])-eps*[compD[j] for j in order_line]\n#The constraints\n#1. every order has to be completed before due date\n#2. relationships between release date and due date\n#3. 
release date>=max(0,epst-plan_dates[0]) \n#4.fixed sum across day and lines equal total num of orders\nfor o in orderList:\n for l in prod_line:\n prob+=compD[l][o] <= Cmax[o]\n #prob+=compD[l][o] >=r[l][o]+Csums[l][o]*(process_days[o][l]/orderPool['order_num'][o])\n \n prob+=compD[l][o]>=r[l][o]+adt.process_csum(Csums[l][o],order_spd[(order_spd['order_id']==o)&(order_spd['line_no']==l)][['day_process','num_by_day']])\n prob+=r[l][o]>=max(0,(orderPool['epst'][o]-datetime.datetime.strptime(plan_dates[0],'%Y-%m-%d')).days)\n prob+=pulp.lpSum([Csums[l][o] for l in prod_line])==orderPool['order_num'][o]\n prob+=Cmax[o]<=(orderPool['deli_date'][o]-datetime.datetime.strptime(plan_dates[0],'%Y-%m-%d')).days\nprob+=pulp.lpSum([Csums[l][o] for l in prod_line for o in orderList])==orderPool['order_num'].sum()\n#5.every line cannot made two orders at the same time\n\neps=1e-2 \nfor o in orderList:\n prob+=Cmax+eps*pulp.lpSum([r[l][o] for l in prod_line])-eps*pulp.lpSum([compD[l][o] for l in prod_line]) \n \nprob.writeLP(\"APSModel.lp\")\nprob.solve()\nprint(\"Status:\", pulp.LpStatus[prob.status])\n\n\n\n#for o in orderList:\n #for l in prod_line:\n #print((l,o),r[l,o].value(),compD[l,o].value(),Cmax[l,o].value())\n#a=[]\n#for v in prob.variables():\n #print(v.name, \"=\", v.varValue)\n #a.append((v.name,v.varValue))\n#rlt_df=pd.DataFrame(a)\n#rlt_df.to_csv(\"test.csv\")\n \nprint(\"Total amounts of products by week =\", pulp.value(prob.objective)) \n#for i in order_line:\n #if Csums[i]>0:\n #print(i, Csums[i],r[i],compD[i])\n \n #5 orders with the same models tend to be planned together, because a.change time and b.high efficiency\n #6 \n #prob+=orderPool['deli_date'][order]-datetime.timedelta(days=3)==prod_sums[l][order][d]/speed[order] for l in prod_lines for d in plan_dates\n#2.use practice curve to compute PST (LPST,EPST),plan_date should be greater than PST\n#3.the same model has to be planned in consecutive days\n#mo_prac=dict()\n#for line in prod_line:\n #inter_dict=dict()\n #check_startups=dict()\n #every line produced models\n #temp_one_line=latest_6[latest_6['line_id']==line]\n #if len(temp_one_line)!=0:\n #temp_old=np.zeros((6,1))\n #temp_old_dict=adt.table_to_Adict(temp_one_line,'model_no','order_no')\n \n #for model in modelList:\n #get model's plan_date if planned =1,not planed =0\n #consecutive pattern better than sparsely manufacture the same models(required now)\n #prob+=adt.checknum([np.sign(sum([prod_sums[line][order][d] for order in model_orderDict[model]])) for d in plan_dates])==[np.sign(sum([prod_sums[line][order][d] for order in model_orderDict[model]])) for d in plan_dates].count(1)\n #inter_dict[model]=temp\n \n #practice_pace['learn_pace'][practice_pace['line']==line & practice_pace['model_no']==model].max()\n #adt.checknum([np.sign(sum([prod_sums[line][order][d] for order in model_orderDict[model]])) for d in plan_dates)]))>=0\n #for now choose the slowest speed to simplify the problem\n #for a in model_orderDict[model]:\n #PST=adt.PST_onLine_date(orderPool['order_date'][orderPool['order_no']==a],12,orderPool['deli_date'][orderPool['order_no']==a],prod_sums[line][a][d]*slowest['learn_pace'][slowest['line']=line & slowest['model_no'=model]]+3\n #PST=adt.PST_onLine_date(orderPool['order_date'][orderPool['order_no']==a],\\\n #orderPool['deli_date'][orderPool['order_no']==a],12,\\\n #math.ceil(float(orderPool['order_num'][orderPool['order_no']==a]/(8*slowest['learn_pace'][slowest['line']==line & slowest['model_no']==model]))+3))\n #PST can also be just EPST,because there are 
more than 1 line can do manufacture\n    \n    #2.plan_date should be greater than EPST\n    #prob+=pulp.lpSum([prod_sums[line][a][d] for d in plan_dates if d<PST])\n
#!/usr/bin/env python\nimport rospy\nimport numpy as np\nfrom sensor_msgs.msg import LaserScan\nfrom geometry_msgs.msg import Vector3\nfrom std_msgs.msg import Float64\nfrom virtualfastkeyboardfurious_wall_following.msg import gaps\n\n# Tunable limits: far readings are clamped to max_gap_distance, very close\n# ones to min_gap_distance; a jump between neighbouring rays larger than\n# scale_permissible_diff times the nearer ray marks a gap edge.\nmax_gap_distance = 3.0\nmin_gap_distance = 0.1\nscale_permissible_diff = 0.5\n\ngap_center_pub = rospy.Publisher('gap_center', Vector3, queue_size=10)\ngaps_pub = rospy.Publisher('gaps', gaps, queue_size=10)\n\nprev_gap_msg_info = None\n\n# Segment the scan into gaps and publish the center of the widest one.\ndef find_gaps(data):\n    min_angle = data.angle_min\n    angle_increment = data.angle_increment\n    data_arr = np.array(data.ranges)\n    data_arr[data_arr>max_gap_distance] = max_gap_distance\n    data_arr[data_arr<min_gap_distance] = min_gap_distance\n\n    #indices where consecutive ranges jump enough to open or close a gap\n    segment_storage = []\n    for i in range(len(data_arr)-1):\n        if abs(data_arr[i+1]-data_arr[i])>scale_permissible_diff*data_arr[i]:\n            segment_storage.append(i)\n\n    gap_msg_info = gaps()\n\n    for i in range(len(segment_storage)-1):\n        #angle between gap points\n        scan_gap_angle = angle_increment*(segment_storage[i+1]-segment_storage[i])\n\n        #two sides of the gap \n        s1 = data_arr[segment_storage[i]] \n        s2 = data_arr[segment_storage[i+1]]\n\n        gap_length = np.sqrt( s1**2 + s2**2 - 2*s2*s1*np.cos(scan_gap_angle))\n        alpha = np.arccos(1/(2*s1*gap_length)*(s1**2 + gap_length*gap_length - s2**2))\n\n        #vector from robot position to center of gap\n        r_vector = np.sqrt(s1**2 + (gap_length/2)**2 - s1*gap_length*np.cos(alpha))\n        center_angle = np.arccos((1/(2*s1*r_vector))*(s1**2 + r_vector**2 -(gap_length/2)**2 ))\n\n        #transforming the angle of robot origin to scan reference frame.\n        start_angle = angle_increment*segment_storage[i]\n        projection_angle = start_angle + center_angle + min_angle\n        \n        #polar to cartesian coordinates; build fresh messages for each gap so\n        #the lists do not hold references to one shared, mutated object\n        gap = Vector3()\n        gap.x = np.cos(projection_angle)*r_vector\n        gap.y = np.sin(projection_angle)*r_vector\n        gap.z = 0.0\n        gap_msg_info.gap_centers.append(gap)\n\n        gap_length_msg = Float64()\n        gap_length_msg.data = gap_length\n        gap_msg_info.gap_size.append(gap_length_msg)\n        \n        gap_angle = np.degrees(projection_angle)\n\n        gap_angle_msg = Float64()\n        gap_angle_msg.data = gap_angle\n        gap_msg_info.gap_angle.append(gap_angle_msg)\n\n        gap_msg_info.num_gaps +=1\n\n\n    global prev_gap_msg_info\n\n    if gap_msg_info.num_gaps>0:\n        best_gap_idx = np.argmax([m.data for m in gap_msg_info.gap_size])\n        best_gap_vector = gap_msg_info.gap_centers[best_gap_idx] \n        #publishing gap center and gap_info messages\n        gap_center_pub.publish(best_gap_vector)\n        gaps_pub.publish(gap_msg_info)\n\n# Callback that receives LIDAR data on the /scan topic.\n# data: the LIDAR data, published as sensor_msgs::LaserScan\ndef scan_callback(data):\n    find_gaps(data)\n\n\n\n# Boilerplate code to start this ROS node.\nif __name__ == '__main__':\n    rospy.init_node('gap_finding_node', anonymous=True)\n    rospy.Subscriber(\"scan\", LaserScan, scan_callback)\n    rospy.spin()\n\n","sub_path":"wall_following_lab/virtualfastkeyboardfurious_wall_following/scripts/find_gap.py","file_name":"find_gap.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"113892815","text":"import sys\nimport time\nimport random as rd\nimport ctypes\nimport numpy as np\n# Import names individually?\n# Clashes with reserved words seem unlikely, so just import everything\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nimport pyqtgraph as pg\n\n# Load the shared libraries\n# Run this from the directory that contains the libraries\nexe2_win = ctypes.cdll.LoadLibrary(\"exe2_win.so\")\nshare01 = ctypes.cdll.LoadLibrary(\"share01.so\")\n# Define an array type holding 64 c_floats\nFloatArray64 = ctypes.c_float * 64\nIntArray64 = ctypes.c_int32 * 64\n# Initialization required before using the shared library\n# Without it, problems are very likely\nexe2_win.initPy()\n\n# Debug function\n#share01.sortTest()\n\n# Fetch the functions & declare return and argument types\ngetTopSprm = exe2_win.getTopSprmPy\ngetTopSprm.restype = None\ngetTopSprm.argtypes = (ctypes.c_char_p, FloatArray64)\n\ngetActPrm1L = share01.getActPrm1LPy\ngetActPrm1L.restype = ctypes.c_int32\ngetActPrm1L.argtypes = (IntArray64, ctypes.c_int32)\n\n# Wrap the library functions so they are easier to use\n# The argument is the parameter file for the generation you want\ndef getTopSprmWrap(fname):\n    f_arr_c = FloatArray64()\n    getTopSprm(fname.encode(), f_arr_c)\n    return 
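# --- Hedged sketch: the core PuLP pattern the APS script above is built on --
# dict-indexed variables, linear constraints, one objective -- reduced to a toy
# allocation so it solves standalone. All numbers are illustrative.
import pulp

lines = ['L1', 'L2']
orders = {'O1': 100, 'O2': 60}   # order -> required quantity
cap = {'L1': 90, 'L2': 110}      # line  -> weekly capacity

x = pulp.LpVariable.dicts('make', (lines, list(orders)), 0, None, pulp.LpInteger)
m = pulp.LpProblem('mini_aps', pulp.LpMinimize)
m += pulp.lpSum(x[l][o] for l in lines for o in orders)    # placeholder objective
for o, qty in orders.items():
    m += pulp.lpSum(x[l][o] for l in lines) == qty         # demand met exactly
for l in lines:
    m += pulp.lpSum(x[l][o] for o in orders) <= cap[l]     # capacity respected
m.solve()
print(pulp.LpStatus[m.status],
      {(l, o): x[l][o].value() for l in lines for o in orders})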
list(f_arr_c)\n\nclass Widget(QWidget):\n # よく使う色は先に定義してみる?\n MYGREEN = QColor(0, 200, 51)\n # 候補手用緑\n CANDGREEN = QColor(124, 252, 0)\n # うっすら緑\n BYAKUROKU = QColor(218, 234, 208)\n # マスの大きさ\n SQLEN = 80\n # グラフ用に12色決めておく\n MYCOLORS = (QColor(0xde, 0x96, 0x10), QColor(0xc9, 0x3a, 0x40),\n QColor(0xff, 0xf0, 0x01), QColor(0xd0, 0x6d, 0x8c),\n QColor(0x65, 0xac, 0xe4), QColor(0xa0, 0xc2, 0x38),\n QColor(0x56, 0xa7, 0x64), QColor(0xd1, 0x6b, 0x16),\n QColor(0xcc, 0x52, 0x8b), QColor(0x94, 0x60, 0xa0),\n QColor(0xf2, 0xcf, 0x01), QColor(0x00, 0x74, 0xbf))\n # 色数値を色に変換\n NUM2COLOR = {1: Qt.black, 2: Qt.white}\n # 8方向を示す数値のタプル\n DIRECTIONS = (-10, -9, -8, -1, 1, 8, 9, 10)\n \n def __init__(self, parent=None):\n super(Widget, self).__init__(parent)\n self.setWindowTitle(\"Othello\")\n # ウィンドウの位置と大きさ\n # サイズはマスから計算\n self.setGeometry(100, 100, self.SQLEN * 11 + 500, self.SQLEN * 11)\n # 背景カラー(微妙にグレー)\n #self.setStyleSheet(\"background:#eeeeee\")\n self.setStyleSheet(\"background:#ffffff\")\n # タグからマスの中心座標に変換したい\n # 需要が出てきそうなのでマスの左上の座標も追加する\n # (マスの中心, マスの左上)\n self.tag2pos = dict()\n # 盤面画像作成\n self.img = QImage(self.SQLEN * 10, self.SQLEN * 10, QImage.Format_ARGB32)\n # 余白幅\n self.margin = self.SQLEN\n # コマ半径\n self.radius = int(self.SQLEN * 0.45)\n # 黒の手番からスタート\n self.turn = 1\n # パスカウンタ(2になったら終了)\n self.pass_count = 0\n # ゲーム終了フラグ\n # まだ使い道無いからスタートフラグも兼ねるか\n self.end_flag = True\n # キャンバスのマウスクリック動作のロックフラグ\n # AIのターンで勝手に操作出来ないようにする予定\n self.press_lock = False\n # プレイヤーがAIか人か判別するための変数\n # players[0] が先手, [1]が後手\n # False が人, True がAI\n self.players = [False, False]\n # ペインター作成?\n imgcanvas = QPainter(self.img)\n # フォント設定\n font = QFont()\n font.setPointSize(15)\n imgcanvas.setFont(font)\n # 座標を示す文字を描画, タグと座標の対応辞書の作成\n for i in range(8):\n # 左側から埋めていく\n x = self.margin + self.SQLEN * i\n # アルファベット表示(列)\n imgcanvas.drawText(x + self.SQLEN // 2 - 10, self.margin - 20, chr(i + 97))\n # 数字表示(行)\n # 変数 x を y 座標の決定に使う(ややこしい)\n imgcanvas.drawText(self.margin - 30, x + self.SQLEN // 2 + 10, chr(i + 49))\n for j in range(8):\n # 正方形を描く\n y = self.margin + self.SQLEN * j\n tag = chr(i + 97) + chr(j + 49)\n # 正方形の中央の座標と左上の座標を記録\n # 左上の座標はタプルのまま!\n self.tag2pos[tag] = (QPoint(x + self.SQLEN // 2, y + self.SQLEN // 2), (x, y))\n \n # 初期盤面設定\n self.setInitBoard(imgcanvas)\n # 評価値リストを入手\n # ファイル名指定\n # クラス内変数で所持\n #self.use_sprm = getTopSprmWrap(\"prm//simple_prm100.bin\")\n #self.use_sprm = getTopSprmWrap(\"prm//sprm_not_mutate020.bin\")\n #self.use_sprm = getTopSprmWrap(\"prm//sprm_corner0.5_100.bin\")\n #self.use_sprm = getTopSprmWrap(\"prm//sprm_vsrand100.bin\")\n #self.use_sprm = getTopSprmWrap(\"prm//sprm_corner0.5neg_100.bin\")\n self.use_sprm = getTopSprmWrap(\"prm//sprm050_06_rlt_1p_rd005//sprm050_06_rlt_1p_rd005_g100.bin\")\n # ボタン等設定\n self.setButtons()\n self.setRadioButtons()\n # タイマー設定\n self.setTimers()\n # 画面切り替えフラグ?\n self.test_flag = False\n # グラフセット\n self.setGraphs()\n # テスト画像初期化\n self.setTestImage()\n #self.setEssImage()\n \n # 初期盤面設定\n # リセットしたいキャンバスを与える\n def setInitBoard(self, imgcanvas):\n # 盤面情報一次元配列を初期化\n # 黒1, 白2, 空0, 番兵-1とする\n self.board_info = [-(i <= 8 or i % 9 == 0 or i >= 81) for i in range(91)]\n # ペン設定\n pen = QPen(Qt.black)\n pen.setWidth(4)\n imgcanvas.setPen(pen)\n # ブラシ設定\n imgcanvas.setBrush(self.MYGREEN)\n # タグと座標の辞書から座標を取り出す\n for pos in self.tag2pos.values():\n imgcanvas.drawRect(*pos[1], self.SQLEN, self.SQLEN)\n \n # 最初の4つのコマを配置\n for tag, c_num in [(\"d4\", 1), (\"d5\", 2), (\"e4\", 2), (\"e5\", 1)]:\n self.putKoma(tag, c_num, imgcanvas)\n # ターンを先手にする\n self.turn = 
1\n # 候補手探し\n self.getCandidates()\n # リセット時はエンド状態にしておく\n self.end_flag = True\n\n # 盤面情報を標準出力で確認する\n # 配列の先頭が右下になるように文字列を組み立てる\n def printBoard(self):\n self._printBoard(self.board_info)\n \n def _printBoard(self, board):\n moji = \"\"\n # 縦\n for y in range(9, 81, 9):\n # 文字列の先頭を改行文字にする\n moji = \"\\n\" + moji\n # 横\n for x in range(1, 9):\n # 文字列の先頭に加えてゆく\n moji = \"{:1d} \".format(board[x + y]) + moji\n print(moji)\n \n # ボタン作成\n def setButtons(self):\n self.test_button = QPushButton(\"test\", self)\n self.test_button.move(100, 820)\n self.test_button.setStyleSheet(\"\\\n font-size:20pt;\\\n font-weight:bold;\\\n font-family:Monotype Corsiva;\\\n background:#ffffff\")\n self.test_button.resize(140, 50)\n # クリックされたときに実行\n self.test_button.clicked.connect(self.testClicked)\n\n # リセットボタン\n self.reset_button = QPushButton(\"Reset\", self)\n self.reset_button.move(100, 750)\n self.reset_button.setStyleSheet(\"\\\n font-size:20pt;\\\n font-weight:thin;\\\n font-family:Times New Roman;\\\n background:#ffffff\")\n self.reset_button.resize(140, 50)\n # クリックされたときに実行\n self.reset_button.clicked.connect(self.resetClicked)\n\n # フォントファミリー, サイズ, 太さ, 斜体かどうかを決定\n font = QFont(\"Times New Roman\", 20, 500, True)\n # スタートボタン\n # プレイヤーの決定等を先に行ない, これを押したらスタートするようにしたい\n self.start_button = QPushButton(\"START\", self)\n self.start_button.setFont(font)\n self.start_button.resize(140, 50)\n self.start_button.move(250, 820)\n self.start_button.clicked.connect(self.startClicked)\n \n # グラフ作成\n # 試合経過と評価値の推移のグラフにしたい\n def setGraphs(self):\n pg.setConfigOptions(\n antialias=True, foreground='k', background=(255, 255, 255)\n )\n self.win = pg.GraphicsWindow(\n size=(500, 400), border=True, parent=self\n )\n self.win.move(800, 50)\n # ローカル変数でも維持されるっぽい?\n # グラフの追加とタイトルの設定\n self.graph = self.win.addPlot(title=\"Data\")\n # 'bottom' は縦軸, 'left' は横軸\n # 'units' は軸の単位\n self.graph.setLabel('left', \"point\")\n self.graph.setLabel('bottom', \"progress\")\n # 幅\n self.x_range = [0, 0]\n self.y_range = [-10, 10]\n # 横軸の最小値, 最大値, 縦軸の最小値, 最大値\n # オートスケールにした方が都合が良いかも\n #self.graph.setRange(xRange=self.x_range, yRange=self.y_range)\n #xaxis = self.graph.getAxis('bottom')\n # 横軸の目盛の場所とラベル (数値, ラベル) のタプルのリスト?\n # 数値 = ラベルとしておく\n #x_ticks = [(i, i) for i in range(x_range[0], x_range[1] + 1, 100)]\n #xaxis.setTicks([x_ticks])\n #yaxis = self.graph.getAxis('left')\n # 縦軸の目盛の場所とラベル\n #y_ticks = [(i, i) for i in [-0.5, -0.25, 0, 0.25, 0.5]]\n #yaxis.setTicks([y_ticks])\n # グリッド線の表示\n self.graph.showGrid(x=True, y=True)\n # 反例を表示\n #graph.addLegend()\n # どれだけターンが進行したか初期はターン0\n # 10 マス分のデータの配列を用意\n # 空リストの掛け算同じアドレスが10個コピーされてしまうみたい\n # タプルのリストで試してみる?\n gpen = pg.mkPen((0, 0, 255), width=2)\n # 曲線オブジェクト?\n # これでデータ書き換える\n # このときはデータ与えなくてもいいらしい\n self.curve = self.graph.plot(pen=gpen, name=\"point\")\n # データ初期化\n self.setInitGraph()\n \n # グラフ初期化関数\n def setInitGraph(self):\n # 進行ターン数\n self.progress = 0\n # 横軸リスト\n self.progress_list = [0]\n # 縦軸評価値\n self.points = [0]\n # グラフにデータセット\n self.curve.setData(self.progress_list, self.points)\n \n # ラジオボタンの設定\n def setRadioButtons(self):\n # ラジオボタンのx座標\n x = 800\n self.label1 = QLabel(\"black\", self)\n # テキストの位置を指定\n self.label1.move(x, 470)\n # テキストの詳細設定\n self.label1.setStyleSheet(\"\\\n font-size:10pt;\\\n font-weight:500;\\\n color:#000000;\\\n \")\n # 箱の大きさ変更?\n self.label1.resize(150, 30)\n # 左揃え\n self.label1.setAlignment(Qt.AlignLeft)\n # 先手をAIにするか人間にするか決めるラジオボタン\n self.rgroup1 = QButtonGroup(self)\n self.rbutton1 = QRadioButton(\"human\", self)\n 
self.rgroup1.addButton(self.rbutton1)\n self.rbutton2 = QRadioButton(\"AI\", self)\n self.rgroup1.addButton(self.rbutton2)\n self.rbutton1.move(x, 500)\n self.rbutton2.move(x, 530)\n self.rbutton1.resize(150, 30)\n self.rbutton2.resize(150, 40)\n # 初期値は人\n self.rbutton1.setChecked(True)\n # プレイヤー変更は片方のラジオボタンだけ見れば大丈夫\n # そもそもラジオボタンよりチェックボックスの方がよかったのでは?\n # それは後々考えよう\n self.rbutton1.toggled.connect(lambda : self.detPlayer(0))\n\n # フォントの決め方を変える\n font = QFont()\n font.setPointSize(10)\n font.setBold(True)\n self.label2 = QLabel(\"white\", self)\n self.label2.setFont(font)\n self.label2.move(x, 600)\n # 後手をAIにするか人間にするか決めるラジオボタン\n self.rgroup2 = QButtonGroup(self)\n self.rbutton3 = QRadioButton(\"human\", self)\n self.rgroup2.addButton(self.rbutton3)\n self.rbutton4 = QRadioButton(\"AI\", self)\n self.rgroup2.addButton(self.rbutton4)\n self.rbutton3.move(x, 630)\n self.rbutton4.move(x, 660)\n self.rbutton3.resize(150, 30)\n self.rbutton4.resize(150, 30)\n # 初期値は人\n self.rbutton3.setChecked(True)\n # 無名関数?\n self.rbutton3.toggled.connect(lambda : self.detPlayer(1))\n \n # タイマー設定関数\n # 複数形だけど今のところひとつ\n def setTimers(self):\n self.timer = QTimer(self)\n # 一発ずつ実行\n self.timer.setSingleShot(True)\n # 関数を繋げる\n self.timer.timeout.connect(self.randomAction)\n # 待ち時間(ミリ秒)\n self.wait_time = 1000\n \n # ラジオボタンが変更されたとき実行\n def detPlayer(self, index):\n # ブールそのまま代入すればいいのでは?\n if index == 0:\n self.players[0] = self.rbutton2.isChecked()\n else:\n self.players[1] = self.rbutton4.isChecked()\n #print(self.players)\n \n # テスト用画像を作成\n def setTestImage(self):\n self.test_img = QImage(self.SQLEN * 10, self.SQLEN * 10, QImage.Format_ARGB32)\n imgcanvas = QPainter(self.test_img)\n # ペン設定\n pen = QPen()\n pen.setColor(Qt.black)\n pen.setWidth(4)\n imgcanvas.setPen(pen)\n # フォント設定\n font = QFont()\n font.setPointSize(12)\n imgcanvas.setFont(font)\n # なんとなく色配列作っておく\n rgb = [0, 0, 0]\n # 評価値によって色を変えた正方形を描きたい\n for i in range(8):\n for j in range(8):\n # パラメータの添え字を計算\n index = i * 8 + j\n # 評価値を取り出す\n value = self.use_sprm[index]\n # 正なら緑に近づけたい, 負なら赤に\n # 0は黄色\n if value >= 0:\n rgb[0] = 255 - int(510 * value)\n rgb[1] = 255\n else:\n rgb[0] = 255\n rgb[1] = 255 + int(510 * value)\n # ここで配列分割 * の出番?\n color = QColor(*rgb)\n imgcanvas.setBrush(color)\n # 正方形を描く\n x = self.margin + self.SQLEN * i\n y = self.margin + self.SQLEN * j\n imgcanvas.drawRect(x, y, self.SQLEN, self.SQLEN)\n # 数値も表示\n imgcanvas.drawText(x + 18, y + 45, \"{:5.2f}\".format(value))\n \n # 主要マスのみ強調した画像を作りたい\n def setEssImage(self):\n self.test_img = QImage(self.SQLEN * 10, self.SQLEN * 10, QImage.Format_ARGB32)\n imgcanvas = QPainter(self.test_img)\n # ペン設定\n pen = QPen()\n pen.setColor(Qt.black)\n pen.setWidth(4)\n imgcanvas.setPen(pen)\n # フォント設定\n font = QFont()\n font.setPointSize(25)\n imgcanvas.setFont(font)\n # 左上の主要マス\n essence = {\n \"a1\": \" 1\", \"b1\": \" 2\", \"b2\": \" 5\", \"c1\": \" 3\", \"c2\": \" 6\",\n \"c3\": \" 8\", \"d1\": \" 4\", \"d2\": \" 7\", \"d3\": \" 9\", \"d4\": \"10\"\n }\n for tag, pos in self.tag2pos.items():\n # アンパック代入?\n x, y = pos[1]\n # 主要マスかどうかで色分け\n if tag in essence:\n imgcanvas.setBrush(self.MYGREEN)\n moji = essence[tag]\n else:\n imgcanvas.setBrush(self.BYAKUROKU)\n moji = \"\"\n imgcanvas.drawRect(x, y, self.SQLEN, self.SQLEN)\n imgcanvas.drawText(x + 15, y + 60, moji)\n\n # テストボタンが押された\n # 状態の切り替えのみ行なう\n def testClicked(self):\n # 通常に戻す\n if self.test_flag:\n self.test_flag = False\n self.update()\n return\n # テストモードにする\n self.test_flag = True\n self.update()\n \n # ペイント時にはここを実行しなければならない\n def 
paintEvent(self, event):\n # テスト時\n if self.test_flag:\n canvas = QPainter(self)\n canvas.drawImage(0, 0, self.test_img)\n return\n canvas = QPainter(self)\n canvas.drawImage(0, 0, self.img)\n \n # 押された座標取得\n def mousePressEvent(self, event):\n # ゲーム中以外\n if self.end_flag or self.test_flag:\n return\n if self.press_lock:\n print(\"locked\")\n return\n # 座標をタグに変換\n tag = self.pos2tag(event.pos())\n # 候補手に含まれないなら何もしない\n if tag not in self.candidates:\n return\n # 有効な手なら処理が終わるまでロック\n self.press_lock = True\n # 盤面更新\n self.updateBoard(tag)\n # 描画\n self.update()\n # AIのターンならランダムに打たせる\n # 待ち時間は適当\n if self.players[0] and self.turn == 1:\n self.timer.start(self.wait_time)\n elif self.players[1] and self.turn == 2:\n self.timer.start(self.wait_time)\n # 相手も人, もしくはパスでもう一度指せる場合はロック解除\n else:\n self.press_lock = False\n \n # マウスが動いた時の座標取得?\n def mouseMoveEvent(self, event):\n pass\n \n # マウスが離された\n def mouseReleaseEvent(self, event):\n pass\n \n # QtCore.QPoint のオブジェクトを与えると, 該当するタグを返す\n def pos2tag(self, pos):\n nx = (pos.x() - self.margin) // self.SQLEN\n if (nx < 0 or 7 < nx):\n return \"z0\"\n ny = (pos.y() - self.margin) // self.SQLEN\n if (ny < 0 or 7 < ny):\n return \"z0\"\n # ASCII で元に戻す\n return chr(nx + 97) + chr(ny + 49)\n \n # タグから添え字に変換する\n def tag2sub(self, tag):\n return (57 - ord(tag[1])) * 9 + 105 - ord(tag[0])\n \n # 添え字からタグに変換する\n def sub2tag(self, sub):\n return chr(105 - sub % 9) + chr(57 - sub // 9)\n \n # 該当するタグのマスに円を描く\n # 色は数値で指定するように変更\n # 引数にキャンバスを与えることにする\n # キャンバスだけでなく, 配列も同期するようにする\n def putKoma(self, tag, c_num, imgcanvas):\n # 色を得る\n color = self.NUM2COLOR[c_num]\n # 枠も中身も同じ色で統一\n imgcanvas.setPen(color)\n imgcanvas.setBrush(color)\n # 円を描く\n imgcanvas.drawEllipse(self.tag2pos[tag][0], self.radius, self.radius)\n # 配列も書き換え\n self.board_info[self.tag2sub(tag)] = c_num\n \n # リセットボタンクリック時動作\n def resetClicked(self):\n # ポップアップ表示を追加\n reply = QMessageBox.question(self, \"Menu\", \"Do you reset the board?\",\n QMessageBox.Yes | QMessageBox.No,\n QMessageBox.Yes\n )\n # Yesが押された場合\n if reply == QMessageBox.Yes:\n print(\"reset!!\")\n # ラジオボタンを有効に戻す\n self.rbutton1.setEnabled(True)\n self.rbutton2.setEnabled(True)\n self.rbutton3.setEnabled(True)\n self.rbutton4.setEnabled(True)\n # スタートボタンも有効に\n self.start_button.setEnabled(True)\n # 盤面を初期状態に\n self.setInitBoard(QPainter(self.img))\n # グラフも初期状態に\n self.setInitGraph()\n # 盤面クリックは有効にしておく\n self.press_lock = False\n \n # スタートボタンクリック時動作\n def startClicked(self):\n print(\"START!!\")\n self.rbutton1.setEnabled(False)\n self.rbutton2.setEnabled(False)\n self.rbutton3.setEnabled(False)\n self.rbutton4.setEnabled(False)\n # ゲーム中はボタン無効化\n self.start_button.setEnabled(False)\n # 候補手色塗り\n self.coloringCandidates(QPainter(self.img))\n # 画像変更を適用\n self.update()\n # ゲーム開始!\n self.end_flag = False\n # 先手がAIの場合\n if self.players[0]:\n # ロック\n self.press_lock = True\n # 1発だけ\n self.timer.start(1)\n \n # 新バージョン?\n # 候補手辞書を作成する以外に, 終了処理も行なう\n def getCandidates(self):\n # 候補手辞書を取得\n cand_local = self.getCandidatesLocal(self.board_info, self.turn)\n # 空辞書の場合\n if not cand_local:\n print(\"パス\")\n # ターン変更して探索し直す\n self.turn ^= 3\n cand_local = self.getCandidatesLocal(self.board_info, self.turn)\n # さらに空辞書の場合, 終了処理\n if not cand_local:\n self.end_flag = True\n self.resultPopup()\n # 候補手辞書をクラス内変数に代入\n self.candidates = cand_local\n\n # 候補手のところの色を変える\n # 元に戻すなら back を True に\n def coloringCandidates(self, imgcanvas, back=False):\n # ペン設定\n pen = QPen(Qt.black)\n pen.setWidth(4)\n imgcanvas.setPen(pen)\n # ブラシ設定\n if back:\n 
imgcanvas.setBrush(self.MYGREEN)\n else:\n imgcanvas.setBrush(self.CANDGREEN)\n for tag in self.candidates.keys():\n # 正方形の左上の座標(タプル)を取得してスターで分解\n imgcanvas.drawRect(*self.tag2pos[tag][1], self.SQLEN, self.SQLEN)\n\n # 盤面更新\n # かなり重要��関数だと思う\n # コマを置くタグを渡す\n # 画像を既に読み込んでいる関数から呼び出してはいけない\n def updateBoard(self, tag):\n # キャンバス取り出し\n imgcanvas = QPainter(self.img)\n # 候補手の色を元に戻す\n self.coloringCandidates(imgcanvas, back=True)\n # ひっくり返すマスの添え字を代入\n for sub in self.candidates[tag]:\n # タグに変換してコマをひっくり返す\n self.putKoma(self.sub2tag(sub), self.turn, imgcanvas)\n # コマを置く\n self.putKoma(tag, self.turn, imgcanvas)\n # 盤面情報表示\n self.printBoard()\n # ターンを変更して候補手探し\n # 次がパスならターンが元に戻る可能性あり\n self.turn ^= 3\n self.getCandidates()\n # 次の候補手を色塗り\n self.coloringCandidates(imgcanvas)\n # 進行度インクリメント\n self.progress += 1\n self.progress_list.append(self.progress)\n # 今の盤面の評価値を追加\n self.points.append(self.evaluationBySprm(self.board_info))\n self.curve.setData(self.progress_list, self.points)\n # 描画適用\n self.update()\n \n # 結果のポップアップ\n def resultPopup(self):\n # それぞれのコマを数える\n bc = sum(1 for i in self.board_info if i == 1)\n wc = sum(1 for i in self.board_info if i == 2)\n dif = bc -wc\n if dif == 0:\n moji = \"引き分け\"\n if dif > 0:\n moji = \"黒の{:2d}枚勝ち\".format(dif)\n elif dif < 0:\n moji = \"白の{:2d}枚勝ち\".format(-dif)\n # 選択肢はOKのみ\n reply = QMessageBox.question(self, \"対局終了\", moji,\n QMessageBox.Ok,\n QMessageBox.Ok\n )\n \n # 候補手からランダムに選択\n # ランダムとか言いつつそれ以外も担当している\n def randomAction(self):\n cand_list = list(self.candidates.keys())\n # 候補手が存在しない\n if not cand_list:\n return\n # ランダムで選んで盤面更新\n #self.updateBoard(rd.choice(cand_list))\n # 試しに呼び出してみる\n self.getActWithCFunc()\n # シンプルパラメータを用いた最善手\n tag = self.getBestActBySprm(self.board_info, self.turn)\n # 盤面更新\n self.updateBoard(tag)\n # 次が人ならターンをロック解除\n if not self.players[0] and self.turn == 1:\n self.press_lock = False\n elif not self.players[1] and self.turn == 2:\n self.press_lock = False\n # 次もAIならすぐにこの関数を実行\n else:\n self.timer.start(1)\n \n # 盤面のシンプル評価\n # 評価値が高い程黒が有利と考える\n def evaluationBySprm(self, board):\n # 使う重みの添え字\n c = 0\n pt = 0\n # 番兵以外の添え字を繰り返す\n # 重みリストの添え字と対応する順番で\n for y in range(9, 81, 9):\n for x in range(1, 9):\n # コマ取得\n koma = board[x + y]\n # 黒なら評価値に重みを足す, 白なら引く\n # 空白は何もしない\n if koma == 1:\n pt += self.use_sprm[c]\n elif koma == 2:\n pt -= self.use_sprm[c]\n # 次の重みを参照\n c += 1\n return pt\n\n # クラス内変数の候補手ディクショナリは書き換えないように候補手を探索\n # 引数には盤面情報リスト、手番(ターン)を与える\n # パス処理はこの関数のラッパー関数で行なう予定\n # やっぱり盤面情報の引継ぎとかめんどいから一関数でまとめてみよう\n def getCandidatesLocal(self, board, teban):\n # 候補初期化\n cand_local = dict()\n # 相手の手番(コマの値)を計算\n opponent = teban ^ 3\n # タグ座標辞書から全てのタグを取り出す\n for tag in self.tag2pos.keys():\n # タグを添え字に変換\n sub = self.tag2sub(tag)\n # 空マスじゃなければやり直し\n if board[sub] != 0:\n continue\n # 全方向のひっくり返す添え字候補\n rev_tags = []\n # 全方向探索\n for d in self.DIRECTIONS:\n # 一方向のひっくり返す添え字候補\n tmp = []\n # 隣のマスのコマをチェック\n next_sub = sub + d\n koma = board[next_sub]\n # 相手のコマの場合, それ以外が出るまで対角マスを探索\n while koma == opponent:\n # リス��に追加\n tmp.append(next_sub)\n # 先のマスへ進みコマをチェック\n next_sub += d\n koma = board[next_sub]\n # 自分のコマが出たら, これまでの添え字を候補に追加\n # 空のマスか番兵が出たら, その方向のリストは無効化\n # 探索マスの隣が自分のコマなら, 空リストを足すだけ\n if koma == teban:\n rev_tags += tmp\n # ひっくり返せるマスが存在する場合\n # タグを辞書のキーとし, ひっくり返すマスの添え字リストを値にする\n if rev_tags:\n cand_local[tag] = rev_tags\n # 辞書を返す(空なら空のまま)\n return cand_local\n \n # タグをキーとし, 次の盤面のリストを値とする辞書を作成\n def getNextBoards(self, board, teban):\n # 空辞書で初期化\n next_boards = dict()\n cand_local = self.getCandidatesLocal(board, teban)\n # 
候補タグとひっくり返すマスの添え字リストを繰り返し代入\n for c_tag, c_subs in cand_local.items():\n # 盤面コピーを作成\n nb = board.copy()\n # ひっくり返すマスを繰り返し代入\n for sub in c_subs:\n # 自分のコマに書き換え\n nb[sub] = teban\n # コマを置く\n nb[self.tag2sub(c_tag)] = teban\n # 辞書にキーと要素を追加\n next_boards[c_tag] = nb\n # 次の盤面の辞書を返す\n # 候補手が無ければ空辞書\n return next_boards\n \n # パラメータから最善手を得たい\n # 次の手で決着できる(勝てる)としても, 重みを使った評価だけで判断\n def getBestActBySprm(self, board, teban):\n # 次の盤面辞書を得る\n next_boards = self.getNextBoards(board, teban)\n # 返り値となるタグ(初期値は無効文字列)\n best_tag = \"z0\"\n # 先手の場合\n if teban == 1:\n mx = -float(\"inf\")\n # 指し手と盤面を取り出す\n for tag, nb in next_boards.items():\n pt = self.evaluationBySprm(nb)\n # 暫定最大値なら更新\n if mx < pt:\n mx = pt\n best_tag = tag\n print(mx, best_tag)\n # 後手の場合\n else:\n mn = float(\"inf\")\n for tag, nb in next_boards.items():\n pt = self.evaluationBySprm(nb)\n # 暫定最小値なら更新\n if pt < mn:\n mn = pt\n best_tag = tag\n print(mn, best_tag)\n # 返り値を間違えていた\n return best_tag\n \n # C言語の共有ライブラリで定義された関数で指し手を決定する\n def getActWithCFunc(self):\n # Cに渡すための盤面リスト\n board_list = []\n # 番兵を除いてリストを作る\n for i in range(9, 81, 9):\n for j in range(1, 9, 1):\n board_list.append(self.board_info[i + j])\n # スターを付けて渡し, cのint型配列にする\n i_arr_c = IntArray64(*board_list)\n getActPrm1L(i_arr_c, self.turn)\n\nclass Application(QApplication):\n def __init__(self):\n super(Application, self).__init__(sys.argv)\n # QWidgetの自作子クラス\n self.gui = Widget()\n # ウィンドウの大きさと表示位置を指定する\n # left, top, width, height\n # topは50以下くらいになると画面からはみ出る\n # 自作クラスの初期関数で設定した\n #self.gui.setGeometry(20, 50, 800, 800)\n # ウィンドウの設定\n #self.gui.setStyleSheet(\"background:#eeeeee\")\n # ラベル\n #self.labelTest()\n # 様々なボタン定義\n self.setButtons()\n\n # テキスト表示\n def labelTest(self):\n moji = \"PyQtを勉強中\\n園田継一郎 17T2088B\"\n self.label = QLabel(moji, self.gui)\n # テキストの位置を指定\n self.label.move(100, 100)\n # テキストの詳細設定\n # CSSフォーマットとは何ぞや\n self.label.setStyleSheet(\"\\\n font-size:20pt;\\\n font-weight:bold;\\\n color:#ff00ff;\\\n background-color:#00ff00;\\\n \")\n # 箱の大きさ変更?\n self.label.resize(400, 100)\n # 中央揃え\n self.label.setAlignment(Qt.AlignCenter)\n \n def setButtons(self):\n y = 750\n # 終了ボタン\n self.end_button = QPushButton(\"終了\", self.gui)\n self.end_button.move(250, y)\n self.end_button.setStyleSheet(\"\\\n font-size:15pt;\\\n font-weight:100;\\\n font-family:游明朝;\\\n background:#ff0000\")\n self.end_button.clicked.connect(self.clickEndButton)\n # 通常表示ボタン\n self.show_normal_button = QPushButton(\"通常表示\", self.gui)\n self.show_normal_button.move(400, y)\n self.show_normal_button.setStyleSheet(\"\\\n font-size:15pt;\\\n font-weight:800;\\\n font-family:游明朝;\\\n background:lime green\")\n self.show_normal_button.clicked.connect(self.gui.showNormal)\n # 全画面ボタン\n self.show_full_button = QPushButton(\"全画面\", self.gui)\n self.show_full_button.move(550, y)\n self.show_full_button.setStyleSheet(\"\\\n font-size:10pt;\\\n font-weight:600;\\\n font-family:游ゴシック;\\\n background:#ffff00\")\n self.show_full_button.resize(100, 50)\n self.show_full_button.clicked.connect(self.gui.showFullScreen)\n \n\n # 終了ボタン\n def clickEndButton(self):\n reply = QMessageBox.question(self.gui, \"終了\", \"終了しますか?\",\n QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel,\n QMessageBox.Yes\n )\n if reply == QMessageBox.Yes:\n print(\"end\")\n sys.exit()\n\n # テキスト変更\n def changeText(self):\n self.label.setText(\"Text Changes!\")\n \n def changeButtonText(self):\n self.button.setText(\"Changed\")\n\n def run(self):\n self.gui.show()\n sys.exit(self.exec_())\n\n\ndef main():\n app = Application()\n 
app.run()\n\nif __name__ == \"__main__\":\n main()","sub_path":"test006.py","file_name":"test006.py","file_ext":"py","file_size_in_byte":34901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
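# --- Hedged sketch: the ctypes declaration pattern test006.py relies on,
# shown against libc so it runs without the custom .so files (a Unix-like
# system is assumed; ctypes.util.find_library('c') can return None elsewhere).
import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library('c'))

# Declare the signature before calling; note the attribute is spelled
# 'restype' -- a misspelled name such as 'rectype' is silently ignored.
strlen = libc.strlen
strlen.restype = ctypes.c_size_t
strlen.argtypes = (ctypes.c_char_p,)
print(strlen(b'othello'))        # -> 7

# Fixed-size array types work the same way as FloatArray64/IntArray64 above.
FloatArray4 = ctypes.c_float * 4
arr = FloatArray4(1.0, 2.0, 3.0, 4.0)
print(list(arr))                 # -> [1.0, 2.0, 3.0, 4.0]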